From patchwork Sat Nov 18 02:07:04 2023
X-Patchwork-Submitter: "Logan.Lyu"
X-Patchwork-Id: 44705
Message-ID: <9dc635d0-a01d-43a8-aeff-bec37d6143a1@myais.com.cn>
Date: Sat, 18 Nov 2023 10:07:04 +0800
From: "Logan.Lyu"
To: ffmpeg-devel@ffmpeg.org
Cc: jdek@itanimul.li
Organization: myais
Subject: [FFmpeg-devel] [PATCH 5/6] lavc/aarch64: new optimization for 8-bit hevc_qpel_bi_v

put_hevc_qpel_bi_v4_8_c: 166.1
put_hevc_qpel_bi_v4_8_neon: 61.9
put_hevc_qpel_bi_v6_8_c: 309.4
put_hevc_qpel_bi_v6_8_neon: 75.6
put_hevc_qpel_bi_v8_8_c: 531.1
put_hevc_qpel_bi_v8_8_neon: 78.1
put_hevc_qpel_bi_v12_8_c: 1139.9
put_hevc_qpel_bi_v12_8_neon: 238.1
put_hevc_qpel_bi_v16_8_c: 2063.6
put_hevc_qpel_bi_v16_8_neon: 308.9
put_hevc_qpel_bi_v24_8_c: 4317.1
put_hevc_qpel_bi_v24_8_neon: 629.9
put_hevc_qpel_bi_v32_8_c: 8241.9
put_hevc_qpel_bi_v32_8_neon: 1140.1
put_hevc_qpel_bi_v48_8_c: 18422.9
put_hevc_qpel_bi_v48_8_neon: 2533.9
put_hevc_qpel_bi_v64_8_c: 37508.6
put_hevc_qpel_bi_v64_8_neon: 4520.1
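For context on what the bi (bidirectional) vertical path computes per output
pixel: an 8-tap vertical qpel filter of the 8-bit source rows is added to the
16-bit intermediate block src2 (stored with a MAX_PB_SIZE stride), then
rounded, shifted right by 7 and clipped back to 8 bits. That is what the
calc_qpelb/calc_qpelb2 macros, the sqadd against src2 and the sqrshrun #7 in
the assembly below implement. A minimal scalar sketch for 8-bit input follows;
the taps are HEVC's standard luma quarter-pel taps, but the helper names and
signature are illustrative, not the exact FFmpeg C template code.

#include <stddef.h>
#include <stdint.h>

#define MAX_PB_SIZE 64

/* HEVC luma quarter-pel taps, one row per fractional phase my = 1..3. */
static const int8_t qpel_taps[3][8] = {
    { -1, 4, -10, 58, 17,  -5, 1,  0 },
    { -1, 4, -11, 40, 40, -11, 4, -1 },
    {  0, 1,  -5, 17, 58, -10, 4, -1 },
};

static uint8_t clip_u8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* Scalar model of the 8-bit bi vertical filter: 8-tap filter on src,
 * add the int16_t intermediate src2, then round/shift by 7 and clip
 * (the NEON code does the last step with sqrshrun #7). */
static void qpel_bi_v_sketch(uint8_t *dst, ptrdiff_t dststride,
                             const uint8_t *src, ptrdiff_t srcstride,
                             const int16_t *src2, int height,
                             int my, int width)
{
    const int8_t *f = qpel_taps[my - 1];
    src -= 3 * srcstride;                       /* filter needs 3 rows above */
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            int acc = 0;
            for (int k = 0; k < 8; k++)
                acc += f[k] * src[x + k * srcstride];
            dst[x] = clip_u8((acc + src2[x] + 64) >> 7);
        }
        src  += srcstride;
        src2 += MAX_PB_SIZE;
        dst  += dststride;
    }
}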
Co-Authored-By: J. Dekker
Signed-off-by: Logan Lyu
---
 libavcodec/aarch64/hevcdsp_init_aarch64.c |   5 +
 libavcodec/aarch64/hevcdsp_qpel_neon.S    | 248 ++++++++++++++++++++++
 2 files changed, 253 insertions(+)

diff --git a/libavcodec/aarch64/hevcdsp_init_aarch64.c b/libavcodec/aarch64/hevcdsp_init_aarch64.c
index da5e23575d..f2cce3d221 100644
--- a/libavcodec/aarch64/hevcdsp_init_aarch64.c
+++ b/libavcodec/aarch64/hevcdsp_init_aarch64.c
@@ -251,6 +251,10 @@ NEON8_FNPROTO_PARTIAL_5(qpel_uni_w_hv, (uint8_t *_dst, ptrdiff_t _dststride,
         int height, int denom, int wx, int ox,
         intptr_t mx, intptr_t my, int width), _i8mm);
 
+NEON8_FNPROTO(qpel_bi_v, (uint8_t *dst, ptrdiff_t dststride,
+        const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
+        int height, intptr_t mx, intptr_t my, int width),);
+
 #define NEON8_FNASSIGN(member, v, h, fn, ext) \
         member[1][v][h] = ff_hevc_put_hevc_##fn##4_8_neon##ext; \
         member[2][v][h] = ff_hevc_put_hevc_##fn##6_8_neon##ext; \
@@ -344,6 +348,7 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth)
         NEON8_FNASSIGN(c->put_hevc_epel_bi, 0, 1, epel_bi_h,);
         NEON8_FNASSIGN(c->put_hevc_epel_bi, 1, 0, epel_bi_v,);
         NEON8_FNASSIGN(c->put_hevc_qpel_bi, 0, 0, pel_bi_pixels,);
+        NEON8_FNASSIGN(c->put_hevc_qpel_bi, 1, 0, qpel_bi_v,);
         NEON8_FNASSIGN(c->put_hevc_epel_uni, 0, 0, pel_uni_pixels,);
         NEON8_FNASSIGN(c->put_hevc_epel_uni, 1, 0, epel_uni_v,);
         NEON8_FNASSIGN(c->put_hevc_qpel_uni, 0, 0, pel_uni_pixels,);
diff --git a/libavcodec/aarch64/hevcdsp_qpel_neon.S b/libavcodec/aarch64/hevcdsp_qpel_neon.S
index 8e257208e4..f9422942d8 100644
--- a/libavcodec/aarch64/hevcdsp_qpel_neon.S
+++ b/libavcodec/aarch64/hevcdsp_qpel_neon.S
@@ -866,6 +866,254 @@ function ff_hevc_put_hevc_qpel_v64_8_neon, export=1
         ret
 endfunc
 
+function ff_hevc_put_hevc_qpel_bi_v4_8_neon, export=1
+        load_qpel_filterb x7, x6
+        sub             x2, x2, x3, lsl #1
+        sub             x2, x2, x3
+        mov             x12, #(MAX_PB_SIZE * 2)
+        ld1             {v16.s}[0], [x2], x3
+        ld1             {v17.s}[0], [x2], x3
+        ld1             {v18.s}[0], [x2], x3
+        ld1             {v19.s}[0], [x2], x3
+        ld1             {v20.s}[0], [x2], x3
+        ld1             {v21.s}[0], [x2], x3
+        ld1             {v22.s}[0], [x2], x3
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+        ld1             {\tmp\().s}[0], [x2], x3
+        movi            v24.8h, #0
+        calc_qpelb      v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        ld1             {v25.4h}, [x4], x12     // src2
+        sqadd           v24.8h, v24.8h, v25.8h
+        sqrshrun        v25.8b, v24.8h, #7
+        subs            w5, w5, #1
+        st1             {v25.s}[0], [x0], x1
+.endm
+1:      calc_all
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_v6_8_neon, export=1
+        load_qpel_filterb x7, x6
+        sub             x2, x2, x3, lsl #1
+        sub             x2, x2, x3
+        ld1             {v16.8b}, [x2], x3
+        sub             x1, x1, #4
+        ld1             {v17.8b}, [x2], x3
+        mov             x12, #(MAX_PB_SIZE * 2)
+        ld1             {v18.8b}, [x2], x3
+        ld1             {v19.8b}, [x2], x3
+        ld1             {v20.8b}, [x2], x3
+        ld1             {v21.8b}, [x2], x3
+        ld1             {v22.8b}, [x2], x3
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+        ld1             {\tmp\().8b}, [x2], x3
+        movi            v24.8h, #0
+        calc_qpelb      v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        ld1             {v25.8h}, [x4], x12     // src2
+        sqadd           v24.8h, v24.8h, v25.8h
+        sqrshrun        v25.8b, v24.8h, #7
+        st1             {v25.s}[0], [x0], #4
+        subs            w5, w5, #1
+        st1             {v25.h}[2], [x0], x1
+.endm
+1:      calc_all
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_v8_8_neon, export=1
+        load_qpel_filterb x7, x6
+        sub             x2, x2, x3, lsl #1
+        sub             x2, x2, x3
+        mov             x12, #(MAX_PB_SIZE * 2)
+        ld1             {v16.8b}, [x2], x3
+        ld1             {v17.8b}, [x2], x3
+        ld1             {v18.8b}, [x2], x3
+        ld1             {v19.8b}, [x2], x3
+        ld1             {v20.8b}, [x2], x3
+        ld1             {v21.8b}, [x2], x3
+        ld1             {v22.8b}, [x2], x3
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+        ld1             {\tmp\().8b}, [x2], x3
+        movi            v24.8h, #0
+        calc_qpelb      v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        ld1             {v25.8h}, [x4], x12     // src2
+        sqadd           v24.8h, v24.8h, v25.8h
+        sqrshrun        v25.8b, v24.8h, #7
+        subs            w5, w5, #1
+        st1             {v25.8b}, [x0], x1
+.endm
+1:      calc_all
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_v12_8_neon, export=1
+        load_qpel_filterb x7, x6
+        sub             x2, x2, x3, lsl #1
+        sub             x2, x2, x3
+        sub             x1, x1, #8
+        ld1             {v16.16b}, [x2], x3
+        mov             x12, #(MAX_PB_SIZE * 2)
+        ld1             {v17.16b}, [x2], x3
+        ld1             {v18.16b}, [x2], x3
+        ld1             {v19.16b}, [x2], x3
+        ld1             {v20.16b}, [x2], x3
+        ld1             {v21.16b}, [x2], x3
+        ld1             {v22.16b}, [x2], x3
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+        ld1             {\tmp\().16b}, [x2], x3
+        movi            v24.8h, #0
+        movi            v25.8h, #0
+        calc_qpelb      v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        calc_qpelb2     v25, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        ld1             {v26.8h, v27.8h}, [x4], x12     // src2
+        sqadd           v24.8h, v24.8h, v26.8h
+        sqadd           v25.8h, v25.8h, v27.8h
+        sqrshrun        v26.8b, v24.8h, #7
+        sqrshrun2       v26.16b, v25.8h, #7
+        st1             {v26.8b}, [x0], #8
+        subs            w5, w5, #1
+        st1             {v26.s}[2], [x0], x1
+.endm
+1:      calc_all
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_v16_8_neon, export=1
+        load_qpel_filterb x7, x6
+        sub             x2, x2, x3, lsl #1
+        sub             x2, x2, x3
+        mov             x12, #(MAX_PB_SIZE * 2)
+        ld1             {v16.16b}, [x2], x3
+        ld1             {v17.16b}, [x2], x3
+        ld1             {v18.16b}, [x2], x3
+        ld1             {v19.16b}, [x2], x3
+        ld1             {v20.16b}, [x2], x3
+        ld1             {v21.16b}, [x2], x3
+        ld1             {v22.16b}, [x2], x3
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+        ld1             {\tmp\().16b}, [x2], x3
+        movi            v24.8h, #0
+        movi            v25.8h, #0
+        calc_qpelb      v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        calc_qpelb2     v25, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        ld1             {v26.8h, v27.8h}, [x4], x12     // src2
+        sqadd           v24.8h, v24.8h, v26.8h
+        sqadd           v25.8h, v25.8h, v27.8h
+        sqrshrun        v26.8b, v24.8h, #7
+        subs            w5, w5, #1
+        sqrshrun2       v26.16b, v25.8h, #7
+        st1             {v26.16b}, [x0], x1
+.endm
+1:      calc_all
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_v24_8_neon, export=1
+        stp             x4, x5, [sp, #-64]!
+        stp             x2, x3, [sp, #16]
+        stp             x0, x1, [sp, #32]
+        stp             x7, x30, [sp, #48]
+        bl              X(ff_hevc_put_hevc_qpel_bi_v16_8_neon)
+        ldp             x2, x3, [sp, #16]
+        ldp             x0, x1, [sp, #32]
+        ldr             x7, [sp, #48]
+        ldp             x4, x5, [sp], #48
+        add             x0, x0, #16
+        add             x2, x2, #16
+        add             x4, x4, #32
+        bl              X(ff_hevc_put_hevc_qpel_bi_v8_8_neon)
+        ldr             x30, [sp, #8]
+        add             sp, sp, #16
+        ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_v32_8_neon, export=1
+        stp             d8, d9, [sp, #-64]!
+        stp             d10, d11, [sp, #16]
+        stp             d12, d13, [sp, #32]
+        stp             d14, d15, [sp, #48]
+        sub             x2, x2, x3, lsl #1
+        sub             x2, x2, x3
+        load_qpel_filterb x7, x6
+        ldr             w6, [sp, #64]
+        mov             x12, #(MAX_PB_SIZE * 2)
+0:      mov             x8, x2          // src
+        ld1             {v16.16b, v17.16b}, [x8], x3
+        mov             w11, w5         // height
+        ld1             {v18.16b, v19.16b}, [x8], x3
+        mov             x10, x0         // dst
+        ld1             {v20.16b, v21.16b}, [x8], x3
+        mov             x9, x4          // src2
+        ld1             {v22.16b, v23.16b}, [x8], x3
+        ld1             {v24.16b, v25.16b}, [x8], x3
+        ld1             {v26.16b, v27.16b}, [x8], x3
+        ld1             {v28.16b, v29.16b}, [x8], x3
+.macro calc tmp0, tmp1, src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11, src12, src13, src14, src15
+        ld1             {\tmp0\().8h, \tmp1\().8h}, [x8], x3
+        movi            v8.8h, #0
+        movi            v9.8h, #0
+        movi            v10.8h, #0
+        movi            v11.8h, #0
+        calc_qpelb      v8,  \src0, \src1, \src2,  \src3,  \src4,  \src5,  \src6,  \src7
+        calc_qpelb2     v9,  \src0, \src1, \src2,  \src3,  \src4,  \src5,  \src6,  \src7
+        calc_qpelb      v10, \src8, \src9, \src10, \src11, \src12, \src13, \src14, \src15
+        calc_qpelb2     v11, \src8, \src9, \src10, \src11, \src12, \src13, \src14, \src15
+        ld1             {v12.8h, v13.8h, v14.8h, v15.8h}, [x9], x12    // src2
+        sqadd           v8.8h, v8.8h, v12.8h
+        sqadd           v9.8h, v9.8h, v13.8h
+        sqadd           v10.8h, v10.8h, v14.8h
+        sqadd           v11.8h, v11.8h, v15.8h
+        sqrshrun        v12.8b, v8.8h, #7
+        sqrshrun2       v12.16b, v9.8h, #7
+        sqrshrun        v13.8b, v10.8h, #7
+        sqrshrun2       v13.16b, v11.8h, #7
+        subs            x11, x11, #1
+        st1             {v12.16b, v13.16b}, [x10], x1
+.endm
+1:      calc_all2
+.purgem calc
+2:      add             x0, x0, #32     // dst
+        add             x2, x2, #32     // src
+        add             x4, x4, #64     // src2
+        subs            w6, w6, #32
+        b.ne            0b
+        ldp             d10, d11, [sp, #16]
+        ldp             d12, d13, [sp, #32]
+        ldp             d14, d15, [sp, #48]
+        ldp             d8, d9, [sp], #64
+        ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_v48_8_neon, export=1
+        mov             x8, #32
+        str             x8, [sp, #-80]!
+        stp             x4, x5, [sp, #16]
+        stp             x2, x3, [sp, #32]
+        stp             x0, x1, [sp, #48]
+        stp             x7, x30, [sp, #64]
+        bl              X(ff_hevc_put_hevc_qpel_bi_v32_8_neon)
+        ldp             x4, x5, [sp, #16]
+        ldp             x2, x3, [sp, #32]
+        ldp             x0, x1, [sp, #48]
+        ldr             x7, [sp, #64]
+        add             sp, sp, #64
+        add             x0, x0, #32
+        add             x2, x2, #32
+        add             x4, x4, #64
+        bl              X(ff_hevc_put_hevc_qpel_bi_v16_8_neon)
+        ldr             x30, [sp, #8]
+        add             sp, sp, #16
+        ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_v64_8_neon, export=1
+        b               X(ff_hevc_put_hevc_qpel_bi_v32_8_neon)
+endfunc
 
 function ff_hevc_put_hevc_pel_uni_pixels4_8_neon, export=1
 1:
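A note on how the wider variants are composed, with a small C-style sketch of
the call pattern visible above: v24 runs the 16-wide kernel and then the
8-wide kernel on columns 16..23 (dst/src advanced by 16 pixels, src2 by 16
int16_t elements, i.e. the add #32 on x4); v48 forces the 32-wide kernel's
stack width argument to 32 and then runs the 16-wide kernel on the remaining
columns; v64 simply branches to the 32-wide kernel, whose outer loop walks the
full width in 32-column strips. The wrapper signature below is hypothetical
and only mirrors the register arguments of the assembly entry points; it is
not the FFmpeg C API.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical function type matching the assembly entry points' arguments:
 * (dst, dststride, src, srcstride, src2, height, mx, my, width). */
typedef void bi_v_fn(uint8_t *dst, ptrdiff_t dststride,
                     const uint8_t *src, ptrdiff_t srcstride,
                     const int16_t *src2, int height,
                     intptr_t mx, intptr_t my, int width);

/* Sketch of the v24 composition: 16-wide kernel first, then the 8-wide
 * kernel shifted by 16 columns; src2 advances by 16 elements (32 bytes),
 * matching the add x0/x2 #16 and add x4 #32 sequence in the assembly. */
static void bi_v24_sketch(bi_v_fn *v16, bi_v_fn *v8,
                          uint8_t *dst, ptrdiff_t dststride,
                          const uint8_t *src, ptrdiff_t srcstride,
                          const int16_t *src2, int height,
                          intptr_t mx, intptr_t my)
{
    v16(dst,      dststride, src,      srcstride, src2,      height, mx, my, 16);
    v8 (dst + 16, dststride, src + 16, srcstride, src2 + 16, height, mx, my, 8);
}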