From patchwork Sat Aug 26 08:49:38 2023
X-Patchwork-Submitter: "Logan.Lyu"
X-Patchwork-Id: 43323
Message-ID: <33af9c88-c31d-e11e-58a3-7f9a05718c8f@myais.com.cn>
Date: Sat, 26 Aug 2023 16:49:38 +0800
From: "Logan.Lyu"
To: ffmpeg-devel@ffmpeg.org
Organization: myais
Subject: [FFmpeg-devel] [PATCH 1/4] lavc/aarch64: new optimization for 8-bit hevc_epel_uni_v
Cc: jb@videolan.org, jdek@itanimul.li

checkasm bench:
put_hevc_epel_uni_hv64_8_i8mm: 6568.7
put_hevc_epel_uni_v4_8_c: 88.7
put_hevc_epel_uni_v4_8_neon: 32.7
put_hevc_epel_uni_v6_8_c: 185.4
put_hevc_epel_uni_v6_8_neon: 44.9
put_hevc_epel_uni_v8_8_c: 333.9
put_hevc_epel_uni_v8_8_neon: 44.4
put_hevc_epel_uni_v12_8_c: 728.7
put_hevc_epel_uni_v12_8_neon: 119.7
put_hevc_epel_uni_v16_8_c: 1224.2
put_hevc_epel_uni_v16_8_neon: 139.7
put_hevc_epel_uni_v24_8_c: 2531.2
put_hevc_epel_uni_v24_8_neon: 329.9
put_hevc_epel_uni_v32_8_c: 4739.9
put_hevc_epel_uni_v32_8_neon: 562.7
put_hevc_epel_uni_v48_8_c: 10618.7
put_hevc_epel_uni_v48_8_neon: 1256.2
put_hevc_epel_uni_v64_8_c: 19169.9
put_hevc_epel_uni_v64_8_neon: 2179.2

Co-Authored-By: J.
Dekker Signed-off-by: Logon Lyu --- libavcodec/aarch64/hevcdsp_epel_neon.S | 320 ++++++++++++++++++++++ libavcodec/aarch64/hevcdsp_init_aarch64.c | 5 + 2 files changed, 325 insertions(+) NEON8_FNASSIGN(c->put_hevc_qpel_uni_w, 0, 0, pel_uni_w_pixels,); diff --git a/libavcodec/aarch64/hevcdsp_epel_neon.S b/libavcodec/aarch64/hevcdsp_epel_neon.S index a8d694639b..7ce7eec829 100644 --- a/libavcodec/aarch64/hevcdsp_epel_neon.S +++ b/libavcodec/aarch64/hevcdsp_epel_neon.S @@ -32,6 +32,326 @@ const epel_filters, align=4 .byte -2, 10, 58, -2 endconst +.macro load_epel_filterb freg, xreg + movrel \xreg, epel_filters + add \xreg, \xreg, \freg, lsl #2 + ld4r {v0.16b, v1.16b, v2.16b, v3.16b}, [\xreg] // filter + neg v0.16b, v0.16b + neg v3.16b, v3.16b +.endm + +.macro calc_epelb dst, src0, src1, src2, src3 + umlsl \dst\().8h, \src0\().8b, v0.8b + umlal \dst\().8h, \src1\().8b, v1.8b + umlal \dst\().8h, \src2\().8b, v2.8b + umlsl \dst\().8h, \src3\().8b, v3.8b +.endm + +.macro calc_epelb2 dst, src0, src1, src2, src3 + umlsl2 \dst\().8h, \src0\().16b, v0.16b + umlal2 \dst\().8h, \src1\().16b, v1.16b + umlal2 \dst\().8h, \src2\().16b, v2.16b + umlsl2 \dst\().8h, \src3\().16b, v3.16b +.endm + +.macro calc_all4 + calc v16, v17, v18, v19 + b.eq 2f + calc v17, v18, v19, v16 + b.eq 2f + calc v18, v19, v16, v17 + b.eq 2f + calc v19, v16, v17, v18 + b.ne 1b +.endm + +.macro calc_all8 + calc v16, v17, v18, v19, v20, v21, v22, v23 + b.eq 2f + calc v18, v19, v20, v21, v22, v23, v16, v17 + b.eq 2f + calc v20, v21, v22, v23, v16, v17, v18, v19 + b.eq 2f + calc v22, v23, v16, v17, v18, v19, v20, v21 + b.ne 1b +.endm + +.macro calc_all12 + calc v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27 + b.eq 2f + calc v19, v20, v21, v22, v23, v24, v25, v26, v27, v16, v17, v18 + b.eq 2f + calc v22, v23, v24, v25, v26, v27, v16, v17, v18, v19, v20, v21 + b.eq 2f + calc v25, v26, v27, v16, v17, v18, v19, v20, v21, v22, v23, v24 + b.ne 1b +.endm + +.macro calc_all16 + calc v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31 + b.eq 2f + calc v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v16, v17, v18, v19 + b.eq 2f + calc v24, v25, v26, v27, v28, v29, v30, v31, v16, v17, v18, v19, v20, v21, v22, v23 + b.eq 2f + calc v28, v29, v30, v31, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27 + b.ne 1b +.endm + +function ff_hevc_put_hevc_epel_uni_v4_8_neon, export=1 + load_epel_filterb x6, x5 + sxtw x3, w3 + sxtw x1, w1 + sub x2, x2, x3 + ld1 {v16.s}[0], [x2], x3 + ld1 {v17.s}[0], [x2], x3 + ld1 {v18.s}[0], [x2], x3 +.macro calc src0, src1, src2, src3 + ld1 {\src3\().s}[0], [x2], x3 + movi v4.8h, #0 + calc_epelb v4, \src0, \src1, \src2, \src3 + sqrshrun v4.8b, v4.8h, #6 + subs w4, w4, #1 + st1 {v4.s}[0], [x0], x1 +.endm +1: calc_all4 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_epel_uni_v6_8_neon, export=1 + load_epel_filterb x6, x5 + sxtw x3, w3 + sxtw x1, w1 + sub x2, x2, x3 + sub x1, x1, #4 + ld1 {v16.8b}, [x2], x3 + ld1 {v17.8b}, [x2], x3 + ld1 {v18.8b}, [x2], x3 +.macro calc src0, src1, src2, src3 + ld1 {\src3\().8b}, [x2], x3 + movi v4.8h, #0 + calc_epelb v4, \src0, \src1, \src2, \src3 + sqrshrun v4.8b, v4.8h, #6 + st1 {v4.s}[0], [x0], #4 + subs w4, w4, #1 + st1 {v4.h}[2], [x0], x1 +.endm +1: calc_all4 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_epel_uni_v8_8_neon, export=1 + load_epel_filterb x6, x5 + sxtw x3, w3 + sxtw x1, w1 + sub x2, x2, x3 + ld1 {v16.8b}, [x2], x3 + ld1 {v17.8b}, [x2], x3 + ld1 {v18.8b}, [x2], x3 +.macro calc src0, src1, 
src2, src3 + ld1 {\src3\().8b}, [x2], x3 + movi v4.8h, #0 + calc_epelb v4, \src0, \src1, \src2, \src3 + sqrshrun v4.8b, v4.8h, #6 + subs w4, w4, #1 + st1 {v4.8b}, [x0], x1 +.endm +1: calc_all4 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_epel_uni_v12_8_neon, export=1 + load_epel_filterb x6, x5 + sxtw x3, w3 + sxtw x1, w1 + sub x2, x2, x3 + sub x1, x1, #8 + ld1 {v16.16b}, [x2], x3 + ld1 {v17.16b}, [x2], x3 + ld1 {v18.16b}, [x2], x3 +.macro calc src0, src1, src2, src3 + ld1 {\src3\().16b}, [x2], x3 + movi v4.8h, #0 + movi v5.8h, #0 + calc_epelb v4, \src0, \src1, \src2, \src3 + calc_epelb2 v5, \src0, \src1, \src2, \src3 + sqrshrun v4.8b, v4.8h, #6 + sqrshrun2 v4.16b, v5.8h, #6 + subs w4, w4, #1 + st1 {v4.8b}, [x0], #8 + st1 {v4.s}[2], [x0], x1 +.endm +1: calc_all4 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_epel_uni_v16_8_neon, export=1 + load_epel_filterb x6, x5 + sxtw x3, w3 + sxtw x1, w1 + sub x2, x2, x3 + ld1 {v16.16b}, [x2], x3 + ld1 {v17.16b}, [x2], x3 + ld1 {v18.16b}, [x2], x3 +.macro calc src0, src1, src2, src3 + ld1 {\src3\().16b}, [x2], x3 + movi v4.8h, #0 + movi v5.8h, #0 + calc_epelb v4, \src0, \src1, \src2, \src3 + calc_epelb2 v5, \src0, \src1, \src2, \src3 + sqrshrun v4.8b, v4.8h, #6 + sqrshrun2 v4.16b, v5.8h, #6 + subs w4, w4, #1 + st1 {v4.16b}, [x0], x1 +.endm +1: calc_all4 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_epel_uni_v24_8_neon, export=1 + load_epel_filterb x6, x5 + sxtw x3, w3 + sxtw x1, w1 + sub x2, x2, x3 + ld1 {v16.8b, v17.8b, v18.8b}, [x2], x3 + ld1 {v19.8b, v20.8b, v21.8b}, [x2], x3 + ld1 {v22.8b, v23.8b, v24.8b}, [x2], x3 +.macro calc src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11 + ld1 {\src9\().8b, \src10\().8b, \src11\().8b}, [x2], x3 + movi v4.8h, #0 + movi v5.8h, #0 + movi v6.8h, #0 + calc_epelb v4, \src0, \src3, \src6, \src9 + calc_epelb v5, \src1, \src4, \src7, \src10 + calc_epelb v6, \src2, \src5, \src8, \src11 + sqrshrun v4.8b, v4.8h, #6 + sqrshrun v5.8b, v5.8h, #6 + sqrshrun v6.8b, v6.8h, #6 + subs w4, w4, #1 + st1 {v4.8b-v6.8b}, [x0], x1 +.endm +1: calc_all12 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_epel_uni_v32_8_neon, export=1 + load_epel_filterb x6, x5 + sxtw x3, w3 + sxtw x1, w1 + sub x2, x2, x3 + ld1 {v16.16b, v17.16b}, [x2], x3 + ld1 {v18.16b, v19.16b}, [x2], x3 + ld1 {v20.16b, v21.16b}, [x2], x3 +.macro calc src0, src1, src2, src3, src4, src5, src6, src7 + ld1 {\src6\().16b, \src7\().16b}, [x2], x3 + movi v4.8h, #0 + movi v5.8h, #0 + movi v6.8h, #0 + movi v7.8h, #0 + calc_epelb v4, \src0, \src2, \src4, \src6 + calc_epelb2 v5, \src0, \src2, \src4, \src6 + calc_epelb v6, \src1, \src3, \src5, \src7 + calc_epelb2 v7, \src1, \src3, \src5, \src7 + sqrshrun v4.8b, v4.8h, #6 + sqrshrun2 v4.16b, v5.8h, #6 + sqrshrun v5.8b, v6.8h, #6 + sqrshrun2 v5.16b, v7.8h, #6 + subs w4, w4, #1 + st1 {v4.16b, v5.16b}, [x0], x1 +.endm +1: calc_all8 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_epel_uni_v48_8_neon, export=1 + load_epel_filterb x6, x5 + sxtw x3, w3 + sxtw x1, w1 + sub x2, x2, x3 + ld1 {v16.16b, v17.16b, v18.16b}, [x2], x3 + ld1 {v19.16b, v20.16b, v21.16b}, [x2], x3 + ld1 {v22.16b, v23.16b, v24.16b}, [x2], x3 +.macro calc src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11 + ld1 {\src9\().16b, \src10\().16b, \src11\().16b}, [x2], x3 + movi v4.8h, #0 + movi v5.8h, #0 + movi v6.8h, #0 + movi v7.8h, #0 + movi v28.8h, #0 + movi v29.8h, #0 + calc_epelb v4, \src0, \src3, \src6, \src9 + calc_epelb2 v5, \src0, \src3, \src6, \src9 + 
calc_epelb v6, \src1, \src4, \src7, \src10 + calc_epelb2 v7, \src1, \src4, \src7, \src10 + calc_epelb v28, \src2, \src5, \src8, \src11 + calc_epelb2 v29, \src2, \src5, \src8, \src11 + sqrshrun v4.8b, v4.8h, #6 + sqrshrun2 v4.16b, v5.8h, #6 + sqrshrun v5.8b, v6.8h, #6 + sqrshrun2 v5.16b, v7.8h, #6 + sqrshrun v6.8b, v28.8h, #6 + sqrshrun2 v6.16b, v29.8h, #6 + subs w4, w4, #1 + st1 {v4.16b, v5.16b, v6.16b}, [x0], x1 +.endm +1: calc_all12 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_epel_uni_v64_8_neon, export=1 + load_epel_filterb x6, x5 + sub sp, sp, #32 + sxtw x3, w3 + sxtw x1, w1 + st1 {v8.8b-v11.8b}, [sp] + sub x2, x2, x3 + ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x2], x3 + ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [x2], x3 + ld1 {v24.16b, v25.16b, v26.16b, v27.16b}, [x2], x3 +.macro calc src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11, src12, src13, src14, src15 + ld1 {\src12\().16b, \src13\().16b, \src14\().16b, \src15\().16b}, [x2], x3 + movi v4.8h, #0 + movi v5.8h, #0 + movi v6.8h, #0 + movi v7.8h, #0 + movi v8.8h, #0 + movi v9.8h, #0 + movi v10.8h, #0 + movi v11.8h, #0 + calc_epelb v10, \src3, \src7, \src11, \src15 + calc_epelb2 v11, \src3, \src7, \src11, \src15 + calc_epelb v4, \src0, \src4, \src8, \src12 + calc_epelb2 v5, \src0, \src4, \src8, \src12 + calc_epelb v6, \src1, \src5, \src9, \src13 + calc_epelb2 v7, \src1, \src5, \src9, \src13 + calc_epelb v8, \src2, \src6, \src10, \src14 + calc_epelb2 v9, \src2, \src6, \src10, \src14 + sqrshrun v4.8b, v4.8h, #6 + sqrshrun2 v4.16b, v5.8h, #6 + sqrshrun v5.8b, v6.8h, #6 + sqrshrun2 v5.16b, v7.8h, #6 + sqrshrun v6.8b, v8.8h, #6 + sqrshrun2 v6.16b, v9.8h, #6 + sqrshrun v7.8b, v10.8h, #6 + sqrshrun2 v7.16b, v11.8h, #6 + subs w4, w4, #1 + st1 {v4.16b, v5.16b, v6.16b, v7.16b}, [x0], x1 +.endm +1: calc_all16 +.purgem calc +2: ld1 {v8.8b-v11.8b}, [sp] + add sp, sp, #32 + ret +endfunc + #if HAVE_I8MM .macro EPEL_H_HEADER diff --git a/libavcodec/aarch64/hevcdsp_init_aarch64.c b/libavcodec/aarch64/hevcdsp_init_aarch64.c index e125b0cfb2..f1e167c50b 100644 --- a/libavcodec/aarch64/hevcdsp_init_aarch64.c +++ b/libavcodec/aarch64/hevcdsp_init_aarch64.c @@ -161,6 +161,10 @@ NEON8_FNPROTO(pel_uni_w_pixels, (uint8_t *_dst, ptrdiff_t _dststride, int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width),); +NEON8_FNPROTO(epel_uni_v, (uint8_t *dst, ptrdiff_t dststride, + const uint8_t *src, ptrdiff_t srcstride, + int height, intptr_t mx, intptr_t my, int width),); + NEON8_FNPROTO(epel_uni_w_v, (uint8_t *_dst, ptrdiff_t _dststride, const uint8_t *_src, ptrdiff_t _srcstride, int height, int denom, int wx, int ox, @@ -285,6 +289,7 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth) c->put_hevc_qpel_bi[9][0][1] = ff_hevc_put_hevc_qpel_bi_h16_8_neon; NEON8_FNASSIGN(c->put_hevc_epel_uni, 0, 0, pel_uni_pixels,); + NEON8_FNASSIGN(c->put_hevc_epel_uni, 1, 0, epel_uni_v,); NEON8_FNASSIGN(c->put_hevc_qpel_uni, 0, 0, pel_uni_pixels,); NEON8_FNASSIGN(c->put_hevc_epel_uni_w, 0, 0, pel_uni_w_pixels,); From patchwork Sat Aug 26 08:49:42 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Logan.Lyu" X-Patchwork-Id: 43325 Delivered-To: ffmpegpatchwork2@gmail.com Received: by 2002:a05:6a20:7194:b0:149:dfde:5c0a with SMTP id s20csp356000pzb; Sat, 26 Aug 2023 01:50:22 -0700 (PDT) X-Google-Smtp-Source: AGHT+IF2bW9ViX3XQdW9zCVM2YPJ77up4SM8+DbCLWqUtvFVggf+xPElaj8s+RTyQN+jkRmShC02 X-Received: by 
Message-ID:
Date: Sat, 26 Aug 2023 16:49:42 +0800
From: "Logan.Lyu"
To: ffmpeg-devel@ffmpeg.org
Organization: myais
Subject: [FFmpeg-devel] [PATCH 2/4] lavc/aarch64: new optimization for 8-bit hevc_epel_uni_hv
Cc: jb@videolan.org, jdek@itanimul.li

checkasm bench:
put_hevc_epel_uni_hv4_8_c: 204.7
put_hevc_epel_uni_hv4_8_i8mm: 70.2
put_hevc_epel_uni_hv6_8_c: 378.2
put_hevc_epel_uni_hv6_8_i8mm: 131.9 put_hevc_epel_uni_hv8_8_c: 637.7 put_hevc_epel_uni_hv8_8_i8mm: 137.9 put_hevc_epel_uni_hv12_8_c: 1301.9 put_hevc_epel_uni_hv12_8_i8mm: 314.2 put_hevc_epel_uni_hv16_8_c: 2203.4 put_hevc_epel_uni_hv16_8_i8mm: 454.7 put_hevc_epel_uni_hv24_8_c: 4848.2 put_hevc_epel_uni_hv24_8_i8mm: 1065.2 put_hevc_epel_uni_hv32_8_c: 8517.4 put_hevc_epel_uni_hv32_8_i8mm: 1898.4 put_hevc_epel_uni_hv48_8_c: 19591.7 put_hevc_epel_uni_hv48_8_i8mm: 4107.2 put_hevc_epel_uni_hv64_8_c: 33880.2 put_hevc_epel_uni_hv64_8_i8mm: 6568.7 Co-Authored-By: J. Dekker Signed-off-by: Logon Lyu --- libavcodec/aarch64/hevcdsp_epel_neon.S | 344 ++++++++++++++++++++-- libavcodec/aarch64/hevcdsp_init_aarch64.c | 5 + 2 files changed, 328 insertions(+), 21 deletions(-) NEON8_FNASSIGN(c->put_hevc_qpel_uni_w, 0, 1, qpel_uni_w_h, _i8mm); diff --git a/libavcodec/aarch64/hevcdsp_epel_neon.S b/libavcodec/aarch64/hevcdsp_epel_neon.S index 7ce7eec829..4ad1b67081 100644 --- a/libavcodec/aarch64/hevcdsp_epel_neon.S +++ b/libavcodec/aarch64/hevcdsp_epel_neon.S @@ -54,6 +54,29 @@ endconst umlsl2 \dst\().8h, \src3\().16b, v3.16b .endm +.macro load_epel_filterh freg, xreg + movrel \xreg, epel_filters + add \xreg, \xreg, \freg, lsl #2 + ld1 {v0.8b}, [\xreg] + sxtl v0.8h, v0.8b +.endm + +.macro calc_epelh dst, src0, src1, src2, src3 + smull \dst\().4s, \src0\().4h, v0.h[0] + smlal \dst\().4s, \src1\().4h, v0.h[1] + smlal \dst\().4s, \src2\().4h, v0.h[2] + smlal \dst\().4s, \src3\().4h, v0.h[3] + sqshrn \dst\().4h, \dst\().4s, #6 +.endm + +.macro calc_epelh2 dst, tmp, src0, src1, src2, src3 + smull2 \tmp\().4s, \src0\().8h, v0.h[0] + smlal2 \tmp\().4s, \src1\().8h, v0.h[1] + smlal2 \tmp\().4s, \src2\().8h, v0.h[2] + smlal2 \tmp\().4s, \src3\().8h, v0.h[3] + sqshrn2 \dst\().8h, \tmp\().4s, #6 +.endm + .macro calc_all4 calc v16, v17, v18, v19 b.eq 2f @@ -696,6 +719,306 @@ function ff_hevc_put_hevc_epel_h64_8_neon_i8mm, export=1 ret endfunc +function ff_hevc_put_hevc_epel_uni_hv4_8_neon_i8mm, export=1 + add w10, w4, #3 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x0, x1, [sp, #-16] + stp x4, x6, [sp, #-32] + stp xzr, x30, [sp, #-48]! + add x0, sp, #48 + sub x1, x2, x3 + mov x2, x3 + add w3, w4, #3 + mov x4, x5 + bl X(ff_hevc_put_hevc_epel_h4_8_neon_i8mm) + ldp xzr, x30, [sp] + ldp x4, x6, [sp, #16] + ldp x0, x1, [sp, #32] + add sp, sp, #48 + load_epel_filterh x6, x5 + mov x10, #(MAX_PB_SIZE * 2) + ld1 {v16.4h}, [sp], x10 + ld1 {v17.4h}, [sp], x10 + ld1 {v18.4h}, [sp], x10 +.macro calc src0, src1, src2, src3 + ld1 {\src3\().4h}, [sp], x10 + calc_epelh v4, \src0, \src1, \src2, \src3 + sqrshrun v4.8b, v4.8h, #6 + subs w4, w4, #1 + st1 {v4.s}[0], [x0], x1 +.endm +1: calc_all4 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_epel_uni_hv6_8_neon_i8mm, export=1 + add w10, w4, #3 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x0, x1, [sp, #-16] + stp x4, x6, [sp, #-32] + stp xzr, x30, [sp, #-48]! 
+ add x0, sp, #48 + sub x1, x2, x3 + mov x2, x3 + add w3, w4, #3 + mov x4, x5 + bl X(ff_hevc_put_hevc_epel_h6_8_neon_i8mm) + ldp xzr, x30, [sp] + ldp x4, x6, [sp, #16] + ldp x0, x1, [sp, #32] + add sp, sp, #48 + load_epel_filterh x6, x5 + sub x1, x1, #4 + mov x10, #(MAX_PB_SIZE * 2) + ld1 {v16.8h}, [sp], x10 + ld1 {v17.8h}, [sp], x10 + ld1 {v18.8h}, [sp], x10 +.macro calc src0, src1, src2, src3 + ld1 {\src3\().8h}, [sp], x10 + calc_epelh v4, \src0, \src1, \src2, \src3 + calc_epelh2 v4, v5, \src0, \src1, \src2, \src3 + sqrshrun v4.8b, v4.8h, #6 + st1 {v4.s}[0], [x0], #4 + subs w4, w4, #1 + st1 {v4.h}[2], [x0], x1 +.endm +1: calc_all4 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_epel_uni_hv8_8_neon_i8mm, export=1 + add w10, w4, #3 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x0, x1, [sp, #-16] + stp x4, x6, [sp, #-32] + stp xzr, x30, [sp, #-48]! + add x0, sp, #48 + sub x1, x2, x3 + mov x2, x3 + add w3, w4, #3 + mov x4, x5 + bl X(ff_hevc_put_hevc_epel_h8_8_neon_i8mm) + ldp xzr, x30, [sp] + ldp x4, x6, [sp, #16] + ldp x0, x1, [sp, #32] + add sp, sp, #48 + load_epel_filterh x6, x5 + mov x10, #(MAX_PB_SIZE * 2) + ld1 {v16.8h}, [sp], x10 + ld1 {v17.8h}, [sp], x10 + ld1 {v18.8h}, [sp], x10 +.macro calc src0, src1, src2, src3 + ld1 {\src3\().8h}, [sp], x10 + calc_epelh v4, \src0, \src1, \src2, \src3 + calc_epelh2 v4, v5, \src0, \src1, \src2, \src3 + sqrshrun v4.8b, v4.8h, #6 + subs w4, w4, #1 + st1 {v4.8b}, [x0], x1 +.endm +1: calc_all4 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_epel_uni_hv12_8_neon_i8mm, export=1 + add w10, w4, #3 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x0, x1, [sp, #-16] + stp x4, x6, [sp, #-32] + stp xzr, x30, [sp, #-48]! + add x0, sp, #48 + sub x1, x2, x3 + mov x2, x3 + add w3, w4, #3 + mov x4, x5 + bl X(ff_hevc_put_hevc_epel_h12_8_neon_i8mm) + ldp xzr, x30, [sp] + ldp x4, x6, [sp, #16] + ldp x0, x1, [sp, #32] + add sp, sp, #48 + load_epel_filterh x6, x5 + sub x1, x1, #8 + mov x10, #(MAX_PB_SIZE * 2) + ld1 {v16.8h, v17.8h}, [sp], x10 + ld1 {v18.8h, v19.8h}, [sp], x10 + ld1 {v20.8h, v21.8h}, [sp], x10 +.macro calc src0, src1, src2, src3, src4, src5, src6, src7 + ld1 {\src6\().8h, \src7\().8h}, [sp], x10 + calc_epelh v4, \src0, \src2, \src4, \src6 + calc_epelh2 v4, v5, \src0, \src2, \src4, \src6 + calc_epelh v5, \src1, \src3, \src5, \src7 + sqrshrun v4.8b, v4.8h, #6 + sqrshrun2 v4.16b, v5.8h, #6 + st1 {v4.8b}, [x0], #8 + st1 {v4.s}[2], [x0], x1 + subs w4, w4, #1 +.endm +1: calc_all8 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_epel_uni_hv16_8_neon_i8mm, export=1 + add w10, w4, #3 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x0, x1, [sp, #-16] + stp x4, x6, [sp, #-32] + stp xzr, x30, [sp, #-48]! 
+ add x0, sp, #48 + sub x1, x2, x3 + mov x2, x3 + add w3, w4, #3 + mov x4, x5 + bl X(ff_hevc_put_hevc_epel_h16_8_neon_i8mm) + ldp xzr, x30, [sp] + ldp x4, x6, [sp, #16] + ldp x0, x1, [sp, #32] + add sp, sp, #48 + load_epel_filterh x6, x5 + mov x10, #(MAX_PB_SIZE * 2) + ld1 {v16.8h, v17.8h}, [sp], x10 + ld1 {v18.8h, v19.8h}, [sp], x10 + ld1 {v20.8h, v21.8h}, [sp], x10 +.macro calc src0, src1, src2, src3, src4, src5, src6, src7 + ld1 {\src6\().8h, \src7\().8h}, [sp], x10 + calc_epelh v4, \src0, \src2, \src4, \src6 + calc_epelh2 v4, v5, \src0, \src2, \src4, \src6 + calc_epelh v5, \src1, \src3, \src5, \src7 + calc_epelh2 v5, v6, \src1, \src3, \src5, \src7 + sqrshrun v4.8b, v4.8h, #6 + sqrshrun2 v4.16b, v5.8h, #6 + subs w4, w4, #1 + st1 {v4.16b}, [x0], x1 +.endm +1: calc_all8 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_epel_uni_hv24_8_neon_i8mm, export=1 + add w10, w4, #3 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x0, x1, [sp, #-16] + stp x4, x6, [sp, #-32] + stp xzr, x30, [sp, #-48]! + add x0, sp, #48 + sub x1, x2, x3 + mov x2, x3 + add w3, w4, #3 + mov x4, x5 + bl X(ff_hevc_put_hevc_epel_h24_8_neon_i8mm) + ldp xzr, x30, [sp] + ldp x4, x6, [sp, #16] + ldp x0, x1, [sp, #32] + add sp, sp, #48 + load_epel_filterh x6, x5 + mov x10, #(MAX_PB_SIZE * 2) + ld1 {v16.8h, v17.8h, v18.8h}, [sp], x10 + ld1 {v19.8h, v20.8h, v21.8h}, [sp], x10 + ld1 {v22.8h, v23.8h, v24.8h}, [sp], x10 +.macro calc src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11 + ld1 {\src9\().8h, \src10\().8h, \src11\().8h}, [sp], x10 + calc_epelh v4, \src0, \src3, \src6, \src9 + calc_epelh2 v4, v5, \src0, \src3, \src6, \src9 + calc_epelh v5, \src1, \src4, \src7, \src10 + calc_epelh2 v5, v6, \src1, \src4, \src7, \src10 + calc_epelh v6, \src2, \src5, \src8, \src11 + calc_epelh2 v6, v7, \src2, \src5, \src8, \src11 + sqrshrun v4.8b, v4.8h, #6 + sqrshrun v5.8b, v5.8h, #6 + sqrshrun v6.8b, v6.8h, #6 + subs w4, w4, #1 + st1 {v4.8b, v5.8b, v6.8b}, [x0], x1 +.endm +1: calc_all12 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_epel_uni_hv32_8_neon_i8mm, export=1 + stp x0, x30, [sp, #-16] + stp x1, x2, [sp, #-32] + stp x3, x4, [sp, #-48] + stp x5, x6, [sp, #-64]! + mov x7, #16 + bl X(ff_hevc_put_hevc_epel_uni_hv16_8_neon_i8mm) + ldp x5, x6, [sp] + ldp x3, x4, [sp, #16] + ldp x1, x2, [sp, #32] + ldr x0, [sp, #48] + add x0, x0, #16 + add x2, x2, #16 + mov x7, #16 + bl X(ff_hevc_put_hevc_epel_uni_hv16_8_neon_i8mm) + ldr x30, [sp, #56] + add sp, sp, #64 + ret +endfunc + +function ff_hevc_put_hevc_epel_uni_hv48_8_neon_i8mm, export=1 + stp x0, x30, [sp, #-16] + stp x1, x2, [sp, #-32] + stp x3, x4, [sp, #-48] + stp x5, x6, [sp, #-64]! + mov x7, #24 + bl X(ff_hevc_put_hevc_epel_uni_hv24_8_neon_i8mm) + ldp x5, x6, [sp] + ldp x3, x4, [sp, #16] + ldp x1, x2, [sp, #32] + ldr x0, [sp, #48] + add x0, x0, #24 + add x2, x2, #24 + mov x7, #24 + bl X(ff_hevc_put_hevc_epel_uni_hv24_8_neon_i8mm) + ldr x30, [sp, #56] + add sp, sp, #64 + ret +endfunc + +function ff_hevc_put_hevc_epel_uni_hv64_8_neon_i8mm, export=1 + stp x0, x30, [sp, #-16] + stp x1, x2, [sp, #-32] + stp x3, x4, [sp, #-48] + stp x5, x6, [sp, #-64]! 
+ mov x7, #16 + bl X(ff_hevc_put_hevc_epel_uni_hv16_8_neon_i8mm) + ldp x5, x6, [sp] + ldp x3, x4, [sp, #16] + ldp x1, x2, [sp, #32] + ldr x0, [sp, #48] + add x0, x0, #16 + add x2, x2, #16 + mov x7, #16 + bl X(ff_hevc_put_hevc_epel_uni_hv16_8_neon_i8mm) + ldp x5, x6, [sp] + ldp x3, x4, [sp, #16] + ldp x1, x2, [sp, #32] + ldr x0, [sp, #48] + add x0, x0, #32 + add x2, x2, #32 + mov x7, #16 + bl X(ff_hevc_put_hevc_epel_uni_hv16_8_neon_i8mm) + ldp x5, x6, [sp] + ldp x3, x4, [sp, #16] + ldp x1, x2, [sp, #32] + ldr x0, [sp, #48] + add x0, x0, #48 + add x2, x2, #48 + mov x7, #16 + bl X(ff_hevc_put_hevc_epel_uni_hv16_8_neon_i8mm) + ldr x30, [sp, #56] + add sp, sp, #64 + ret +endfunc + .macro EPEL_UNI_W_H_HEADER ldr x12, [sp] sub x2, x2, #1 @@ -1138,28 +1461,7 @@ endfunc sqxtn2 v6.8h, v31.4s .endm -.macro calc_epelh dst, src0, src1, src2, src3 - smull \dst\().4s, \src0\().4h, v0.h[0] - smlal \dst\().4s, \src1\().4h, v0.h[1] - smlal \dst\().4s, \src2\().4h, v0.h[2] - smlal \dst\().4s, \src3\().4h, v0.h[3] - sqshrn \dst\().4h, \dst\().4s, #6 -.endm - -.macro calc_epelh2 dst, tmp, src0, src1, src2, src3 - smull2 \tmp\().4s, \src0\().8h, v0.h[0] - smlal2 \tmp\().4s, \src1\().8h, v0.h[1] - smlal2 \tmp\().4s, \src2\().8h, v0.h[2] - smlal2 \tmp\().4s, \src3\().8h, v0.h[3] - sqshrn2 \dst\().8h, \tmp\().4s, #6 -.endm -.macro load_epel_filterh freg, xreg - movrel \xreg, epel_filters - add \xreg, \xreg, \freg, lsl #2 - ld1 {v0.8b}, [\xreg] - sxtl v0.8h, v0.8b -.endm function ff_hevc_put_hevc_epel_uni_w_hv4_8_neon_i8mm, export=1 epel_uni_w_hv_start diff --git a/libavcodec/aarch64/hevcdsp_init_aarch64.c b/libavcodec/aarch64/hevcdsp_init_aarch64.c index f1e167c50b..bf4e466af8 100644 --- a/libavcodec/aarch64/hevcdsp_init_aarch64.c +++ b/libavcodec/aarch64/hevcdsp_init_aarch64.c @@ -165,6 +165,10 @@ NEON8_FNPROTO(epel_uni_v, (uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, ptrdiff_t srcstride, int height, intptr_t mx, intptr_t my, int width),); +NEON8_FNPROTO(epel_uni_hv, (uint8_t *dst, ptrdiff_t _dststride, + const uint8_t *src, ptrdiff_t srcstride, + int height, intptr_t mx, intptr_t my, int width), _i8mm); + NEON8_FNPROTO(epel_uni_w_v, (uint8_t *_dst, ptrdiff_t _dststride, const uint8_t *_src, ptrdiff_t _srcstride, int height, int denom, int wx, int ox, @@ -298,6 +302,7 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth) if (have_i8mm(cpu_flags)) { NEON8_FNASSIGN(c->put_hevc_epel, 0, 1, epel_h, _i8mm); + NEON8_FNASSIGN(c->put_hevc_epel_uni, 1, 1, epel_uni_hv, _i8mm); NEON8_FNASSIGN(c->put_hevc_epel_uni_w, 0, 1, epel_uni_w_h ,_i8mm); NEON8_FNASSIGN(c->put_hevc_qpel, 0, 1, qpel_h, _i8mm); From patchwork Sat Aug 26 08:49:46 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Logan.Lyu" X-Patchwork-Id: 43326 Delivered-To: ffmpegpatchwork2@gmail.com Received: by 2002:a05:6a20:7194:b0:149:dfde:5c0a with SMTP id s20csp356043pzb; Sat, 26 Aug 2023 01:50:31 -0700 (PDT) X-Google-Smtp-Source: AGHT+IFiiAwNRYjysgu1Q5SFh1ExY80QFGG/5By0YWSlSRnqeJjpqcQwPNJFMQr4jbQckQFe5Gss X-Received: by 2002:a17:906:73d8:b0:99c:e38d:a823 with SMTP id n24-20020a17090673d800b0099ce38da823mr15454015ejl.67.1693039831627; Sat, 26 Aug 2023 01:50:31 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1693039831; cv=none; d=google.com; s=arc-20160816; b=UT/lJ22vVxtkH30uszOTvQvaZbDquaa6cJI/Rlm+7wnPMCSROfh5L99LyFqZ4w/a7B 93kfejHIbQAelgOJLj6/p3LFfuwIOt8pgGfUkP8tlsANFEdBkJug42/6Lcf4udfIMnPO O/pR7gnVjtckSCl1ytlkP1Qz4Y8WggIpuZ5vHQMzmNR0EsV/riTCNtPT0+6ObeDw5a/S 
Message-ID: <6702183e-e345-fcf5-76dd-f9844880dd00@myais.com.cn>
Date: Sat, 26 Aug 2023 16:49:46 +0800
From: "Logan.Lyu"
To: ffmpeg-devel@ffmpeg.org
Organization: myais
Subject: [FFmpeg-devel] [PATCH 3/4] lavc/aarch64: new optimization for 8-bit hevc_qpel_uni_v
Cc: jb@videolan.org, jdek@itanimul.li

checkasm bench:
put_hevc_qpel_uni_v4_8_c: 146.2
put_hevc_qpel_uni_v4_8_neon: 43.2
put_hevc_qpel_uni_v6_8_c: 303.9
put_hevc_qpel_uni_v6_8_neon: 69.7
put_hevc_qpel_uni_v8_8_c: 495.2
put_hevc_qpel_uni_v8_8_neon: 74.7
put_hevc_qpel_uni_v12_8_c: 1100.9
put_hevc_qpel_uni_v12_8_neon: 222.4
put_hevc_qpel_uni_v16_8_c: 1955.2
put_hevc_qpel_uni_v16_8_neon: 269.2
put_hevc_qpel_uni_v24_8_c: 4571.9
put_hevc_qpel_uni_v24_8_neon: 832.4
put_hevc_qpel_uni_v32_8_c: 8226.4
put_hevc_qpel_uni_v32_8_neon: 1035.7
put_hevc_qpel_uni_v48_8_c: 18324.2 put_hevc_qpel_uni_v48_8_neon: 2321.2 put_hevc_qpel_uni_v64_8_c: 37659.4 put_hevc_qpel_uni_v64_8_neon: 4122.2 Co-Authored-By: J. Dekker Signed-off-by: Logon Lyu --- libavcodec/aarch64/hevcdsp_init_aarch64.c | 5 + libavcodec/aarch64/hevcdsp_qpel_neon.S | 232 ++++++++++++++++++++++ 2 files changed, 237 insertions(+) export=1 ret endfunc +.macro calc_all + calc v23, v16, v17, v18, v19, v20, v21, v22, v23 + b.eq 2f + calc v16, v17, v18, v19, v20, v21, v22, v23, v16 + b.eq 2f + calc v17, v18, v19, v20, v21, v22, v23, v16, v17 + b.eq 2f + calc v18, v19, v20, v21, v22, v23, v16, v17, v18 + b.eq 2f + calc v19, v20, v21, v22, v23, v16, v17, v18, v19 + b.eq 2f + calc v20, v21, v22, v23, v16, v17, v18, v19, v20 + b.eq 2f + calc v21, v22, v23, v16, v17, v18, v19, v20, v21 + b.eq 2f + calc v22, v23, v16, v17, v18, v19, v20, v21, v22 + b.hi 1b +.endm + +function ff_hevc_put_hevc_qpel_uni_v4_8_neon, export=1 + load_qpel_filterb x6, x5 + sub x2, x2, x3, lsl #1 + sub x2, x2, x3 + ldr s16, [x2] + ldr s17, [x2, x3] + add x2, x2, x3, lsl #1 + ldr s18, [x2] + ldr s19, [x2, x3] + add x2, x2, x3, lsl #1 + ldr s20, [x2] + ldr s21, [x2, x3] + add x2, x2, x3, lsl #1 + ldr s22, [x2] + add x2, x2, x3 +.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7 + ld1 {\tmp\().s}[0], [x2], x3 + movi v24.8h, #0 + calc_qpelb v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7 + sqrshrun v24.8b, v24.8h, #6 + subs w4, w4, #1 + st1 {v24.s}[0], [x0], x1 +.endm +1: calc_all +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_qpel_uni_v6_8_neon, export=1 + load_qpel_filterb x6, x5 + sub x2, x2, x3, lsl #1 + sub x1, x1, #4 + sub x2, x2, x3 + ldr d16, [x2] + ldr d17, [x2, x3] + add x2, x2, x3, lsl #1 + ldr d18, [x2] + ldr d19, [x2, x3] + add x2, x2, x3, lsl #1 + ldr d20, [x2] + ldr d21, [x2, x3] + add x2, x2, x3, lsl #1 + ldr d22, [x2] + add x2, x2, x3 +.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7 + ld1 {\tmp\().8b}, [x2], x3 + movi v24.8h, #0 + calc_qpelb v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7 + sqrshrun v24.8b, v24.8h, #6 + st1 {v24.s}[0], [x0], #4 + subs w4, w4, #1 + st1 {v24.h}[2], [x0], x1 +.endm +1: calc_all +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_qpel_uni_v8_8_neon, export=1 + load_qpel_filterb x6, x5 + sub x2, x2, x3, lsl #1 + sub x2, x2, x3 + ldr d16, [x2] + ldr d17, [x2, x3] + add x2, x2, x3, lsl #1 + ldr d18, [x2] + ldr d19, [x2, x3] + add x2, x2, x3, lsl #1 + ldr d20, [x2] + ldr d21, [x2, x3] + add x2, x2, x3, lsl #1 + ldr d22, [x2] + add x2, x2, x3 +.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7 + ld1 {\tmp\().8b}, [x2], x3 + movi v24.8h, #0 + calc_qpelb v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7 + sqrshrun v24.8b, v24.8h, #6 + subs w4, w4, #1 + st1 {v24.8b}, [x0], x1 +.endm +1: calc_all +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_qpel_uni_v12_8_neon, export=1 + load_qpel_filterb x6, x5 + sub x2, x2, x3, lsl #1 + sub x1, x1, #8 + sub x2, x2, x3 +0: mov x8, x2 // src + mov w11, w4 // height + mov x10, x0 // dst + ldr q16, [x8] + ldr q17, [x8, x3] + add x8, x8, x3, lsl #1 + ldr q18, [x8] + ldr q19, [x8, x3] + add x8, x8, x3, lsl #1 + ldr q20, [x8] + ldr q21, [x8, x3] + add x8, x8, x3, lsl #1 + ldr q22, [x8] + add x8, x8, x3 +.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7 + ld1 {\tmp\().16b}, [x8], x3 + movi v24.8h, #0 + movi v25.8h, #0 + calc_qpelb v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7 + calc_qpelb2 v25, \src0, \src1, 
\src2, \src3, \src4, \src5, \src6, \src7 + sqrshrun v24.8b, v24.8h, #6 + sqrshrun2 v24.16b, v25.8h, #6 + st1 {v24.8b}, [x10], #8 + subs x11, x11, #1 + st1 {v24.s}[2], [x10], x1 +.endm +1: calc_all +.purgem calc +2: add x0, x0, #12 + add x2, x2, #12 + subs w7, w7, #12 + b.ne 0b + ret +endfunc + +function ff_hevc_put_hevc_qpel_uni_v16_8_neon, export=1 + load_qpel_filterb x6, x5 + sub x2, x2, x3, lsl #1 + sub x2, x2, x3 +0: mov x8, x2 // src + mov w11, w4 // height + mov x10, x0 // dst + ldr q16, [x8] + ldr q17, [x8, x3] + add x8, x8, x3, lsl #1 + ldr q18, [x8] + ldr q19, [x8, x3] + add x8, x8, x3, lsl #1 + ldr q20, [x8] + ldr q21, [x8, x3] + add x8, x8, x3, lsl #1 + ldr q22, [x8] + add x8, x8, x3 +.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7 + ld1 {\tmp\().16b}, [x8], x3 + movi v24.8h, #0 + movi v25.8h, #0 + calc_qpelb v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7 + calc_qpelb2 v25, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7 + sqrshrun v24.8b, v24.8h, #6 + sqrshrun2 v24.16b, v25.8h, #6 + subs x11, x11, #1 + st1 {v24.16b}, [x10], x1 +.endm +1: calc_all +.purgem calc +2: add x0, x0, #16 + add x2, x2, #16 + subs w7, w7, #16 + b.ne 0b + ret +endfunc + +function ff_hevc_put_hevc_qpel_uni_v24_8_neon, export=1 + b X(ff_hevc_put_hevc_qpel_uni_v12_8_neon) +endfunc + +function ff_hevc_put_hevc_qpel_uni_v32_8_neon, export=1 + b X(ff_hevc_put_hevc_qpel_uni_v16_8_neon) +endfunc + +function ff_hevc_put_hevc_qpel_uni_v48_8_neon, export=1 + b X(ff_hevc_put_hevc_qpel_uni_v16_8_neon) +endfunc + +function ff_hevc_put_hevc_qpel_uni_v64_8_neon, export=1 + b X(ff_hevc_put_hevc_qpel_uni_v16_8_neon) +endfunc + function ff_hevc_put_hevc_pel_uni_w_pixels4_8_neon, export=1 mov w10, #-6 sub w10, w10, w5 diff --git a/libavcodec/aarch64/hevcdsp_init_aarch64.c b/libavcodec/aarch64/hevcdsp_init_aarch64.c index bf4e466af8..6c1e8413eb 100644 --- a/libavcodec/aarch64/hevcdsp_init_aarch64.c +++ b/libavcodec/aarch64/hevcdsp_init_aarch64.c @@ -192,6 +192,10 @@ NEON8_FNPROTO(qpel_h, (int16_t *dst, const uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my, int width), _i8mm); +NEON8_FNPROTO(qpel_uni_v, (uint8_t *dst, ptrdiff_t dststride, + const uint8_t *src, ptrdiff_t srcstride, + int height, intptr_t mx, intptr_t my, int width),); + NEON8_FNPROTO(qpel_uni_w_h, (uint8_t *_dst, ptrdiff_t _dststride, const uint8_t *_src, ptrdiff_t _srcstride, int height, int denom, int wx, int ox, @@ -295,6 +299,7 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth) NEON8_FNASSIGN(c->put_hevc_epel_uni, 0, 0, pel_uni_pixels,); NEON8_FNASSIGN(c->put_hevc_epel_uni, 1, 0, epel_uni_v,); NEON8_FNASSIGN(c->put_hevc_qpel_uni, 0, 0, pel_uni_pixels,); + NEON8_FNASSIGN(c->put_hevc_qpel_uni, 1, 0, qpel_uni_v,); NEON8_FNASSIGN(c->put_hevc_epel_uni_w, 0, 0, pel_uni_w_pixels,); NEON8_FNASSIGN(c->put_hevc_qpel_uni_w, 0, 0, pel_uni_w_pixels,); NEON8_FNASSIGN(c->put_hevc_epel_uni_w, 1, 0, epel_uni_w_v,); diff --git a/libavcodec/aarch64/hevcdsp_qpel_neon.S b/libavcodec/aarch64/hevcdsp_qpel_neon.S index e38dff9645..317a0b32b5 100644 --- a/libavcodec/aarch64/hevcdsp_qpel_neon.S +++ b/libavcodec/aarch64/hevcdsp_qpel_neon.S @@ -44,6 +44,39 @@ endconst sxtl v0.8h, v0.8b .endm +.macro load_qpel_filterb freg, xreg + movrel \xreg, qpel_filters + add \xreg, \xreg, \freg, lsl #3 + ld4r {v0.16b, v1.16b, v2.16b, v3.16b}, [\xreg], #4 + ld4r {v4.16b, v5.16b, v6.16b, v7.16b}, [\xreg] + neg v0.16b, v0.16b + neg v2.16b, v2.16b + neg v5.16b, v5.16b + neg v7.16b, v7.16b +.endm + +.macro 
calc_qpelb dst, src0, src1, src2, src3, src4, src5, src6, src7 + umlsl \dst\().8h, \src0\().8b, v0.8b + umlal \dst\().8h, \src1\().8b, v1.8b + umlsl \dst\().8h, \src2\().8b, v2.8b + umlal \dst\().8h, \src3\().8b, v3.8b + umlal \dst\().8h, \src4\().8b, v4.8b + umlsl \dst\().8h, \src5\().8b, v5.8b + umlal \dst\().8h, \src6\().8b, v6.8b + umlsl \dst\().8h, \src7\().8b, v7.8b +.endm + +.macro calc_qpelb2 dst, src0, src1, src2, src3, src4, src5, src6, src7 + umlsl2 \dst\().8h, \src0\().16b, v0.16b + umlal2 \dst\().8h, \src1\().16b, v1.16b + umlsl2 \dst\().8h, \src2\().16b, v2.16b + umlal2 \dst\().8h, \src3\().16b, v3.16b + umlal2 \dst\().8h, \src4\().16b, v4.16b + umlsl2 \dst\().8h, \src5\().16b, v5.16b + umlal2 \dst\().8h, \src6\().16b, v6.16b + umlsl2 \dst\().8h, \src7\().16b, v7.16b +.endm + .macro put_hevc type .ifc \type, qpel // void put_hevc_qpel_h(int16_t *dst, @@ -595,6 +628,205 @@ function ff_hevc_put_hevc_pel_uni_pixels64_8_neon, From patchwork Sat Aug 26 08:49:51 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Logan.Lyu" X-Patchwork-Id: 43324 Delivered-To: ffmpegpatchwork2@gmail.com Received: by 2002:a05:6a20:7194:b0:149:dfde:5c0a with SMTP id s20csp355956pzb; Sat, 26 Aug 2023 01:50:09 -0700 (PDT) X-Google-Smtp-Source: AGHT+IHvdioJWOi1SlTd3J9oSxJB5xK07lg9QwI0YRZezfQX8NxdP9TDXryp3RdfgmF35oiZgizF X-Received: by 2002:a17:907:763a:b0:9a1:b144:30f4 with SMTP id jy26-20020a170907763a00b009a1b14430f4mr15134824ejc.14.1693039809293; Sat, 26 Aug 2023 01:50:09 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1693039809; cv=none; d=google.com; s=arc-20160816; b=z/hxhwPSOt0iHvd61g5ZmXnKcrr2E4RvRs0WgqjkPIFs9PAaz5lVU58gW4fhmx2mLj F94BdZMtIZeppPCk9qauRdCmupKIWBaDYvq3ME6TPq9CJWRfzdNfOG67Kt1PKVOuri8z ZIOvZvaTI203MmSVfvnSTrtOg7IVBr6rBoUxmooUsXpzdMfA7wjH/oPhzb78p8mNw0K0 AjROJHferBEDZ5+Vj9QicfhJTBFz6XDpLYcuQsG8+G0LLHU/+Thoyr9lL7SwIoCFXqK8 hirOKXz64ukV3jDfRU5Ym3vzjVizpTSjScWOSsLW2f+DPx8TPfE0bD2Sl/XIp0w+XJ8/ zQLQ== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=sender:errors-to:content-transfer-encoding:cc:reply-to :list-subscribe:list-help:list-post:list-archive:list-unsubscribe :list-id:precedence:subject:organization:to:from:user-agent :mime-version:date:message-id:delivered-to; bh=vjdyauhXPf+QkJg+Og2AJQ7XkmAWKZX/Yq8/Si2+1N0=; fh=+fqFbc7xIMnG/cSZsGs3FRxJ55tKLvNlnjRSUGhXbk0=; b=Bs8SLrXdf+eO8cQ5bRVO2YeYBHs/KbtZQfOqGPvy+qNSyI8+QvZ93vMTPeXZUS0KzL mBQozshHvBob5w/MzqxCw4ztKJlBxiA2/16eq1Wax5/oJC4S8Augb/f9hxMtedYKwSWl NrJ5eTkJ+xSyEwDI8Leak59ViFwX5MHljXqkSjdReJNriqTbJrbXRHALHPVBckMES5TZ 6HguvkxxPSXrfEAlETsyQg8AqAnx3QY+BA8ZGKj+LtXIXNFVScUciIzz97NFTsp7kjLP pVWpl/+vd4TY7VqjGa7jyNtKMtBR5WI4g0WabrR2/rwB14TxMCrHHOaiiJ1szYRyK+N6 Zz2A== ARC-Authentication-Results: i=1; mx.google.com; spf=pass (google.com: domain of ffmpeg-devel-bounces@ffmpeg.org designates 79.124.17.100 as permitted sender) smtp.mailfrom=ffmpeg-devel-bounces@ffmpeg.org Return-Path: Received: from ffbox0-bg.mplayerhq.hu (ffbox0-bg.ffmpeg.org. 
Message-ID: <8e8aedbc-77f0-576c-003d-36c8e3d865be@myais.com.cn>
Date: Sat, 26 Aug 2023 16:49:51 +0800
From: "Logan.Lyu"
To: ffmpeg-devel@ffmpeg.org
Organization: myais
Subject: [FFmpeg-devel] [PATCH 4/4] lavc/aarch64: new optimization for 8-bit hevc_qpel_uni_hv
Cc: jb@videolan.org, jdek@itanimul.li

checkasm bench:
put_hevc_qpel_uni_hv4_8_c: 489.2
put_hevc_qpel_uni_hv4_8_i8mm: 105.7
put_hevc_qpel_uni_hv6_8_c: 852.7
put_hevc_qpel_uni_hv6_8_i8mm: 268.7
put_hevc_qpel_uni_hv8_8_c: 1345.7
put_hevc_qpel_uni_hv8_8_i8mm: 300.4
put_hevc_qpel_uni_hv12_8_c: 2757.4
put_hevc_qpel_uni_hv12_8_i8mm: 581.4
put_hevc_qpel_uni_hv16_8_c: 4458.9
put_hevc_qpel_uni_hv16_8_i8mm: 860.2
put_hevc_qpel_uni_hv24_8_c: 9582.2
put_hevc_qpel_uni_hv24_8_i8mm: 2086.7
put_hevc_qpel_uni_hv32_8_c: 16401.9
put_hevc_qpel_uni_hv32_8_i8mm: 3217.4
put_hevc_qpel_uni_hv48_8_c: 36402.4
put_hevc_qpel_uni_hv48_8_i8mm: 7082.7
put_hevc_qpel_uni_hv64_8_c: 62713.2
put_hevc_qpel_uni_hv64_8_i8mm: 12408.9

Co-Authored-By: J.
Dekker Signed-off-by: Logon Lyu --- libavcodec/aarch64/hevcdsp_init_aarch64.c | 5 + libavcodec/aarch64/hevcdsp_qpel_neon.S | 366 ++++++++++++++++++++++ 2 files changed, 371 insertions(+) // void put_hevc_qpel_h(int16_t *dst, @@ -1530,6 +1569,333 @@ function ff_hevc_put_hevc_qpel_uni_w_v64_8_neon, export=1 endfunc #if HAVE_I8MM + +.macro calc_all2 + calc v30, v31, v16, v18, v20, v22, v24, v26, v28, v30, v17, v19, v21, v23, v25, v27, v29, v31 + b.eq 2f + calc v16, v17, v18, v20, v22, v24, v26, v28, v30, v16, v19, v21, v23, v25, v27, v29, v31, v17 + b.eq 2f + calc v18, v19, v20, v22, v24, v26, v28, v30, v16, v18, v21, v23, v25, v27, v29, v31, v17, v19 + b.eq 2f + calc v20, v21, v22, v24, v26, v28, v30, v16, v18, v20, v23, v25, v27, v29, v31, v17, v19, v21 + b.eq 2f + calc v22, v23, v24, v26, v28, v30, v16, v18, v20, v22, v25, v27, v29, v31, v17, v19, v21, v23 + b.eq 2f + calc v24, v25, v26, v28, v30, v16, v18, v20, v22, v24, v27, v29, v31, v17, v19, v21, v23, v25 + b.eq 2f + calc v26, v27, v28, v30, v16, v18, v20, v22, v24, v26, v29, v31, v17, v19, v21, v23, v25, v27 + b.eq 2f + calc v28, v29, v30, v16, v18, v20, v22, v24, v26, v28, v31, v17, v19, v21, v23, v25, v27, v29 + b.hi 1b +.endm + +function ff_hevc_put_hevc_qpel_uni_hv4_8_neon_i8mm, export=1 + add w10, w4, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x0, x1, [sp, #-16]! + stp x4, x6, [sp, #-16]! + sub x1, x2, x3, lsl #1 + stp x30, xzr, [sp, #-16]! + sub x1, x1, x3 + add x0, sp, #48 + mov x2, x3 + add x3, x4, #7 + mov x4, x5 + bl X(ff_hevc_put_hevc_qpel_h4_8_neon_i8mm) + ldp x30, xzr, [sp], #16 + ldp x4, x6, [sp], #16 + mov x9, #(MAX_PB_SIZE * 2) + ldp x0, x1, [sp], #16 + load_qpel_filterh x6, x5 + ldr d16, [sp] + ldr d17, [sp, x9] + add sp, sp, x9, lsl #1 + ldr d18, [sp] + ldr d19, [sp, x9] + add sp, sp, x9, lsl #1 + ldr d20, [sp] + ldr d21, [sp, x9] + add sp, sp, x9, lsl #1 + ldr d22, [sp] + add sp, sp, x9 +.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7 + ld1 {\tmp\().4h}, [sp], x9 + calc_qpelh v1, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sqrshrn, #12 + sqxtun v1.8b, v1.8h + subs w4, w4, #1 + st1 {v1.s}[0], [x0], x1 +.endm +1: calc_all +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_qpel_uni_hv6_8_neon_i8mm, export=1 + add w10, w4, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x0, x1, [sp, #-16]! + stp x4, x6, [sp, #-16]! + sub x1, x2, x3, lsl #1 + stp x30, xzr, [sp, #-16]! + sub x1, x1, x3 + add x0, sp, #48 + mov x2, x3 + add w3, w4, #7 + mov x4, x5 + bl X(ff_hevc_put_hevc_qpel_h6_8_neon_i8mm) + ldp x30, xzr, [sp], #16 + ldp x4, x6, [sp], #16 + mov x9, #(MAX_PB_SIZE * 2) + ldp x0, x1, [sp], #16 + load_qpel_filterh x6, x5 + sub x1, x1, #4 + ldr q16, [sp] + ldr q17, [sp, x9] + add sp, sp, x9, lsl #1 + ldr q18, [sp] + ldr q19, [sp, x9] + add sp, sp, x9, lsl #1 + ldr q20, [sp] + ldr q21, [sp, x9] + add sp, sp, x9, lsl #1 + ldr q22, [sp] + add sp, sp, x9 +.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7 + ld1 {\tmp\().8h}, [sp], x9 + calc_qpelh v1, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sqrshrn, #12 + calc_qpelh2 v1, v2, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sqrshrn2, #12 + sqxtun v1.8b, v1.8h + st1 {v1.s}[0], [x0], #4 + subs w4, w4, #1 + st1 {v1.h}[2], [x0], x1 +.endm +1: calc_all +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_qpel_uni_hv8_8_neon_i8mm, export=1 + add w10, w4, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x0, x1, [sp, #-16]! + stp x4, x6, [sp, #-16]! 
+ sub x1, x2, x3, lsl #1 + stp x30, xzr, [sp, #-16]! + sub x1, x1, x3 + add x0, sp, #48 + mov x2, x3 + add w3, w4, #7 + mov x4, x5 + bl X(ff_hevc_put_hevc_qpel_h8_8_neon_i8mm) + ldp x30, xzr, [sp], #16 + ldp x4, x6, [sp], #16 + mov x9, #(MAX_PB_SIZE * 2) + ldp x0, x1, [sp], #16 + load_qpel_filterh x6, x5 + ldr q16, [sp] + ldr q17, [sp, x9] + add sp, sp, x9, lsl #1 + ldr q18, [sp] + ldr q19, [sp, x9] + add sp, sp, x9, lsl #1 + ldr q20, [sp] + ldr q21, [sp, x9] + add sp, sp, x9, lsl #1 + ldr q22, [sp] + add sp, sp, x9 +.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7 + ld1 {\tmp\().8h}, [sp], x9 + calc_qpelh v1, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sqrshrn, #12 + calc_qpelh2 v1, v2, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sqrshrn2, #12 + sqxtun v1.8b, v1.8h + subs w4, w4, #1 + st1 {v1.8b}, [x0], x1 +.endm +1: calc_all +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_qpel_uni_hv12_8_neon_i8mm, export=1 + add w10, w4, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x0, x1, [sp, #-16]! + stp x4, x6, [sp, #-16]! + sub x1, x2, x3, lsl #1 + stp x7, x30, [sp, #-16]! + sub x1, x1, x3 + mov x2, x3 + add x0, sp, #48 + add w3, w4, #7 + mov x4, x5 + bl X(ff_hevc_put_hevc_qpel_h12_8_neon_i8mm) + ldp x7, x30, [sp], #16 + ldp x4, x6, [sp], #16 + mov x9, #(MAX_PB_SIZE * 2) + ldp x0, x1, [sp], #16 + load_qpel_filterh x6, x5 + sub x1, x1, #8 + ld1 {v16.8h, v17.8h}, [sp], x9 + ld1 {v18.8h, v19.8h}, [sp], x9 + ld1 {v20.8h, v21.8h}, [sp], x9 + ld1 {v22.8h, v23.8h}, [sp], x9 + ld1 {v24.8h, v25.8h}, [sp], x9 + ld1 {v26.8h, v27.8h}, [sp], x9 + ld1 {v28.8h, v29.8h}, [sp], x9 +.macro calc tmp0, tmp1, src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11, src12, src13, src14, src15 + ld1 {\tmp0\().8h, \tmp1\().8h}, [sp], x9 + calc_qpelh v1, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sqrshrn, #12 + calc_qpelh2 v1, v2, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sqrshrn2, #12 + calc_qpelh v2, \src8, \src9, \src10, \src11, \src12, \src13, \src14, \src15, sqrshrn, #12 + sqxtun v1.8b, v1.8h + sqxtun2 v1.16b, v2.8h + st1 {v1.8b}, [x0], #8 + subs w4, w4, #1 + st1 {v1.s}[2], [x0], x1 +.endm +1: calc_all2 +.purgem calc +2: ret +endfunc + +function ff_hevc_put_hevc_qpel_uni_hv16_8_neon_i8mm, export=1 + add w10, w4, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x0, x1, [sp, #-16]! + stp x4, x6, [sp, #-16]! + stp x7, x30, [sp, #-16]! 
+ add x0, sp, #48 + sub x1, x2, x3, lsl #1 + sub x1, x1, x3 + mov x2, x3 + add w3, w4, #7 + mov x4, x5 + bl X(ff_hevc_put_hevc_qpel_h16_8_neon_i8mm) + ldp x7, x30, [sp], #16 + ldp x4, x6, [sp], #16 + ldp x0, x1, [sp], #16 +.Lqpel_uni_hv16_loop: + mov x9, #(MAX_PB_SIZE * 2) + load_qpel_filterh x6, x5 + sub w12, w9, w7, lsl #1 +0: mov x8, sp // src + ld1 {v16.8h, v17.8h}, [x8], x9 + mov w11, w4 // height + ld1 {v18.8h, v19.8h}, [x8], x9 + mov x10, x0 // dst + ld1 {v20.8h, v21.8h}, [x8], x9 + ld1 {v22.8h, v23.8h}, [x8], x9 + ld1 {v24.8h, v25.8h}, [x8], x9 + ld1 {v26.8h, v27.8h}, [x8], x9 + ld1 {v28.8h, v29.8h}, [x8], x9 +.macro calc tmp0, tmp1, src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11, src12, src13, src14, src15 + ld1 {\tmp0\().8h, \tmp1\().8h}, [x8], x9 + calc_qpelh v1, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sqrshrn, #12 + calc_qpelh2 v1, v2, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sqrshrn2, #12 + calc_qpelh v2, \src8, \src9, \src10, \src11, \src12, \src13, \src14, \src15, sqrshrn, #12 + calc_qpelh2 v2, v3, \src8, \src9, \src10, \src11, \src12, \src13, \src14, \src15, sqrshrn2, #12 + sqxtun v1.8b, v1.8h + subs x11, x11, #1 + sqxtun2 v1.16b, v2.8h + st1 {v1.16b}, [x10], x1 +.endm +1: calc_all2 +.purgem calc +2: add x0, x0, #16 + add sp, sp, #32 + subs w7, w7, #16 + b.ne 0b + add w10, w4, #6 + add sp, sp, x12 // discard rest of first line + lsl x10, x10, #7 + add sp, sp, x10 // tmp_array without first line + ret +endfunc + +function ff_hevc_put_hevc_qpel_uni_hv24_8_neon_i8mm, export=1 + stp x6, x30, [sp, #-16]! + mov x7, #16 + stp x0, x1, [sp, #-16]! + stp x2, x3, [sp, #-16]! + stp x4, x5, [sp, #-16]! + bl X(ff_hevc_put_hevc_qpel_uni_hv16_8_neon_i8mm) + ldp x4, x5, [sp], #16 + ldp x2, x3, [sp], #16 + add x2, x2, #16 + ldp x0, x1, [sp], #16 + mov x7, #8 + add x0, x0, #16 + ldr x6, [sp] + bl X(ff_hevc_put_hevc_qpel_uni_hv8_8_neon_i8mm) + ldp xzr, x30, [sp], #16 + ret +endfunc + +function ff_hevc_put_hevc_qpel_uni_hv32_8_neon_i8mm, export=1 + add w10, w4, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x0, x1, [sp, #-16]! + stp x4, x6, [sp, #-16]! + stp x7, x30, [sp, #-16]! + sub x1, x2, x3, lsl #1 + add x0, sp, #48 + sub x1, x1, x3 + mov x2, x3 + add w3, w4, #7 + mov x4, x5 + bl X(ff_hevc_put_hevc_qpel_h32_8_neon_i8mm) + ldp x7, x30, [sp], #16 + ldp x4, x6, [sp], #16 + ldp x0, x1, [sp], #16 + b .Lqpel_uni_hv16_loop +endfunc + +function ff_hevc_put_hevc_qpel_uni_hv48_8_neon_i8mm, export=1 + add w10, w4, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x0, x1, [sp, #-16]! + stp x4, x6, [sp, #-16]! + sub x1, x2, x3, lsl #1 + stp x7, x30, [sp, #-16]! + sub x1, x1, x3 + mov x2, x3 + add x0, sp, #48 + add w3, w4, #7 + mov x4, x5 + bl X(ff_hevc_put_hevc_qpel_h48_8_neon_i8mm) + ldp x7, x30, [sp], #16 + ldp x4, x6, [sp], #16 + ldp x0, x1, [sp], #16 + b .Lqpel_uni_hv16_loop +endfunc + +function ff_hevc_put_hevc_qpel_uni_hv64_8_neon_i8mm, export=1 + add w10, w4, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x0, x1, [sp, #-16]! + stp x4, x6, [sp, #-16]! + stp x7, x30, [sp, #-16]! 
+ add x0, sp, #48 + sub x1, x2, x3, lsl #1 + mov x2, x3 + sub x1, x1, x3 + add w3, w4, #7 + mov x4, x5 + bl X(ff_hevc_put_hevc_qpel_h64_8_neon_i8mm) + ldp x7, x30, [sp], #16 + ldp x4, x6, [sp], #16 + ldp x0, x1, [sp], #16 + b .Lqpel_uni_hv16_loop +endfunc + .macro QPEL_UNI_W_H_HEADER ldr x12, [sp] sub x2, x2, #3 diff --git a/libavcodec/aarch64/hevcdsp_init_aarch64.c b/libavcodec/aarch64/hevcdsp_init_aarch64.c index 6c1e8413eb..782cf802f6 100644 --- a/libavcodec/aarch64/hevcdsp_init_aarch64.c +++ b/libavcodec/aarch64/hevcdsp_init_aarch64.c @@ -196,6 +196,10 @@ NEON8_FNPROTO(qpel_uni_v, (uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, ptrdiff_t srcstride, int height, intptr_t mx, intptr_t my, int width),); +NEON8_FNPROTO(qpel_uni_hv, (uint8_t *dst, ptrdiff_t dststride, + const uint8_t *src, ptrdiff_t srcstride, + int height, intptr_t mx, intptr_t my, int width), _i8mm); + NEON8_FNPROTO(qpel_uni_w_h, (uint8_t *_dst, ptrdiff_t _dststride, const uint8_t *_src, ptrdiff_t _srcstride, int height, int denom, int wx, int ox, @@ -310,6 +314,7 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth) NEON8_FNASSIGN(c->put_hevc_epel_uni, 1, 1, epel_uni_hv, _i8mm); NEON8_FNASSIGN(c->put_hevc_epel_uni_w, 0, 1, epel_uni_w_h ,_i8mm); NEON8_FNASSIGN(c->put_hevc_qpel, 0, 1, qpel_h, _i8mm); + NEON8_FNASSIGN(c->put_hevc_qpel_uni, 1, 1, qpel_uni_hv, _i8mm); NEON8_FNASSIGN(c->put_hevc_qpel_uni_w, 0, 1, qpel_uni_w_h, _i8mm); NEON8_FNASSIGN(c->put_hevc_epel_uni_w, 1, 1, epel_uni_w_hv, _i8mm); NEON8_FNASSIGN_PARTIAL_5(c->put_hevc_qpel_uni_w, 1, 1, qpel_uni_w_hv, _i8mm); diff --git a/libavcodec/aarch64/hevcdsp_qpel_neon.S b/libavcodec/aarch64/hevcdsp_qpel_neon.S index 317a0b32b5..e4525c7924 100644 --- a/libavcodec/aarch64/hevcdsp_qpel_neon.S +++ b/libavcodec/aarch64/hevcdsp_qpel_neon.S @@ -77,6 +77,45 @@ endconst umlsl2 \dst\().8h, \src7\().16b, v7.16b .endm +.macro load_qpel_filterh freg, xreg + movrel \xreg, qpel_filters + add \xreg, \xreg, \freg, lsl #3 + ld1 {v0.8b}, [\xreg] + sxtl v0.8h, v0.8b +.endm + +.macro calc_qpelh dst, src0, src1, src2, src3, src4, src5, src6, src7, op, shift=6 + smull \dst\().4s, \src0\().4h, v0.h[0] + smlal \dst\().4s, \src1\().4h, v0.h[1] + smlal \dst\().4s, \src2\().4h, v0.h[2] + smlal \dst\().4s, \src3\().4h, v0.h[3] + smlal \dst\().4s, \src4\().4h, v0.h[4] + smlal \dst\().4s, \src5\().4h, v0.h[5] + smlal \dst\().4s, \src6\().4h, v0.h[6] + smlal \dst\().4s, \src7\().4h, v0.h[7] +.ifc \op, sshr + sshr \dst\().4s, \dst\().4s, \shift +.else + \op \dst\().4h, \dst\().4s, \shift +.endif +.endm + +.macro calc_qpelh2 dst, dstt, src0, src1, src2, src3, src4, src5, src6, src7, op, shift=6 + smull2 \dstt\().4s, \src0\().8h, v0.h[0] + smlal2 \dstt\().4s, \src1\().8h, v0.h[1] + smlal2 \dstt\().4s, \src2\().8h, v0.h[2] + smlal2 \dstt\().4s, \src3\().8h, v0.h[3] + smlal2 \dstt\().4s, \src4\().8h, v0.h[4] + smlal2 \dstt\().4s, \src5\().8h, v0.h[5] + smlal2 \dstt\().4s, \src6\().8h, v0.h[6] + smlal2 \dstt\().4s, \src7\().8h, v0.h[7] +.ifc \op, sshr + sshr \dst\().4s, \dstt\().4s, \shift +.else + \op \dst\().8h, \dstt\().4s, \shift +.endif +.endm + .macro put_hevc type .ifc \type, qpel
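
For readers less familiar with the assembly above: the calc_qpelh/calc_qpelh2 macros implement the second (vertical) pass of the qpel_uni_hv functions, an 8-tap filter applied to the 16-bit output of the horizontal pass stored in the on-stack tmp array. Below is a rough, illustrative C model of that pass only; it is not the FFmpeg implementation, the function and variable names are invented, and the single rounding shift is only approximately equivalent to the sqrshrn #12 / sqxtun sequence used in the assembly.

#include <stddef.h>
#include <stdint.h>

#define MAX_PB_SIZE 64

static uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

/* tmp points at the first of (height + 7) rows produced by the horizontal
 * pass (the source pointer was moved up by 3 rows before that pass), stored
 * with a stride of MAX_PB_SIZE int16_t elements, as in the assembly above. */
static void qpel_uni_hv_v_pass_sketch(uint8_t *dst, ptrdiff_t dststride,
                                      const int16_t *tmp, const int8_t f[8],
                                      int height, int width)
{
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            int sum = 0;
            for (int k = 0; k < 8; k++)               /* 8-tap vertical filter */
                sum += f[k] * tmp[x + k * MAX_PB_SIZE];
            /* one rounding shift by 12, roughly what sqrshrn #12 + sqxtun do */
            dst[x] = clip_u8((sum + (1 << 11)) >> 12);
        }
        tmp += MAX_PB_SIZE;
        dst += dststride;
    }
}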