diff mbox series

[FFmpeg-devel,2/3] lavc/vp8dsp: R-V V loop_filter_inner

Message ID CAEa-L+uxNmaSTS2FsX_zf_To3BAgzedfxijgq6Qh02RW1jiWkQ@mail.gmail.com
State New
Headers show
Series [FFmpeg-devel,1/3] lavc/vp8dsp: R-V V loop_filter_simple | expand

Checks

Context Check Description
andriy/configure_x86 warning Failed to apply patch
yinshiyou/configure_loongarch64 warning Failed to apply patch

Commit Message

flow gg April 20, 2024, 3:55 p.m. UTC

diff mbox series

Patch

From c033ab8d30135dc02b09b1747c0761baefdcbb4a Mon Sep 17 00:00:00 2001
From: sunyuechi <sunyuechi@iscas.ac.cn>
Date: Sat, 20 Apr 2024 23:13:07 +0800
Subject: [PATCH 2/3] lavc/vp8dsp: R-V V loop_filter_inner

C908:
vp8_loop_filter8uv_inner_v_c: 738.2
vp8_loop_filter8uv_inner_v_rvv_i32: 455.2
vp8_loop_filter16y_inner_h_c: 685.0
vp8_loop_filter16y_inner_h_rvv_i32: 497.0
vp8_loop_filter16y_inner_v_c: 743.7
vp8_loop_filter16y_inner_v_rvv_i32: 295.7
---
 libavcodec/riscv/vp8dsp_init.c |   4 ++
 libavcodec/riscv/vp8dsp_rvv.S  | 110 +++++++++++++++++++++++++++++++++
 2 files changed, 114 insertions(+)

diff --git a/libavcodec/riscv/vp8dsp_init.c b/libavcodec/riscv/vp8dsp_init.c
index 46ca71ed04..aa95021df5 100644
--- a/libavcodec/riscv/vp8dsp_init.c
+++ b/libavcodec/riscv/vp8dsp_init.c
@@ -123,6 +123,10 @@  av_cold void ff_vp8dsp_init_riscv(VP8DSPContext *c)
             c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_rvv;
         }
 
+        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_rvv;
+        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_rvv;
+        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_rvv;
+
         c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter16_simple_rvv;
         c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter16_simple_rvv;
     }
diff --git a/libavcodec/riscv/vp8dsp_rvv.S b/libavcodec/riscv/vp8dsp_rvv.S
index 2eadfc5766..f10e269d9d 100644
--- a/libavcodec/riscv/vp8dsp_rvv.S
+++ b/libavcodec/riscv/vp8dsp_rvv.S
@@ -72,6 +72,13 @@  func ff_vp8_idct_dc_add4uv_rvv, zve32x
         ret
 endfunc
 
+.macro filter_abs dst diff fI
+        vneg.v          v8, \diff
+        vmax.vv         \dst, v8, \diff
+        vmsleu.vx       v8, \dst, \fI
+        vmand.mm        v27, v27, v8
+.endm
+
 .macro filter_fmin len a f1 p0f2 q0f1
 .ifc \len,16
         vsetvli         zero, zero, e16, m2, ta, ma
@@ -101,6 +108,16 @@  endfunc
         vle8.v          v11, (t4)
         vle8.v          v17, (t1)
         vle8.v          v22, (\dst)
+        .if \normal
+        sub             t3, t2, a6
+        sub             t0, t1, a6
+        add             t6, \dst, a6
+        add             a7, t4, a6
+        vle8.v          v2, (t3)
+        vle8.v          v15, (t0)
+        vle8.v          v10, (t6)
+        vle8.v          v14, (a7)
+        .endif
 .else
         addi            t1, \dst, -1
         addi            a6, \dst, -2
@@ -109,9 +126,28 @@  endfunc
         vlse8.v         v11, (t4), \stride
         vlse8.v         v17, (t1), \stride
         vlse8.v         v22, (\dst), \stride
+        .if \normal
+        addi            t5, \dst, -4
+        addi            t0, \dst, -3
+        addi            t6, \dst, 2
+        addi            a7, \dst, 3
+        vlse8.v         v2, (t5), \stride
+        vlse8.v         v15, (t0), \stride
+        vlse8.v         v10, (t6), \stride
+        vlse8.v         v14, (a7), \stride
+        .endif
 .endif
         vwsubu.vv       v12, v1, v11             // p1-q1
         vwsubu.vv       v24, v22, v17            // q0-p0
+
+.if \normal
+        vwsubu.vv       v30, v1, v17
+        vwsubu.vv       v20, v11, v22
+        vwsubu.vv       v28, v1, v15
+        vwsubu.vv       v4, v2, v15
+        vwsubu.vv       v6, v10, v11
+        vwsubu.vv       v2, v14, v10
+.endif
         vnclip.wi       v23, v12, 0
 
 .ifc \len,16
@@ -130,6 +166,26 @@  endfunc
         vmacc.vx        v18, a7, v8
         vmsleu.vx       v0, v18, \fE
 
+.if \normal
+        vneg.v          v18, v30
+        vmax.vv         v30, v18, v30
+        vmsleu.vx       v27, v30, \fI
+        filter_abs      v18 v28 \fI
+        filter_abs      v18 v4 \fI
+        filter_abs      v18 v6 \fI
+        filter_abs      v18 v2 \fI
+        filter_abs      v20 v20 \fI
+        vmand.mm        v27, v0, v27             // vp8_simple_limit && normal
+
+        vmsgtu.vx       v20, v20, \thresh        // hev
+        vmsgtu.vx       v3, v30, \thresh
+        vmor.mm         v3, v3, v20              // v3 = hev: > thresh
+        vzext.vf2       v18, v1                  // v18 = p1
+        vmand.mm        v0, v27, v3              // v0 = normal && hev
+        vzext.vf2       v20, v11                 // v20 = q1
+        vmnot.m         v3, v3                   // v3 = !hev
+.endif
+
         li              t5, 3
         li              a7, 124
         li              t3, 123
@@ -163,8 +219,62 @@  endfunc
         vsse8.v         v6, (\dst), \stride, v0.t
 .endif
 
+.if \normal
+        vmand.mm        v0, v27, v3              // vp8_normal_limit && !hev
+
+        .if \inner
+        vnclip.wi       v30, v30, 0
+        filter_fmin     \len v30 v24 v4 v6
+        vadd.vi         v24, v24, 1
+        vsra.vi         v24, v24, 1              // (f1 + 1) >> 1
+        vadd.vv         v8, v18, v24
+        vsub.vv         v10, v20, v24
+        .endif
+
+        vmax.vx         v8, v8, zero
+        vmax.vx         v10, v10, zero
+.ifc \len,16
+        vsetvli         zero, zero, e8, m1, ta, ma
+.else
+        vsetvli         zero, zero, e8, mf2, ta, ma
+.endif
+        vnclipu.wi      v4, v4, 0
+        vnclipu.wi      v5, v6, 0
+        vnclipu.wi      v6, v8, 0
+        vnclipu.wi      v7, v10, 0
+        .ifc \type,v
+        vse8.v          v4, (t1), v0.t
+        vse8.v          v5, (\dst), v0.t
+        vse8.v          v6, (t2), v0.t
+        vse8.v          v7, (t4), v0.t
+        .else
+        vsse8.v         v4, (t1), \stride, v0.t
+        vsse8.v         v5, (\dst), \stride, v0.t
+        vsse8.v         v6, (a6), \stride, v0.t
+        vsse8.v         v7, (t4), \stride, v0.t
+        .endif
+.endif
 .endm
 
+func ff_vp8_h_loop_filter16_inner_rvv, zve32x
+        vsetivli        zero, 16, e8, m1, ta, ma
+        filter 16 h 1 1 a0 a1 a2 a3 a4
+        ret
+endfunc
+
+func ff_vp8_v_loop_filter16_inner_rvv, zve32x
+        vsetivli        zero, 16, e8, m1, ta, ma
+        filter 16 v 1 1 a0 a1 a2 a3 a4
+        ret
+endfunc
+
+func ff_vp8_v_loop_filter8uv_inner_rvv, zve32x
+        vsetivli        zero, 8, e8, mf2, ta, ma
+        filter 8 v 1 1 a0 a2 a3 a4 a5
+        filter 8 v 1 1 a1 a2 a3 a4 a5
+        ret
+endfunc
+
 func ff_vp8_v_loop_filter16_simple_rvv, zve32x
         vsetivli        zero, 16, e8, m1, ta, ma
         filter 16 v 0 0 a0 a1 a2 a3 a4
-- 
2.44.0