diff mbox series

[FFmpeg-devel,10/10] lavc/vp8dsp: R-V V loop_filter

Message ID tencent_D56CA8965BC40F4515AFCD3561B98569F10A@qq.com
State New
Headers show
Series [FFmpeg-devel,01/10] lavc/vp8dsp: R-V V put_vp8_pixels | expand

Checks

Context Check Description
andriy/make_x86 success Make finished
andriy/make_fate_x86 success Make fate finished

Commit Message

uk7b@foxmail.com May 4, 2024, 2:48 p.m. UTC
From: sunyuechi <sunyuechi@iscas.ac.cn>

C908:
vp8_loop_filter8uv_v_c: 745.5
vp8_loop_filter8uv_v_rvv_i32: 467.2
vp8_loop_filter16y_h_c: 674.2
vp8_loop_filter16y_h_rvv_i32: 553.0
vp8_loop_filter16y_v_c: 732.7
vp8_loop_filter16y_v_rvv_i32: 324.5
---
 libavcodec/riscv/vp8dsp_init.c |  4 +++
 libavcodec/riscv/vp8dsp_rvv.S  | 63 ++++++++++++++++++++++++++++++++++
 2 files changed, 67 insertions(+)
diff mbox series

Patch

diff --git a/libavcodec/riscv/vp8dsp_init.c b/libavcodec/riscv/vp8dsp_init.c
index aa95021df5..597e6acec8 100644
--- a/libavcodec/riscv/vp8dsp_init.c
+++ b/libavcodec/riscv/vp8dsp_init.c
@@ -123,6 +123,10 @@  av_cold void ff_vp8dsp_init_riscv(VP8DSPContext *c)
             c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_rvv;
         }
 
+        c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16_rvv;
+        c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16_rvv;
+        c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_rvv;
+
         c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_rvv;
         c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_rvv;
         c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_rvv;
diff --git a/libavcodec/riscv/vp8dsp_rvv.S b/libavcodec/riscv/vp8dsp_rvv.S
index f10e269d9d..af28ea5258 100644
--- a/libavcodec/riscv/vp8dsp_rvv.S
+++ b/libavcodec/riscv/vp8dsp_rvv.S
@@ -229,6 +229,39 @@  endfunc
         vsra.vi         v24, v24, 1              // (f1 + 1) >> 1;
         vadd.vv         v8, v18, v24
         vsub.vv         v10, v20, v24
+        .else
+        li              t5, 27
+        li              t3, 9
+        li              a7, 18
+        vwmul.vx        v2, v11, t5
+        vwmul.vx        v6, v11, t3
+        vwmul.vx        v4, v11, a7
+
+.ifc \len,16
+        vsetvli         zero, zero, e16, m2, ta, ma
+.else
+        vsetvli         zero, zero, e16, m1, ta, ma
+.endif
+
+        li              a7, 63
+        vzext.vf2       v14, v15                 // p2
+        vzext.vf2       v24, v10                 // q2
+        vadd.vx         v2, v2, a7
+        vadd.vx         v4, v4, a7
+        vadd.vx         v6, v6, a7
+        vsra.vi         v2, v2, 7                // a0
+        vsra.vi         v12, v4, 7               // a1
+        vsra.vi         v6, v6, 7                // a2
+        vadd.vv         v14, v14, v6             // p2 + a2
+        vsub.vv         v22, v24, v6             // q2 - a2
+        vsub.vv         v10, v20, v12            // q1 - a1
+        vadd.vv         v4, v8, v2               // p0 + a0
+        vsub.vv         v6, v16, v2              // q0 - a0
+        vadd.vv         v8, v12, v18             // a1 + p1
+        vmax.vx         v4, v4, zero
+        vmax.vx         v6, v6, zero
+        vmax.vx         v14, v14, zero
+        vmax.vx         v16, v22, zero
         .endif
 
         vmax.vx         v8, v8, zero
@@ -253,6 +286,17 @@  endfunc
         vsse8.v         v6, (a6), \stride, v0.t
         vsse8.v         v7, (t4), \stride, v0.t
         .endif
+        .if !\inner
+        vnclipu.wi      v14, v14, 0
+        vnclipu.wi      v16, v16, 0
+        .ifc \type,v
+        vse8.v          v14, (t0), v0.t
+        vse8.v          v16, (t6), v0.t
+        .else
+        vsse8.v         v14, (t0), \stride, v0.t
+        vsse8.v         v16, (t6), \stride, v0.t
+        .endif
+        .endif
 .endif
 .endm
 
@@ -275,6 +319,25 @@  func ff_vp8_v_loop_filter8uv_inner_rvv, zve32x
         ret
 endfunc
 
+func ff_vp8_v_loop_filter16_rvv, zve32x          // full (non-inner) vertical luma edge filter, 16 px
+        vsetivli        zero, 16, e8, m1, ta, ma // vl = 16 byte elements: one 16-pixel macroblock edge
+        filter 16 v 1 0 a0 a1 a2 a3 a4           // 3rd arg 1 (vs 0 in the _simple variant) presumably selects the normal filter path; a0=dst, a1=stride, a2-a4 thresholds — confirm against filter macro definition
+        ret
+endfunc
+
+func ff_vp8_h_loop_filter16_rvv, zve32x          // full (non-inner) horizontal luma edge filter, 16 px
+        vsetivli        zero, 16, e8, m1, ta, ma // vl = 16 byte elements: one 16-pixel macroblock edge
+        filter 16 h 1 0 a0 a1 a2 a3 a4           // 'h' selects strided (column) access in the macro (see vsse8.v path); arg meaning as in the v variant — confirm against filter macro definition
+        ret
+endfunc
+
+func ff_vp8_v_loop_filter8uv_rvv, zve32x          // full (non-inner) vertical chroma edge filter, 8 px per plane
+        vsetivli        zero, 8, e8, mf2, ta, ma  // vl = 8 byte elements: chroma edge is half the luma width
+        filter 8 v 1 0 a0 a2 a3 a4 a5             // first plane base in a0 (presumably U per VP8DSPContext's dstU/dstV/stride/E/I/hev signature — confirm); a2=stride, a3-a5 thresholds
+        filter 8 v 1 0 a1 a2 a3 a4 a5             // second plane base in a1 (presumably V), filtered with identical parameters
+        ret
+endfunc
+
 func ff_vp8_v_loop_filter16_simple_rvv, zve32x
         vsetivli        zero, 16, e8, m1, ta, ma
         filter 16 v 0 0 a0 a1 a2 a3 a4