@@ -78,6 +78,13 @@ av_cold void ff_vp78dsp_init_riscv(VP8DSPContext *c)
         c->put_vp8_bilinear_pixels_tab[2][1][2] = ff_put_vp8_bilin4_hv_rvv;
         c->put_vp8_bilinear_pixels_tab[2][2][1] = ff_put_vp8_bilin4_hv_rvv;
         c->put_vp8_bilinear_pixels_tab[2][2][2] = ff_put_vp8_bilin4_hv_rvv;
+
+        c->put_vp8_epel_pixels_tab[0][0][2] = ff_put_vp8_epel16_h6_rvv;
+        c->put_vp8_epel_pixels_tab[1][0][2] = ff_put_vp8_epel8_h6_rvv;
+        c->put_vp8_epel_pixels_tab[2][0][2] = ff_put_vp8_epel4_h6_rvv;
+        c->put_vp8_epel_pixels_tab[0][0][1] = ff_put_vp8_epel16_h4_rvv;
+        c->put_vp8_epel_pixels_tab[1][0][1] = ff_put_vp8_epel8_h4_rvv;
+        c->put_vp8_epel_pixels_tab[2][0][1] = ff_put_vp8_epel4_h4_rvv;
     }
 #endif
 }
@@ -223,3 +223,108 @@ endfunc
 func ff_put_vp8_bilin4_hv_rvv, zve32x
         put_vp8_bilin_hv 4
 endfunc
+
+const subpel_filters                              # signed VP8 six-tap subpel coefficients, indexed by mx - 1
+        .byte  0,  -6, 123,  12,  -1,  0
+        .byte  2, -11, 108,  36,  -8,  1
+        .byte  0,  -9,  93,  50,  -6,  0
+        .byte  3, -16,  77,  77, -16,  3
+        .byte  0,  -6,  50,  93,  -9,  0
+        .byte  1,  -8,  36, 108, -11,  2
+        .byte  0,  -1,  12, 123,  -6,  0
+endconst
+
+.macro epel_filter size
+        lla             t2, subpel_filters
+        addi            t0, a5, -1                # filter rows are indexed by mx - 1
+        li              t1, 6
+        mul             t0, t0, t1                # each filter row is 6 bytes
+        add             t0, t0, t2
+        .irp n, 1, 2, 3, 4
+        lb              t\n, \n(t0)               # t1..t4 = middle taps
+        .endr
+.ifc \size,6
+        lb              t5, 5(t0)                 # outer taps, 6-tap filter only
+        lb              t0, (t0)
+.endif
+.endm
+
+.macro epel_load dst len size
+        addi            t6, a2, -1                # src - 1
+        addi            a7, a2, 1                 # src + 1
+        vle8.v          v24, (a2)
+        vle8.v          v22, (t6)
+        vle8.v          v26, (a7)
+        addi            a7, a7, 1                 # src + 2
+        vle8.v          v28, (a7)
+        vwmulu.vx       v16, v24, t2              # tap2 * src[0]
+        vwmulu.vx       v20, v26, t3              # tap3 * src[1]
+.ifc \size,6
+        addi            t6, t6, -1                # src - 2
+        addi            a7, a7, 1                 # src + 3
+        vle8.v          v24, (t6)
+        vle8.v          v26, (a7)
+        vwmaccu.vx      v16, t0, v24              # += tap0 * src[-2]
+        vwmaccu.vx      v16, t5, v26              # += tap5 * src[3]
+.endif
+        li              t6, 64
+        vwmaccsu.vx     v16, t1, v22              # += tap1 * src[-1] (signed tap)
+        vwmaccsu.vx     v16, t4, v28              # += tap4 * src[2]  (signed tap)
+        vwadd.wx        v16, v16, t6              # += 64 (rounding before >> 7)
+
+.ifc \len,4
+        vsetvli         zero, zero, e16, mf2, ta, ma
+.elseif \len == 8
+        vsetvli         zero, zero, e16, m1, ta, ma
+.else
+        vsetvli         zero, zero, e16, m2, ta, ma
+.endif
+
+        vwadd.vv        v24, v16, v20             # 32-bit sum of the two accumulators
+        vnsra.wi        v24, v24, 7
+        vmax.vx         v24, v24, zero            # clamp negative results to 0
+.ifc \len,4
+        vsetvli         zero, zero, e8, mf4, ta, ma
+.elseif \len == 8
+        vsetvli         zero, zero, e8, mf2, ta, ma
+.else
+        vsetvli         zero, zero, e8, m1, ta, ma
+.endif
+        vnclipu.wi      \dst, v24, 0              # saturate down to 8 bits
+.endm
+
+.macro epel_load_inc dst len size
+        epel_load       \dst \len \size
+        add             a2, a2, a3                # advance src by srcstride
+.endm
+
+.macro epel len size
+        epel_filter     \size
+
+.ifc \len,4
+        vsetivli        zero, 4, e8, mf4, ta, ma
+.elseif \len == 8
+        vsetivli        zero, 8, e8, mf2, ta, ma
+.else
+        vsetivli        zero, 16, e8, m1, ta, ma
+.endif
+
+1:
+        addi            a4, a4, -1                # h--
+        epel_load_inc   v30 \len \size
+        vse8.v          v30, (a0)
+        add             a0, a0, a1                # advance dst by dststride
+        bnez            a4, 1b
+
+        ret
+.endm
+
+.irp len, 16, 8, 4
+func ff_put_vp8_epel\len\()_h6_rvv, zve32x
+        epel            \len 6
+endfunc
+
+func ff_put_vp8_epel\len\()_h4_rvv, zve32x
+        epel            \len 4
+endfunc
+.endr
From: sunyuechi <sunyuechi@iscas.ac.cn>

C908:
vp8_put_epel4_h4_c: 10.7
vp8_put_epel4_h4_rvv_i32: 5.0
vp8_put_epel4_h6_c: 15.0
vp8_put_epel4_h6_rvv_i32: 6.2
vp8_put_epel8_h4_c: 43.2
vp8_put_epel8_h4_rvv_i32: 11.2
vp8_put_epel8_h6_c: 57.5
vp8_put_epel8_h6_rvv_i32: 13.5
vp8_put_epel16_h4_c: 92.5
vp8_put_epel16_h4_rvv_i32: 13.7
vp8_put_epel16_h6_c: 139.0
vp8_put_epel16_h6_rvv_i32: 16.5
---
 libavcodec/riscv/vp8dsp_init.c |   7 +++
 libavcodec/riscv/vp8dsp_rvv.S  | 105 +++++++++++++++++++++++++++++++++
 2 files changed, 112 insertions(+)
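
For anyone cross-checking the arithmetic in the epel_load macro above, the horizontal 6-tap EPEL filter it vectorises boils down to the scalar form below. This is a simplified sketch written for this review, not FFmpeg's actual C implementation; the function name and parameters are illustrative only, and filter[] is assumed to be one signed row of the subpel_filters table.

#include <stddef.h>
#include <stdint.h>

/* Scalar sketch of the 6-tap horizontal EPEL filter: for each output pixel,
 * a signed 6-tap convolution over src[x-2..x+3], rounded with +64,
 * shifted right by 7 and clamped to the 0..255 range. */
static void epel_h6_scalar(uint8_t *dst, ptrdiff_t dststride,
                           const uint8_t *src, ptrdiff_t srcstride,
                           int width, int h, const int8_t filter[6])
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < width; x++) {
            int sum = filter[0] * src[x - 2] + filter[1] * src[x - 1] +
                      filter[2] * src[x]     + filter[3] * src[x + 1] +
                      filter[4] * src[x + 2] + filter[5] * src[x + 3] + 64;
            sum >>= 7;
            dst[x] = sum < 0 ? 0 : sum > 255 ? 255 : sum;
        }
        dst += dststride;
        src += srcstride;
    }
}

The vector code evaluates the same expression per output pixel, but keeps the filter[3] * src[x + 1] product in a separate widening accumulator (v20), which keeps each 16-bit partial sum within range until the 32-bit vwadd.vv that precedes the shift, clamp and narrowing clip.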