[FFmpeg-devel] lavc/vc1dsp: R-V V vc1_inv_trans_8x8

Message ID 20240603153008.21229-1-remi@remlab.net
State New
Series [FFmpeg-devel] lavc/vc1dsp: R-V V vc1_inv_trans_8x8

Checks

Context                          Check    Description
yinshiyou/make_loongarch64       success  Make finished
yinshiyou/make_fate_loongarch64  success  Make fate finished
andriy/make_x86                  success  Make finished
andriy/make_fate_x86             success  Make fate finished

Commit Message

Rémi Denis-Courmont June 3, 2024, 3:30 p.m. UTC
T-Head C908:
vc1dsp.vc1_inv_trans_8x8_c:      14.7
vc1dsp.vc1_inv_trans_8x8_rvv_i32: 4.7
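
These appear to be checkasm --bench timings (lower is better), i.e. the
vector version runs roughly three times as fast as the C reference on this
core. Assuming an RVV-capable target or emulator, a run along these lines
should reproduce them:

    make checkasm
    tests/checkasm/checkasm --bench --test=vc1dsp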
---
 libavcodec/riscv/vc1dsp_init.c |   2 +
 libavcodec/riscv/vc1dsp_rvv.S  | 112 +++++++++++++++++++++++++++++++++
 2 files changed, 114 insertions(+)

Patch

diff --git a/libavcodec/riscv/vc1dsp_init.c b/libavcodec/riscv/vc1dsp_init.c
index e4838fb347..b8a1015ce5 100644
--- a/libavcodec/riscv/vc1dsp_init.c
+++ b/libavcodec/riscv/vc1dsp_init.c
@@ -26,6 +26,7 @@ 
 #include "libavcodec/vc1.h"
 
 void ff_vc1_inv_trans_8x8_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_8x8_rvv(int16_t block[64]);
 void ff_vc1_inv_trans_4x8_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
 void ff_vc1_inv_trans_8x4_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
 void ff_vc1_inv_trans_4x4_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
@@ -53,6 +54,7 @@  av_cold void ff_vc1dsp_init_riscv(VC1DSPContext *dsp)
 # if HAVE_RVV
     if (flags & AV_CPU_FLAG_RVV_I32) {
         if (ff_rv_vlen_least(128)) {
+            dsp->vc1_inv_trans_8x8 = ff_vc1_inv_trans_8x8_rvv;
             dsp->vc1_inv_trans_4x8_dc = ff_vc1_inv_trans_4x8_dc_rvv;
             dsp->vc1_inv_trans_4x4_dc = ff_vc1_inv_trans_4x4_dc_rvv;
             dsp->avg_vc1_mspel_pixels_tab[0][0] = ff_avg_pixels16x16_rvv;
diff --git a/libavcodec/riscv/vc1dsp_rvv.S b/libavcodec/riscv/vc1dsp_rvv.S
index 8b3a830a4a..7a78241925 100644
--- a/libavcodec/riscv/vc1dsp_rvv.S
+++ b/libavcodec/riscv/vc1dsp_rvv.S
@@ -113,6 +113,118 @@  func ff_vc1_inv_trans_4x4_dc_rvv, zve32x
         ret
 endfunc
 
+        .variant_cc ff_vc1_inv_trans_8_rvv
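+# One 8-point inverse-transform pass over eight vectors: v0..v7 hold the
+# eight rows (or columns), t1 holds the rounding bias (4 for the row pass,
+# 64 for the column pass), and the return address is passed in t0, hence
+# the non-standard calling convention.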
+func ff_vc1_inv_trans_8_rvv, zve32x
+        li       t4, 12
+        vsll.vi  v18, v6, 4    # 16 * src[6]
+        li       t2, 6
+        vmul.vx  v8, v0, t4    # 12 * src[0]
+        li       t5, 15
+        vmul.vx  v10, v4, t4   # 12 * src[4]
+        li       t3, 9
+        # t[2..5] = [6 9 12 15]
+        vsll.vi  v12, v2, 4    # 16 * src[2]
+        vmul.vx  v14, v6, t2   #  6 * src[6]
+        vmul.vx  v16, v2, t2   #  6 * src[2]
+        vadd.vx  v8, v8, t1    # +4 or +64
+        vadd.vv  v26, v12, v14 # t3
+        vadd.vv  v24, v8, v10  # t1
+        vsub.vv  v25, v8, v10  # t2
+        vsub.vv  v27, v16, v18 # t4
+        vadd.vv  v28, v24, v26 # t5
+        vsub.vv  v31, v24, v26 # t8
+        vadd.vv  v29, v25, v27 # t6
+        vsub.vv  v30, v25, v27 # t7
+        vsll.vi  v20, v1, 4
+        vmul.vx  v21, v3, t5
+        vmul.vx  v22, v5, t3
+        vsll.vi  v23, v7, 2
+        vadd.vv  v20, v20, v21
+        vadd.vv  v22, v22, v23
+        vsll.vi  v21, v3, 2
+        vadd.vv  v16, v20, v22 # t1
+        vmul.vx  v20, v1, t5
+        vsll.vi  v22, v5, 4
+        vmul.vx  v23, v7, t3
+        vsub.vv  v20, v20, v21
+        vadd.vv  v22, v22, v23
+        vsll.vi  v21, v3, 4
+        vsub.vv  v17, v20, v22 # t2
+        vmul.vx  v20, v1, t3
+        vsll.vi  v22, v5, 2
+        vmul.vx  v23, v7, t5
+        vsub.vv  v20, v20, v21
+        vadd.vv  v22, v22, v23
+        vmul.vx  v21, v3, t3
+        vadd.vv  v18, v20, v22 # t3
+        vsll.vi  v20, v1, 2
+        vmul.vx  v22, v5, t5
+        vsll.vi  v23, v7, 4
+        vsub.vv  v20, v20, v21
+        vsub.vv  v22, v22, v23
+        vadd.vv  v0, v28, v16  # t5 + t1
+        vadd.vv  v19, v20, v22 # t4
+        vadd.vv  v1, v29, v17  # t6 + t2
+        vadd.vv  v2, v30, v18  # t7 + t3
+        vadd.vv  v3, v31, v19  # t8 + t4
+        vsub.vv  v4, v31, v19  # t8 - t4
+        vsub.vv  v5, v30, v18  # t7 - t3
+        vsub.vv  v6, v29, v17  # t6 - t2
+        vsub.vv  v7, v28, v16  # t5 - t1
+        jr       t0
+endfunc
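+
+# For reference, each pass computes (cf. vc1_inv_trans_8x8_c, where the
+# rounding bias R is 4 for rows and 64 for columns, and the final shifts
+# are applied by the caller):
+#   t1 = 12 * (src[0] + src[4]) + R;    t2 = 12 * (src[0] - src[4]) + R;
+#   t3 = 16 * src[2] +  6 * src[6];     t4 =  6 * src[2] - 16 * src[6];
+#   t5 = t1 + t3;  t6 = t2 + t4;  t7 = t2 - t4;  t8 = t1 - t3;
+#   t1 = 16 * src[1] + 15 * src[3] +  9 * src[5] +  4 * src[7];
+#   t2 = 15 * src[1] -  4 * src[3] - 16 * src[5] -  9 * src[7];
+#   t3 =  9 * src[1] - 16 * src[3] +  4 * src[5] + 15 * src[7];
+#   t4 =  4 * src[1] -  9 * src[3] + 15 * src[5] - 16 * src[7];
+#   dst[0..3] = t5 + t1, t6 + t2, t7 + t3, t8 + t4
+#   dst[4..7] = t8 - t4, t7 - t3, t6 - t2, t5 - t1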
+
+func ff_vc1_inv_trans_8x8_rvv, zve32x
+        vsetivli zero, 8, e16, m1, ta, ma
+        addi     a1, a0, 1 * 8 * 2   # a1..a7: pointers to rows 1..7
+        vle16.v  v0, (a0)
+        addi     a2, a0, 2 * 8 * 2
+        vle16.v  v1, (a1)
+        addi     a3, a0, 3 * 8 * 2
+        vle16.v  v2, (a2)
+        addi     a4, a0, 4 * 8 * 2
+        vle16.v  v3, (a3)
+        addi     a5, a0, 5 * 8 * 2
+        vle16.v  v4, (a4)
+        addi     a6, a0, 6 * 8 * 2
+        vle16.v  v5, (a5)
+        addi     a7, a0, 7 * 8 * 2
+        vle16.v  v6, (a6)
+        li       t1, 4         # rounding bias for the row pass
+        vle16.v  v7, (a7)
+        jal      t0, ff_vc1_inv_trans_8_rvv
+        .irp n,0,1,2,3,4,5,6,7
+        vsra.vi  v\n, v\n, 3   # row pass: >> 3
+        .endr
+        vsseg8e16.v v0, (a0)   # segment store: writes the block transposed
+        li       t1, 64        # rounding bias for the column pass
+        .irp n,0,1,2,3,4,5,6,7
+        vle16.v  v\n, (a\n)    # reload what are now the columns
+        .endr
+        jal      t0, ff_vc1_inv_trans_8_rvv
+        vadd.vi  v4, v4, 1     # rows 4..7 get an extra +1 before >> 7
+        vadd.vi  v5, v5, 1
+        vsra.vi  v4, v4, 7
+        vsra.vi  v5, v5, 7
+        vse16.v  v4, (a4)
+        vadd.vi  v6, v6, 1
+        vse16.v  v5, (a5)
+        vadd.vi  v7, v7, 1
+        vsra.vi  v6, v6, 7
+        vsra.vi  v7, v7, 7
+        vse16.v  v6, (a6)
+        vsra.vi  v0, v0, 7
+        vse16.v  v7, (a7)
+        vsra.vi  v1, v1, 7
+        vse16.v  v0, (a0)
+        vsra.vi  v2, v2, 7
+        vse16.v  v1, (a1)
+        vsra.vi  v3, v3, 7
+        vse16.v  v2, (a2)
+        vse16.v  v3, (a3)
+        ret
+endfunc
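+
+# vsetivli zero, 8, e16, m1 requires VLMAX >= 8 at e16, i.e. VLEN >= 128,
+# which is what the ff_rv_vlen_least(128) guard in vc1dsp_init.c ensures.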
+
 .macro mspel_op op pos n1 n2
         add           t1, \pos, a2
         v\op\()e8.v   v\n1, (\pos)