
[FFmpeg-devel,3/3] lavc/sbrdsp: R-V V neg_odd_64

Message ID 20231029202559.95350-3-remi@remlab.net
State Accepted
Commit d06fd18f8f4c6a81ef94cbb600620d83ad51269d
Series [FFmpeg-devel,1/3] lavc/sbrdsp: R-V V sum64x5

Checks

Context                          Check    Description
yinshiyou/make_loongarch64       success  Make finished
yinshiyou/make_fate_loongarch64  success  Make fate finished
andriy/make_x86                  success  Make finished
andriy/make_fate_x86             success  Make fate finished

Commit Message

Rémi Denis-Courmont Oct. 29, 2023, 8:25 p.m. UTC
With 128-bit vectors, this is mostly pointless but also harmless.
Performance gains should be more noticeable with larger vector sizes.

neg_odd_64_c:       76.2
neg_odd_64_rvv_i64: 74.7
---
 libavcodec/riscv/sbrdsp_init.c |  5 +++++
 libavcodec/riscv/sbrdsp_rvv.S  | 17 +++++++++++++++++
 2 files changed, 22 insertions(+)
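
For context, neg_odd_64() negates the 32 odd-indexed entries of a 64-element
float array. A minimal C sketch of that semantics (the function name here is
illustrative, not FFmpeg's; the actual C reference flips the IEEE-754 sign
bit rather than using an FP negate, which amounts to the same result):

    /* Sketch of the neg_odd_64 semantics: negate every odd-indexed
     * element of a 64-float array. */
    static void neg_odd_64_sketch(float *x)
    {
        for (int i = 1; i < 64; i += 2)
            x[i] = -x[i];
    }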

Patch

diff --git a/libavcodec/riscv/sbrdsp_init.c b/libavcodec/riscv/sbrdsp_init.c
index e0e62278b0..1b85b2cae9 100644
--- a/libavcodec/riscv/sbrdsp_init.c
+++ b/libavcodec/riscv/sbrdsp_init.c
@@ -25,6 +25,7 @@ 
 
 void ff_sbr_sum64x5_rvv(float *z);
 float ff_sbr_sum_square_rvv(float (*x)[2], int n);
+void ff_sbr_neg_odd_64_rvv(float *x);
 
 av_cold void ff_sbrdsp_init_riscv(SBRDSPContext *c)
 {
@@ -35,5 +36,9 @@  av_cold void ff_sbrdsp_init_riscv(SBRDSPContext *c)
         c->sum64x5 = ff_sbr_sum64x5_rvv;
         c->sum_square = ff_sbr_sum_square_rvv;
     }
+#if __riscv_xlen >= 64
+    if ((flags & AV_CPU_FLAG_RVV_I64) && (flags & AV_CPU_FLAG_RVB_ADDR))
+        c->neg_odd_64 = ff_sbr_neg_odd_64_rvv;
+#endif
 #endif
 }
diff --git a/libavcodec/riscv/sbrdsp_rvv.S b/libavcodec/riscv/sbrdsp_rvv.S
index 4684630953..b510190b15 100644
--- a/libavcodec/riscv/sbrdsp_rvv.S
+++ b/libavcodec/riscv/sbrdsp_rvv.S
@@ -67,3 +67,20 @@  func ff_sbr_sum_square_rvv, zve32f
 NOHWF   fmv.x.w  a0, fa0
         ret
 endfunc
+
+#if __riscv_xlen >= 64
+func ff_sbr_neg_odd_64_rvv, zve64x
+        li      a1, 32
+        li      t1, 1 << 63
+1:
+        vsetvli t0, a1, e64, m8, ta, ma
+        vle64.v v8, (a0)
+        sub     a1, a1, t0
+        vxor.vx v8, v8, t1
+        vse64.v v8, (a0)
+        sh3add  a0, t0, a0
+        bnez    t0, 1b
+
+        ret
+endfunc
+#endif
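
The kernel exploits the data layout: with e64 elements (hence the zve64x
target and the AV_CPU_FLAG_RVV_I64 gate), each vector lane holds one
even/odd pair of floats, and XORing the lane with 1 << 63 flips only the
sign bit of the odd-indexed float, which sits in the high half of the pair
on a little-endian target. vsetvli requests up to 32 such pairs per
iteration, sh3add (a Zba instruction, hence the AV_CPU_FLAG_RVB_ADDR check)
advances the pointer by 8 bytes per pair, and the loop exits once vsetvli
returns zero elements remaining. A scalar C equivalent of the same trick
(a sketch; the helper name and the memcpy round-trip are illustrative, not
taken from the patch):

    #include <stdint.h>
    #include <string.h>

    /* Scalar version of the vector trick above: view the 64 floats as
     * 32 little-endian 64-bit pairs and flip bit 63, i.e. the sign bit
     * of the high (odd-indexed) float of each pair. */
    static void neg_odd_64_pairs(float *x)
    {
        for (int i = 0; i < 32; i++) {
            uint64_t pair;
            memcpy(&pair, &x[2 * i], sizeof pair);
            pair ^= UINT64_C(1) << 63;
            memcpy(&x[2 * i], &pair, sizeof pair);
        }
    }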