[FFmpeg-devel,5/6] lavc/idctdsp: improve R-V V add_pixels_clamped

Message ID 20231027192540.27373-5-remi@remlab.net
State Accepted
Commit d48810f3a58b4bf611ea4a75275334d30337e786
Series [FFmpeg-devel,1/6] lavc/pixblockdsp: rename unaligned R-V V functions

Checks

Context                          Check    Description
yinshiyou/make_loongarch64       success  Make finished
yinshiyou/make_fate_loongarch64  success  Make fate finished
andriy/make_x86                  success  Make finished
andriy/make_fate_x86             success  Make fate finished

Commit Message

Rémi Denis-Courmont Oct. 27, 2023, 7:25 p.m. UTC
---
 libavcodec/riscv/idctdsp_rvv.S | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
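
For context, add_pixels_clamped() takes the 8x8 block of 16-bit IDCT
residuals at a0 and adds it to an 8x8 block of 8-bit pixels at a1
(row stride in a2), saturating each sum to [0, 255]. A minimal C sketch
of that behaviour, with the prototype modelled on libavcodec/idctdsp.h
(illustrative only, not FFmpeg's exact reference code):

    #include <stddef.h>
    #include <stdint.h>

    static void add_pixels_clamped_ref(const int16_t *block,
                                       uint8_t *pixels,
                                       ptrdiff_t line_size)
    {
        for (int y = 0; y < 8; y++) {
            for (int x = 0; x < 8; x++) {
                int v = pixels[x] + block[x];                /* widen, add */
                pixels[x] = v < 0 ? 0 : (v > 255 ? 255 : v); /* clamp */
            }
            block  += 8;          /* residual rows are packed */
            pixels += line_size;  /* pixel rows are strided */
        }
    }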

Patch

diff --git a/libavcodec/riscv/idctdsp_rvv.S b/libavcodec/riscv/idctdsp_rvv.S
index 4ff72f48d2..fafdddb174 100644
--- a/libavcodec/riscv/idctdsp_rvv.S
+++ b/libavcodec/riscv/idctdsp_rvv.S
@@ -23,7 +23,6 @@ 
 func ff_put_pixels_clamped_rvv, zve32x
         vsetivli    zero, 8, e16, m1, ta, ma
         vlseg8e16.v v24, (a0)
-1:
         /* RVV only has signed-signed and unsigned-unsigned clipping.
          * We need two steps for signed-to-unsigned clipping. */
         vsetvli     t0, zero, e16, m8, ta, ma
@@ -54,17 +53,18 @@  func ff_put_signed_pixels_clamped_rvv, zve64x
         ret
 endfunc
 
-func ff_add_pixels_clamped_rvv, zve32x
-        vsetivli    zero, 8, e8, mf2, ta, ma
-        vlseg8e16.v v24, (a0)
-        vlsseg8e8.v v16, (a1), a2
-        vwaddu.wv   v24, v24, v16
-        vwaddu.wv   v25, v25, v17
-        vwaddu.wv   v26, v26, v18
-        vwaddu.wv   v27, v27, v19
-        vwaddu.wv   v28, v28, v20
-        vwaddu.wv   v29, v29, v21
-        vwaddu.wv   v30, v30, v22
-        vwaddu.wv   v31, v31, v23
-        j           1b
+func ff_add_pixels_clamped_rvv, zve64x
+        vsetivli    zero, 8, e8, mf2, ta, ma
+        li          t0, 8 * 8
+        vlse64.v    v16, (a1), a2
+        vsetvli     zero, t0, e8, m4, ta, ma
+        vle16.v     v24, (a0)
+        vwaddu.wv   v24, v24, v16
+        vsetvli     zero, zero, e16, m8, ta, ma
+        vmax.vx     v24, v24, zero
+        vsetvli     zero, zero, e8, m4, ta, ma
+        vnclipu.wi  v16, v24, 0
+        vsetivli    zero, 8, e8, mf2, ta, ma
+        vsse64.v    v16, (a1), a2
+        ret
 endfunc
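
The rewrite turns ff_add_pixels_clamped_rvv into straight-line code
instead of jumping into ff_put_pixels_clamped_rvv's clamp-and-store
tail (which is why the 1: label in the first hunk can go). Requiring
zve64x lets vlse64.v and vsse64.v treat each 8-byte pixel row as a
single 64-bit element, so all eight strided rows are gathered and
scattered in one instruction each, and the whole 8x8 block is then
processed as one 64-element vector. As the comment in
ff_put_pixels_clamped_rvv notes, RVV only provides signed-signed and
unsigned-unsigned clipping, so the signed-to-unsigned clamp still
takes two steps: vmax.vx against zero removes negative sums, then
vnclipu.wi (unsigned narrowing clip with shift 0) saturates anything
above 255. A scalar C model of that two-step sequence (function name
is mine, for illustration only):

    #include <stdint.h>

    static inline uint8_t clamp_s16_to_u8(int16_t v)
    {
        int16_t nonneg = v < 0 ? 0 : v;     /* vmax.vx    v24, v24, zero */
        uint16_t u = (uint16_t)nonneg;      /* sums are now non-negative */
        return u > 255 ? 255 : (uint8_t)u;  /* vnclipu.wi v16, v24, 0 */
    }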