[FFmpeg-devel,2/6] lavc/pixblockdsp: aligned R-V V 8-bit functions

Message ID 20231027192540.27373-2-remi@remlab.net
State Accepted
Commit 300ee8b02d8361c88fe86c0950008ee6152b5616
Series [FFmpeg-devel,1/6] lavc/pixblockdsp: rename unaligned R-V V functions

Checks

Context Check Description
yinshiyou/make_loongarch64 success Make finished
yinshiyou/make_fate_loongarch64 success Make fate finished
andriy/make_x86 success Make finished
andriy/make_fate_x86 success Make fate finished

Commit Message

Rémi Denis-Courmont Oct. 27, 2023, 7:25 p.m. UTC
If the scan lines are aligned, we can load each row as a single 64-bit
element, thus avoiding a segmented load. We can then factor the
conversion or subtraction over the whole block at once.

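For context, here is a minimal scalar sketch of what the two routines
compute (illustrative only; it matches the prototypes declared in the
patch below, but is not the FFmpeg reference implementation):

#include <stddef.h>
#include <stdint.h>

/* get_pixels: widen an 8x8 block of unsigned bytes to int16_t. */
static void get_pixels_ref(int16_t *block, const uint8_t *pixels,
                           ptrdiff_t stride)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++)
            block[8 * i + j] = pixels[j];
        pixels += stride;
    }
}

/* diff_pixels: byte-wise difference of two 8x8 blocks, widened to int16_t. */
static void diff_pixels_ref(int16_t *block, const uint8_t *s1,
                            const uint8_t *s2, ptrdiff_t stride)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++)
            block[8 * i + j] = s1[j] - s2[j];
        s1 += stride;
        s2 += stride;
    }
}

The vector versions below load each row as one 64-bit element with a
strided vlse64.v, then perform the widening move (vwcvtu.x.x.v) or
widening subtraction (vwsubu.vv) over all 64 bytes in one pass.
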
In principle, the same optimisation should be possible for high bit
depth, but a row of eight 16-bit samples spans 128 bits, so it would
require 128-bit elements, for which no FFmpeg CPU flag exists.
---
 libavcodec/riscv/pixblockdsp_init.c | 11 +++++++++++
 libavcodec/riscv/pixblockdsp_rvv.S  | 21 +++++++++++++++++++++
 2 files changed, 32 insertions(+)

Patch

diff --git a/libavcodec/riscv/pixblockdsp_init.c b/libavcodec/riscv/pixblockdsp_init.c
index 8f24281217..7d259a032f 100644
--- a/libavcodec/riscv/pixblockdsp_init.c
+++ b/libavcodec/riscv/pixblockdsp_init.c
@@ -32,10 +32,14 @@  void ff_get_pixels_8_rvi(int16_t *block, const uint8_t *pixels,
 void ff_get_pixels_16_rvi(int16_t *block, const uint8_t *pixels,
                           ptrdiff_t stride);
 
+void ff_get_pixels_8_rvv(int16_t *block, const uint8_t *pixels,
+                         ptrdiff_t stride);
 void ff_get_pixels_unaligned_8_rvv(int16_t *block, const uint8_t *pixels,
                                    ptrdiff_t stride);
 void ff_get_pixels_unaligned_16_rvv(int16_t *block, const uint8_t *pixels,
                                     ptrdiff_t stride);
+void ff_diff_pixels_rvv(int16_t *block, const uint8_t *s1,
+                        const uint8_t *s2, ptrdiff_t stride);
 void ff_diff_pixels_unaligned_rvv(int16_t *block, const uint8_t *s1,
                                   const uint8_t *s2, ptrdiff_t stride);
 
@@ -64,6 +68,13 @@  av_cold void ff_pixblockdsp_init_riscv(PixblockDSPContext *c,
 
         c->diff_pixels = ff_diff_pixels_unaligned_rvv;
         c->diff_pixels_unaligned = ff_diff_pixels_unaligned_rvv;
+
+        if (cpu_flags & AV_CPU_FLAG_RVV_I64) {
+            if (!high_bit_depth)
+                c->get_pixels = ff_get_pixels_8_rvv;
+
+            c->diff_pixels = ff_diff_pixels_rvv;
+        }
     }
 #endif
 }
diff --git a/libavcodec/riscv/pixblockdsp_rvv.S b/libavcodec/riscv/pixblockdsp_rvv.S
index e3a2fcc6ef..80c7415acf 100644
--- a/libavcodec/riscv/pixblockdsp_rvv.S
+++ b/libavcodec/riscv/pixblockdsp_rvv.S
@@ -20,6 +20,16 @@ 
 
 #include "libavutil/riscv/asm.S"
 
+func ff_get_pixels_8_rvv, zve64x
+        vsetivli zero, 8, e8, mf2, ta, ma
+        li      t0, 8 * 8
+        vlse64.v v16, (a1), a2
+        vsetvli zero, t0, e8, m4, ta, ma
+        vwcvtu.x.x.v v8, v16
+        vse16.v v8, (a0)
+        ret
+endfunc
+
 func ff_get_pixels_unaligned_8_rvv, zve32x
         vsetivli     zero, 8, e8, mf2, ta, ma
         vlsseg8e8.v  v16, (a1), a2
@@ -42,6 +52,17 @@  func ff_get_pixels_unaligned_16_rvv, zve32x
         ret
 endfunc
 
+func ff_diff_pixels_rvv, zve64x
+        vsetivli zero, 8, e8, mf2, ta, ma
+        li      t0, 8 * 8
+        vlse64.v v16, (a1), a3
+        vlse64.v v24, (a2), a3
+        vsetvli zero, t0, e8, m4, ta, ma
+        vwsubu.vv v8, v16, v24
+        vse16.v v8, (a0)
+        ret
+endfunc
+
 func ff_diff_pixels_unaligned_rvv, zve32x
         vsetivli    zero, 8, e8, mf2, ta, ma
         vlsseg8e8.v v16, (a1), a3