@@ -134,7 +134,7 @@ func ff_vp7_idct_dc_add4y_rvv, zve32x
li t1, 23170
vlse16.v v8, (a1), t0 # block[0..3][0]
vwmul.vx v0, v8, t1
- li t2, 0x20000
+ li t2, 0x20000 - (128 << 18)
vsetvli zero, zero, e32, m1, ta, ma
vsra.vi v0, v0, 14
vmul.vx v0, v0, t1
@@ -125,31 +125,31 @@ endfunc
func ff_vp8_idct_dc_add4y_rvv, zve32x
li t0, 32
vsetivli zero, 4, e16, mf2, ta, ma
+ li t1, 4 - (128 << 3)
vlse16.v v8, (a1), t0
- vadd.vi v8, v8, 4
+ vadd.vx v8, v8, t1
vsra.vi v8, v8, 3
# fall through
endfunc
.variant_cc ff_vp78_idct_dc_add4y_rvv
-# v8 = [dc0, dc1, dc2, dc3]
+# v8 = [dc0 - 128, dc1 - 128, dc2 - 128, dc3 - 128] (signed e16; the -128
+# bias lets the signed vnclip below do the [0,255] clamp, and is undone by
+# the final vxor with 128)
func ff_vp78_idct_dc_add4y_rvv, zve32x
vsetivli zero, 16, e16, m2, ta, ma
vid.v v4
+ li a4, 4
vsrl.vi v4, v4, 2
+ li t1, 128
vrgather.vv v0, v8, v4 # replicate each DC four times
vsetvli zero, zero, e8, m1, ta, ma
- li a4, 4
1:
vle8.v v8, (a0)
addi a4, a4, -1
vwaddu.wv v16, v0, v8
sh zero, (a1)
- vsetvli zero, zero, e16, m2, ta, ma
- vmax.vx v16, v16, zero
+ vnclip.wi v8, v16, 0
addi a1, a1, 32
- vsetvli zero, zero, e8, m1, ta, ma
- vnclipu.wi v8, v16, 0
+ vxor.vx v8, v8, t1
vse8.v v8, (a0)
add a0, a0, a2
bnez a4, 1b