@@ -75,18 +75,23 @@ endfunc
func ff_vector_fmul_window_rvv, zve32f
// a0: dst, a1: src0, a2: src1, a3: window, a4: length
- vsetvli t0, zero, e16, m2, ta, ma
+ // e16/m2 and e32/m4 are possible but slower due to gather.
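+ // (vrgather's run time grows as O(LMUL^2) on typical hardware, so
+ // halving LMUL quarters the gather cost but only doubles the trip count.)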
+ vsetvli t0, zero, e16, m1, ta, ma
sh2add a2, a4, a2
vid.v v0
sh3add t3, a4, a3
vadd.vi v0, v0, 1
sh3add t0, a4, a0
1:
- vsetvli t2, a4, e16, m2, ta, ma
+ vsetvli t2, a4, e16, m1, ta, ma
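+ // Index calculations stay at e16: 16-bit elements suffice for the
+ // indices and feed the 16-bit-index form of the gather (vrgatherei16).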
slli t4, t2, 2
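+ // With v0[i] = i + 1, v2[i] = t2 - 1 - i: element indices in reverse order.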
vrsub.vx v2, v0, t2
sub t3, t3, t4
- vsetvli zero, zero, e32, m4, ta, ma
+ vsetvli zero, zero, e32, m2, ta, ma
sub a2, a2, t4
vle32.v v8, (t3)
sub t0, t0, t4
@@ -133,6 +134,8 @@ endfunc
// TODO factor vrsub, separate last iteration?
// (a0) = (a1) * reverse(a2) [0..a3-1]
func ff_vector_fmul_reverse_rvv, zve32f
+ // e16/m4 and e32/m8 are possible but slower due to gather.
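+ // (Same rationale as fmul_window above: gather cost scales with LMUL^2.)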
- vsetvli t0, zero, e16, m4, ta, ma
+ vsetvli t0, zero, e16, m2, ta, ma
sh2add a2, a3, a2
vid.v v0