@@ -28,10 +28,14 @@ void ff_flac_decorrelate_ls_16_rvv(uint8_t **out, int32_t **in,
int channels, int len, int shift);
void ff_flac_decorrelate_rs_16_rvv(uint8_t **out, int32_t **in,
int channels, int len, int shift);
+void ff_flac_decorrelate_ms_16_rvv(uint8_t **out, int32_t **in,
+ int channels, int len, int shift);
void ff_flac_decorrelate_ls_32_rvv(uint8_t **out, int32_t **in,
int channels, int len, int shift);
void ff_flac_decorrelate_rs_32_rvv(uint8_t **out, int32_t **in,
int channels, int len, int shift);
+void ff_flac_decorrelate_ms_32_rvv(uint8_t **out, int32_t **in,
+ int channels, int len, int shift);
av_cold void ff_flacdsp_init_riscv(FLACDSPContext *c, enum AVSampleFormat fmt,
int channels)
@@ -44,10 +48,12 @@ av_cold void ff_flacdsp_init_riscv(FLACDSPContext *c, enum AVSampleFormat fmt,
case AV_SAMPLE_FMT_S16:
c->decorrelate[1] = ff_flac_decorrelate_ls_16_rvv;
c->decorrelate[2] = ff_flac_decorrelate_rs_16_rvv;
+ c->decorrelate[3] = ff_flac_decorrelate_ms_16_rvv;
break;
case AV_SAMPLE_FMT_S32:
c->decorrelate[1] = ff_flac_decorrelate_ls_32_rvv;
c->decorrelate[2] = ff_flac_decorrelate_rs_32_rvv;
+ c->decorrelate[3] = ff_flac_decorrelate_ms_32_rvv;
break;
}
}
@@ -69,6 +69,32 @@ func ff_flac_decorrelate_rs_16_rvv, zve32x
ret
endfunc
+func ff_flac_decorrelate_ms_16_rvv, zve32x
+ ld a0, (a0) // a0 = out[0]: interleaved s16 output buffer
+ ld a2, 8(a1) // a2 = in[1] (presumably the side channel; function is mid-side)
+ ld a1, (a1) // a1 = in[0] (presumably the mid channel)
+1:
+ vsetvli t0, a3, e32, m8, ta, ma // t0 = samples handled this pass
+ vle32.v v8, (a2) // v8 = in[1]
+ sub a3, a3, t0 // len -= t0
+ vle32.v v0, (a1) // v0 = in[0]
+ sh2add a1, t0, a1 // in[0] += t0 (4-byte elements; Zba sh2add)
+ vsra.vi v16, v8, 1 // v16 = in[1] >> 1 (arithmetic shift)
+ sh2add a2, t0, a2 // in[1] += t0
+ vsub.vv v24, v0, v16 // v24 = in[0] - (in[1] >> 1) -> channel 1
+ vadd.vv v16, v24, v8 // v16 = v24 + in[1] -> channel 0
+ vsll.vx v8, v24, a4 // channel 1 << shift
+ vsll.vx v0, v16, a4 // channel 0 << shift
+ vsetvli zero, zero, e16, m4, ta, ma // same element count, 16-bit elements for narrowing
+ vncvt.x.x.w v0, v0 // narrow channel 0 to s16 (v0..v3)
+ vncvt.x.x.w v4, v8 // narrow channel 1 to s16 (v4..v7)
+ vsseg2e16.v v0, (a0) // store interleaved {ch0, ch1} sample pairs
+ sh2add a0, t0, a0 // out += t0 * 4 bytes (2 channels x 2 bytes)
+ bnez a3, 1b // loop until len reaches 0
+
+ ret
+endfunc
+
func ff_flac_decorrelate_ls_32_rvv, zve32x
ld a0, (a0)
ld a2, 8(a1)
@@ -110,4 +136,27 @@ func ff_flac_decorrelate_rs_32_rvv, zve32x
ret
endfunc
+
+func ff_flac_decorrelate_ms_32_rvv, zve32x
+ ld a0, (a0) // a0 = out[0]: interleaved s32 output buffer
+ ld a2, 8(a1) // a2 = in[1] (presumably the side channel; function is mid-side)
+ ld a1, (a1) // a1 = in[0] (presumably the mid channel)
+1:
+ vsetvli t0, a3, e32, m4, ta, ma // t0 = samples handled this pass
+ vle32.v v4, (a2) // v4 = in[1]
+ sub a3, a3, t0 // len -= t0
+ vle32.v v0, (a1) // v0 = in[0]
+ sh2add a1, t0, a1 // in[0] += t0 (4-byte elements; Zba sh2add)
+ vsra.vi v8, v4, 1 // v8 = in[1] >> 1 (arithmetic shift)
+ sh2add a2, t0, a2 // in[1] += t0
+ vsub.vv v12, v0, v8 // v12 = in[0] - (in[1] >> 1) -> channel 1
+ vadd.vv v8, v12, v4 // v8 = v12 + in[1] -> channel 0
+ vsll.vx v4, v12, a4 // channel 1 << shift (v4..v7)
+ vsll.vx v0, v8, a4 // channel 0 << shift (v0..v3)
+ vsseg2e32.v v0, (a0) // store interleaved {ch0, ch1} sample pairs
+ sh3add a0, t0, a0 // out += t0 * 8 bytes (2 channels x 4 bytes)
+ bnez a3, 1b // loop until len reaches 0
+
+ ret
+endfunc
#endif