@@ -37,7 +37,7 @@ av_cold void ff_blockdsp_init_riscv(BlockDSPContext *c)
#if HAVE_RVV
int flags = av_get_cpu_flags();
- if (flags & AV_CPU_FLAG_RVV_I64 && ff_get_rv_vlenb() >= 16) {
+ if (flags & AV_CPU_FLAG_RVV_I64 && ff_rv_vlen_least(128)) {
c->clear_block = ff_clear_block_rvv;
c->clear_blocks = ff_clear_blocks_rvv;
c->fill_block_tab[0] = ff_fill_block16_rvv;
@@ -34,7 +34,7 @@ av_cold void ff_g722dsp_init_riscv(G722DSPContext *dsp)
#if HAVE_RVV
int flags = av_get_cpu_flags();
- if ((flags & AV_CPU_FLAG_RVV_I32) && ff_get_rv_vlenb() >= 16)
+ if ((flags & AV_CPU_FLAG_RVV_I32) && ff_rv_vlen_least(128))
dsp->apply_qmf = ff_g722_apply_qmf_rvv;
#endif
}
@@ -38,7 +38,7 @@ av_cold void ff_h264chroma_init_riscv(H264ChromaContext *c, int bit_depth)
int flags = av_get_cpu_flags();
if (bit_depth == 8 && (flags & AV_CPU_FLAG_RVV_I32) &&
- (flags & AV_CPU_FLAG_RVB_ADDR) && ff_get_rv_vlenb() >= 16) {
+ (flags & AV_CPU_FLAG_RVB_ADDR) && ff_rv_vlen_least(128)) {
c->put_h264_chroma_pixels_tab[0] = h264_put_chroma_mc8_rvv;
c->avg_h264_chroma_pixels_tab[0] = h264_avg_chroma_mc8_rvv;
c->put_h264_chroma_pixels_tab[1] = h264_put_chroma_mc4_rvv;
@@ -39,7 +39,7 @@ av_cold void ff_idctdsp_init_riscv(IDCTDSPContext *c, AVCodecContext *avctx,
#if HAVE_RVV
int flags = av_get_cpu_flags();
- if ((flags & AV_CPU_FLAG_RVV_I64) && ff_get_rv_vlenb() >= 16) {
+ if ((flags & AV_CPU_FLAG_RVV_I64) && ff_rv_vlen_least(128)) {
c->put_pixels_clamped = ff_put_pixels_clamped_rvv;
c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_rvv;
c->add_pixels_clamped = ff_add_pixels_clamped_rvv;
@@ -82,7 +82,7 @@ av_cold void ff_me_cmp_init_riscv(MECmpContext *c, AVCodecContext *avctx)
#if HAVE_RVV
int flags = av_get_cpu_flags();
- if (flags & AV_CPU_FLAG_RVV_I32 && ff_get_rv_vlenb() >= 16) {
+ if (flags & AV_CPU_FLAG_RVV_I32 && ff_rv_vlen_least(128)) {
c->pix_abs[0][0] = ff_pix_abs16_rvv;
c->sad[0] = ff_pix_abs16_rvv;
c->pix_abs[1][0] = ff_pix_abs8_rvv;
@@ -56,7 +56,7 @@ av_cold void ff_pixblockdsp_init_riscv(PixblockDSPContext *c,
}
#if HAVE_RVV
- if ((cpu_flags & AV_CPU_FLAG_RVV_I32) && ff_get_rv_vlenb() >= 16) {
+ if ((cpu_flags & AV_CPU_FLAG_RVV_I32) && ff_rv_vlen_least(128)) {
c->diff_pixels = ff_diff_pixels_unaligned_rvv;
c->diff_pixels_unaligned = ff_diff_pixels_unaligned_rvv;
}
@@ -33,7 +33,7 @@ av_cold void ff_rv34dsp_init_riscv(RV34DSPContext *c)
#if HAVE_RVV
int flags = av_get_cpu_flags();
- if (flags & AV_CPU_FLAG_RVV_I32 && ff_get_rv_vlenb() >= 16) {
+ if (flags & AV_CPU_FLAG_RVV_I32 && ff_rv_vlen_least(128)) {
c->rv34_inv_transform_dc = ff_rv34_inv_transform_dc_rvv;
c->rv34_idct_dc_add = ff_rv34_idct_dc_add_rvv;
}
@@ -40,7 +40,7 @@ av_cold void ff_rv40dsp_init_riscv(RV34DSPContext *c)
#if HAVE_RVV
int flags = av_get_cpu_flags();
- if ((flags & AV_CPU_FLAG_RVV_I32) && ff_get_rv_vlenb() >= 16 &&
+ if ((flags & AV_CPU_FLAG_RVV_I32) && ff_rv_vlen_least(128) &&
(flags & AV_CPU_FLAG_RVB_ADDR)) {
c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_rvv;
c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_rvv;
@@ -53,7 +53,7 @@ av_cold void ff_sbrdsp_init_riscv(SBRDSPContext *c)
c->sum_square = ff_sbr_sum_square_rvv;
c->hf_gen = ff_sbr_hf_gen_rvv;
c->hf_g_filt = ff_sbr_hf_g_filt_rvv;
-        if (ff_get_rv_vlenb() >= 16) {
+        if (ff_rv_vlen_least(128)) {
c->hf_apply_noise[0] = ff_sbr_hf_apply_noise_0_rvv;
c->hf_apply_noise[2] = ff_sbr_hf_apply_noise_2_rvv;
if (flags & AV_CPU_FLAG_RVB_BASIC) {
@@ -35,7 +35,7 @@ av_cold void ff_vc1dsp_init_riscv(VC1DSPContext *dsp)
#if HAVE_RVV
int flags = av_get_cpu_flags();
- if (flags & AV_CPU_FLAG_RVV_I32 && ff_get_rv_vlenb() >= 16) {
+ if (flags & AV_CPU_FLAG_RVV_I32 && ff_rv_vlen_least(128)) {
dsp->vc1_inv_trans_4x8_dc = ff_vc1_inv_trans_4x8_dc_rvv;
dsp->vc1_inv_trans_4x4_dc = ff_vc1_inv_trans_4x4_dc_rvv;
if (flags & AV_CPU_FLAG_RVV_I64) {
@@ -56,7 +56,7 @@ av_cold void ff_vp78dsp_init_riscv(VP8DSPContext *c)
c->put_vp8_bilinear_pixels_tab[2][0][0] = ff_put_vp8_pixels4_rvi;
}
#if HAVE_RVV
- if (flags & AV_CPU_FLAG_RVV_I32 && ff_get_rv_vlenb() >= 16) {
+ if (flags & AV_CPU_FLAG_RVV_I32 && ff_rv_vlen_least(128)) {
c->put_vp8_bilinear_pixels_tab[0][0][1] = ff_put_vp8_bilin16_h_rvv;
c->put_vp8_bilinear_pixels_tab[0][0][2] = ff_put_vp8_bilin16_h_rvv;
c->put_vp8_bilinear_pixels_tab[1][0][1] = ff_put_vp8_bilin8_h_rvv;
@@ -107,7 +107,7 @@ av_cold void ff_vp8dsp_init_riscv(VP8DSPContext *c)
#if HAVE_RVV
int flags = av_get_cpu_flags();
- if (flags & AV_CPU_FLAG_RVV_I32 && ff_get_rv_vlenb() >= 16) {
+ if (flags & AV_CPU_FLAG_RVV_I32 && ff_rv_vlen_least(128)) {
c->vp8_idct_dc_add = ff_vp8_idct_dc_add_rvv;
c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_rvv;
if (flags & AV_CPU_FLAG_RVB_ADDR) {
@@ -29,7 +29,7 @@ static av_cold void vp9dsp_intrapred_init_rvv(VP9DSPContext *dsp, int bpp)
#if HAVE_RVV
int flags = av_get_cpu_flags();
- if (bpp == 8 && flags & AV_CPU_FLAG_RVV_I64 && ff_get_rv_vlenb() >= 16) {
+ if (bpp == 8 && flags & AV_CPU_FLAG_RVV_I64 && ff_rv_vlen_least(128)) {
dsp->intra_pred[TX_8X8][DC_PRED] = ff_dc_8x8_rvv;
dsp->intra_pred[TX_8X8][LEFT_DC_PRED] = ff_dc_left_8x8_rvv;
dsp->intra_pred[TX_8X8][DC_127_PRED] = ff_dc_127_8x8_rvv;
@@ -38,7 +38,7 @@ static av_cold void vp9dsp_intrapred_init_rvv(VP9DSPContext *dsp, int bpp)
dsp->intra_pred[TX_8X8][TOP_DC_PRED] = ff_dc_top_8x8_rvv;
}
- if (bpp == 8 && flags & AV_CPU_FLAG_RVV_I32 && ff_get_rv_vlenb() >= 16) {
+ if (bpp == 8 && flags & AV_CPU_FLAG_RVV_I32 && ff_rv_vlen_least(128)) {
dsp->intra_pred[TX_32X32][DC_PRED] = ff_dc_32x32_rvv;
dsp->intra_pred[TX_16X16][DC_PRED] = ff_dc_16x16_rvv;
dsp->intra_pred[TX_32X32][LEFT_DC_PRED] = ff_dc_left_32x32_rvv;
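
Note: every hunk above swaps the open-coded byte check ff_get_rv_vlenb() >= 16 for ff_rv_vlen_least(128); a VLENB of 16 bytes is a VLEN of 128 bits, so the test is unchanged, only expressed in bits. The sketch below illustrates the assumed semantics of the new helper; the name sketch_rv_vlen_least and its exact definition are illustrative only (the real helper is expected to sit next to ff_get_rv_vlenb() in libavutil/riscv/cpu.h), and the snippet compiles only inside the FFmpeg tree.

#include <stdbool.h>
#include "libavutil/riscv/cpu.h"   /* declares ff_get_rv_vlenb() */

/* Illustrative stand-in for ff_rv_vlen_least(): true when the runtime RVV
 * vector length (VLEN) is at least `bits` bits.  ff_get_rv_vlenb() reports
 * VLENB, the vector register size in bytes, hence the factor of 8. */
static inline bool sketch_rv_vlen_least(unsigned int bits)
{
    return 8 * ff_get_rv_vlenb() >= bits;
}

/* Usage mirroring the hunks above: both forms gate on VLEN >= 128 bits.
 *   if (flags & AV_CPU_FLAG_RVV_I32 && ff_get_rv_vlenb() >= 16)    ...
 *   if (flags & AV_CPU_FLAG_RVV_I32 && sketch_rv_vlen_least(128))  ...
 */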