Message ID | 20230715205705.34475-1-remi@remlab.net |
---|---|
State | New |
Series | [FFmpeg-devel,1/2] lavc/aacpsdsp: rework RISC-V add_squares |
Context | Check | Description |
---|---|---|
yinshiyou/make_loongarch64 | success | Make finished |
yinshiyou/make_fate_loongarch64 | success | Make fate finished |
andriy/make_x86 | success | Make finished |
andriy/make_fate_x86 | success | Make fate finished |
On Saturday, 15 July 2023 at 23:57:04 EEST, Rémi Denis-Courmont wrote:
> Segmented loads are kinda slow, so this advantageously uses a
> unit-strided load and narrowing shifts instead.
>
> Before:
> ps_add_squares_rvv_f32: 30352.2
>
> After:
> ps_add_squares_rvv_f32: 11973.7
> ---
>  libavcodec/riscv/aacpsdsp_rvv.S | 11 +++++++----
>  1 file changed, 7 insertions(+), 4 deletions(-)
>
> diff --git a/libavcodec/riscv/aacpsdsp_rvv.S b/libavcodec/riscv/aacpsdsp_rvv.S
> index 80bd19f6ad..b7ea314fc3 100644
> --- a/libavcodec/riscv/aacpsdsp_rvv.S
> +++ b/libavcodec/riscv/aacpsdsp_rvv.S
> @@ -1,5 +1,5 @@
>  /*
> - * Copyright © 2022 Rémi Denis-Courmont.
> + * Copyright © 2022-2023 Rémi Denis-Courmont.
>   *
>   * This file is part of FFmpeg.
>   *
> @@ -21,13 +21,16 @@
>  #include "libavutil/riscv/asm.S"
>
>  func ff_ps_add_squares_rvv, zve32f
> +        li        t1, 32
>  1:
>          vsetvli   t0, a2, e32, m1, ta, ma
> -        vlseg2e32.v v24, (a1)
> -        sub       a2, a2, t0
> +        vle64.v   v8, (a1)

Requires zve64x

> +        vnsrl.wx  v24, v8, zero
>          vle32.v   v16, (a0)
> -        sh3add    a1, t0, a1
> +        sub       a2, a2, t0
> +        vnsrl.wx  v25, v8, t1
>          vfmacc.vv v16, v24, v24
> +        sh3add    a1, t0, a1
>          vfmacc.vv v16, v25, v25
>          vse32.v   v16, (a0)
>          sh2add    a0, t0, a0
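For readers less familiar with this DSP hook, here is a minimal scalar sketch of what ps_add_squares computes, loosely following the shape of the C template in libavcodec/aacpsdsp_template.c (the name ps_add_squares_ref and the float-only signature are illustrative, not the tree's exact code). The input is interleaved (re, im) pairs, which is why the earlier RVV version reached for a two-field segmented load.

```c
/* Scalar model of ps_add_squares: src holds interleaved (re, im)
 * float pairs, and each output sample accumulates the squared
 * magnitude of the corresponding pair. */
static void ps_add_squares_ref(float *dst, const float (*src)[2], int n)
{
    for (int i = 0; i < n; i++)
        dst[i] += src[i][0] * src[i][0] + src[i][1] * src[i][1];
}
```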
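And a little-endian scalar model of the replacement trick itself (split_pair is a hypothetical helper, for illustration only): treating each (re, im) pair as one 64-bit element and peeling the halves off with shifts by 0 and 32 mirrors what vle64.v plus the two vnsrl.wx instructions do across a whole vector register. Operating on 64-bit vector elements is also exactly why the routine now needs Zve64x rather than just the Zve32f it declares.

```c
#include <stdint.h>
#include <string.h>

/* Little-endian model of the deinterleave used in the patch:
 * one 64-bit load grabs a (re, im) pair; narrowing shifts by 0 and
 * 32 recover the two 32-bit lanes, as vnsrl.wx does vector-wide. */
static void split_pair(const float src[2], float *re, float *im)
{
    uint64_t pair;
    uint32_t lo, hi;

    memcpy(&pair, src, sizeof pair);   /* like vle64.v v8, (a1)       */
    lo = (uint32_t)(pair >>  0);       /* like vnsrl.wx v24, v8, zero */
    hi = (uint32_t)(pair >> 32);       /* like vnsrl.wx v25, v8, t1   */
    memcpy(re, &lo, sizeof *re);
    memcpy(im, &hi, sizeof *im);
}
```

In the vector loop the shift amount of 32 is loaded into t1 once, outside the loop, which is what the new `li t1, 32` is for.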