[FFmpeg-devel] lavc/lpc: R-V V apply_welch_window

Message ID 20231208174652.21299-1-remi@remlab.net
State New

Checks

Context                          Check    Description
yinshiyou/make_loongarch64       success  Make finished
yinshiyou/make_fate_loongarch64  success  Make fate finished
andriy/make_x86                  success  Make finished
andriy/make_fate_x86             success  Make fate finished

Commit Message

Rémi Denis-Courmont Dec. 8, 2023, 5:46 p.m. UTC
apply_welch_window_even_c:       617.5
apply_welch_window_even_rvv_f64: 235.0
apply_welch_window_odd_c:        709.0
apply_welch_window_odd_rvv_f64:  256.5
---
 libavcodec/lpc.c            |  4 +-
 libavcodec/lpc.h            |  1 +
 libavcodec/riscv/Makefile   |  2 +
 libavcodec/riscv/lpc_init.c | 37 ++++++++++++++++
 libavcodec/riscv/lpc_rvv.S  | 88 +++++++++++++++++++++++++++++++++++++
 5 files changed, 131 insertions(+), 1 deletion(-)
 create mode 100644 libavcodec/riscv/lpc_init.c
 create mode 100644 libavcodec/riscv/lpc_rvv.S
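
For reference, a scalar model of the windowing this routine vectorizes,
reconstructed from the comments in lpc_rvv.S below. This only illustrates
the math as annotated there; it is not necessarily the exact shape of
lpc_apply_welch_window_c in libavcodec/lpc.c, and the function name is made
up for this sketch:

    #include <stddef.h>
    #include <stdint.h>

    /* Scalar model following the assembly's comments: c = 2. / (len - 1),
     * the first half uses w = c - i - 1., an odd-length midpoint is zeroed
     * (with the corresponding input sample skipped), and the second half
     * uses w = c - (len / 2) + i. */
    static void apply_welch_window_model(const int32_t *data, ptrdiff_t len,
                                         double *w_data)
    {
        ptrdiff_t n2 = len >> 1;
        double c = 2.0 / (len - 1.0);
        ptrdiff_t j = 0;

        for (ptrdiff_t i = 0; i < n2; i++, j++) {   /* loop 1 below */
            double w = c - i - 1.0;
            w_data[j] = data[j] * (1.0 - w * w);
        }
        if (len & 1)             /* odd length: matches the sd zero, (a2) path */
            w_data[j++] = 0.0;
        for (ptrdiff_t i = 0; i < n2; i++, j++) {   /* loop 3 below */
            double w = c - n2 + i;
            w_data[j] = data[j] * (1.0 - w * w);
        }
    }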

Comments

Anton Khirnov Dec. 11, 2023, 9:11 a.m. UTC | #1
Quoting Rémi Denis-Courmont (2023-12-08 18:46:51)
> +#if __riscv_xlen >= 64
> +func ff_lpc_apply_welch_window_rvv, zve64d
> +        vsetvli t0, zero, e64, m8, ta, ma
> +        vid.v   v0
> +        addi    t2, a1, -1
> +        vfcvt.f.xu.v v0, v0
> +        li      t3, 2
> +        fcvt.d.l ft2, t2
> +        srai    t1, a1, 1
> +        fcvt.d.l ft3, t3
> +        li      t4, 1
> +        fdiv.d  ft0, ft3, ft2    # ft0 = c = 2. / (len - 1)
> +        fcvt.d.l fa1, t4         # fa1 = 1.
> +        fsub.d  ft1, ft0, fa1
> +        vfrsub.vf v0, v0, ft1    # v0[i] = c - i - 1.
> +1:
> +        vsetvli t0, t1, e64, m8, ta, ma
> +        vfmul.vv v16, v0, v0  # no fused multiply-add as v0 is reused
> +        sub     t1, t1, t0
> +        vle32.v v8, (a0)
> +        fcvt.d.l ft2, t0
> +        vfrsub.vf v16, v16, fa1  # v16 = 1. - w * w
> +        sh2add  a0, t0, a0
> +        vsetvli zero, zero, e32, m4, ta, ma
> +        vfwcvt.f.x.v v24, v8
> +        vsetvli zero, zero, e64, m8, ta, ma
> +        vfsub.vf v0, v0, ft2     # v0 -= vl
> +        vfmul.vv v8, v24, v16
> +        vse64.v v8, (a2)
> +        sh3add  a2, t0, a2
> +        bnez    t1, 1b
> +
> +        andi    t1, a1, 1
> +        beqz    t1, 2f
> +
> +        sd      zero, (a2)
> +        addi    a0, a0, 4
> +        addi    a2, a2, 8
> +2:
> +        vsetvli t0, zero, e64, m8, ta, ma
> +        vid.v   v0
> +        srai    t1, a1, 1
> +        vfcvt.f.xu.v v0, v0
> +        fcvt.d.l ft1, t1
> +        fsub.d  ft1, ft0, ft1    # ft1 = c - (len / 2)
> +        vfadd.vf v0, v0, ft1     # v0[i] = c - (len / 2) + i
> +3:
> +        vsetvli t0, t1, e64, m8, ta, ma
> +        vfmul.vv v16, v0, v0
> +        sub     t1, t1, t0
> +        vle32.v v8, (a0)
> +        fcvt.d.l ft2, t0
> +        vfrsub.vf v16, v16, fa1  # v16 = 1. - w * w
> +        sh2add  a0, t0, a0
> +        vsetvli zero, zero, e32, m4, ta, ma
> +        vfwcvt.f.x.v v24, v8
> +        vsetvli zero, zero, e64, m8, ta, ma
> +        vfadd.vf v0, v0, ft2     # v0 += vl
> +        vfmul.vv v8, v24, v16
> +        vse64.v v8, (a2)
> +        sh3add  a2, t0, a2
> +        bnez    t1, 3b

I think it'd look a lot less like base64 < /dev/random if you vertically
aligned the first operands.
Rémi Denis-Courmont Dec. 11, 2023, 9:50 a.m. UTC | #2
On 11 December 2023 11:11:28 GMT+02:00, Anton Khirnov <anton@khirnov.net> wrote:
>Quoting Rémi Denis-Courmont (2023-12-08 18:46:51)
>> [...]
>
>I think it'd look a lot less like base64 < /dev/random if you vertically
>aligned the first operands.

They are aligned to the 17th column. The problem is that quite a few vector mnemonics are longer than 7 characters.

Anton Khirnov Dec. 11, 2023, 9:57 a.m. UTC | #3
Quoting Rémi Denis-Courmont (2023-12-11 10:50:53)
> On 11 December 2023 11:11:28 GMT+02:00, Anton Khirnov <anton@khirnov.net> wrote:
> >I think it'd look a lot less like base64 < /dev/random if you vertically
> >aligned the first operands.
> 
> They are aligned to the 17th column. The problem is that quite a few vector mnemonics are longer than 7 characters.

Align to 25 or 33 then?
Rémi Denis-Courmont Dec. 11, 2023, 10:03 a.m. UTC | #4
On 11 December 2023 11:57:50 GMT+02:00, Anton Khirnov <anton@khirnov.net> wrote:
>Quoting Rémi Denis-Courmont (2023-12-11 10:50:53)
>> On 11 December 2023 11:11:28 GMT+02:00, Anton Khirnov <anton@khirnov.net> wrote:
>> >I think it'd look a lot less like base64 < /dev/random if you vertically
>> >aligned the first operands.
>> 
>> They are aligned to the 17th column. The problem is that quite a few vector mnemonics are longer than 7 characters.
>
>Align to 25 or 33 then?

IMO that's even worse. The operands end up so far from most of the mnemonics that it hurts legibility more than it helps.

I initially aligned to the longest mnemonic, but that turned out badly whenever revectoring (for obvious reasons).
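
For illustration, here are operands at column 17 (current) versus column 25 (as suggested), using two mnemonics from this patch:

        vsetvli t0, t1, e64, m8, ta, ma
        vfwcvt.f.x.v v24, v8

        vsetvli         t0, t1, e64, m8, ta, ma
        vfwcvt.f.x.v    v24, v8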


Patch

diff --git a/libavcodec/lpc.c b/libavcodec/lpc.c
index dc6a3060ce..9e2fd0f128 100644
--- a/libavcodec/lpc.c
+++ b/libavcodec/lpc.c
@@ -320,7 +320,9 @@  av_cold int ff_lpc_init(LPCContext *s, int blocksize, int max_order,
     s->lpc_apply_welch_window = lpc_apply_welch_window_c;
     s->lpc_compute_autocorr   = lpc_compute_autocorr_c;
 
-#if ARCH_X86
+#if ARCH_RISCV
+    ff_lpc_init_riscv(s);
+#elif ARCH_X86
     ff_lpc_init_x86(s);
 #endif
 
diff --git a/libavcodec/lpc.h b/libavcodec/lpc.h
index 467d0b2830..0200baea5c 100644
--- a/libavcodec/lpc.h
+++ b/libavcodec/lpc.h
@@ -109,6 +109,7 @@  double ff_lpc_calc_ref_coefs_f(LPCContext *s, const float *samples, int len,
  */
 int ff_lpc_init(LPCContext *s, int blocksize, int max_order,
                 enum FFLPCType lpc_type);
+void ff_lpc_init_riscv(LPCContext *s);
 void ff_lpc_init_x86(LPCContext *s);
 
 /**
diff --git a/libavcodec/riscv/Makefile b/libavcodec/riscv/Makefile
index e9825c0856..1d4572fbc5 100644
--- a/libavcodec/riscv/Makefile
+++ b/libavcodec/riscv/Makefile
@@ -33,6 +33,8 @@  OBJS-$(CONFIG_LLVIDDSP) += riscv/llviddsp_init.o
 RVV-OBJS-$(CONFIG_LLVIDDSP) += riscv/llviddsp_rvv.o
 OBJS-$(CONFIG_LLVIDENCDSP) += riscv/llvidencdsp_init.o
 RVV-OBJS-$(CONFIG_LLVIDENCDSP) += riscv/llvidencdsp_rvv.o
+OBJS-$(CONFIG_LPC) += riscv/lpc_init.o
+RVV-OBJS-$(CONFIG_LPC) += riscv/lpc_rvv.o
 OBJS-$(CONFIG_OPUS_DECODER) += riscv/opusdsp_init.o
 RVV-OBJS-$(CONFIG_OPUS_DECODER) += riscv/opusdsp_rvv.o
 OBJS-$(CONFIG_PIXBLOCKDSP) += riscv/pixblockdsp_init.o
diff --git a/libavcodec/riscv/lpc_init.c b/libavcodec/riscv/lpc_init.c
new file mode 100644
index 0000000000..c16e5745f0
--- /dev/null
+++ b/libavcodec/riscv/lpc_init.c
@@ -0,0 +1,37 @@ 
+/*
+ * Copyright © 2022 Rémi Denis-Courmont.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavcodec/lpc.h"
+
+void ff_lpc_apply_welch_window_rvv(const int32_t *, ptrdiff_t, double *);
+
+av_cold void ff_lpc_init_riscv(LPCContext *c)
+{
+#if HAVE_RVV && (__riscv_xlen >= 64)
+    int flags = av_get_cpu_flags();
+
+    if ((flags & AV_CPU_FLAG_RVV_F64) && (flags & AV_CPU_FLAG_RVB_ADDR))
+        c->lpc_apply_welch_window = ff_lpc_apply_welch_window_rvv;
+#endif
+}
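
As with the other per-architecture init files, this only swaps a function pointer at init time; callers are unchanged. A minimal usage sketch, assuming the usual ff_lpc_init()/ff_lpc_end() pairing from lpc.h (the block size, order, and LPC type are illustrative, and error checking is omitted):

    #include <stdint.h>
    #include "libavcodec/lpc.h"

    /* Hypothetical caller: window one block of 4096 samples. */
    static void window_block(const int32_t *samples, double *windowed)
    {
        LPCContext ctx;

        ff_lpc_init(&ctx, 4096, 32, FF_LPC_TYPE_LEVINSON);
        /* The pointer resolves to ff_lpc_apply_welch_window_rvv when the
         * CPU reports AV_CPU_FLAG_RVV_F64 and AV_CPU_FLAG_RVB_ADDR (the
         * latter covering sh2add/sh3add), and to the C version otherwise. */
        ctx.lpc_apply_welch_window(samples, 4096, windowed);
        ff_lpc_end(&ctx);
    }
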
diff --git a/libavcodec/riscv/lpc_rvv.S b/libavcodec/riscv/lpc_rvv.S
new file mode 100644
index 0000000000..2bc729d400
--- /dev/null
+++ b/libavcodec/riscv/lpc_rvv.S
@@ -0,0 +1,88 @@ 
+/*
+ * Copyright © 2023 Rémi Denis-Courmont.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/riscv/asm.S"
+
+#if __riscv_xlen >= 64
+func ff_lpc_apply_welch_window_rvv, zve64d
+        vsetvli t0, zero, e64, m8, ta, ma
+        vid.v   v0
+        addi    t2, a1, -1
+        vfcvt.f.xu.v v0, v0
+        li      t3, 2
+        fcvt.d.l ft2, t2
+        srai    t1, a1, 1
+        fcvt.d.l ft3, t3
+        li      t4, 1
+        fdiv.d  ft0, ft3, ft2    # ft0 = c = 2. / (len - 1)
+        fcvt.d.l fa1, t4         # fa1 = 1.
+        fsub.d  ft1, ft0, fa1
+        vfrsub.vf v0, v0, ft1    # v0[i] = c - i - 1.
+1:
+        vsetvli t0, t1, e64, m8, ta, ma
+        vfmul.vv v16, v0, v0  # no fused multiply-add as v0 is reused
+        sub     t1, t1, t0
+        vle32.v v8, (a0)
+        fcvt.d.l ft2, t0
+        vfrsub.vf v16, v16, fa1  # v16 = 1. - w * w
+        sh2add  a0, t0, a0
+        vsetvli zero, zero, e32, m4, ta, ma
+        vfwcvt.f.x.v v24, v8
+        vsetvli zero, zero, e64, m8, ta, ma
+        vfsub.vf v0, v0, ft2     # v0 -= vl
+        vfmul.vv v8, v24, v16
+        vse64.v v8, (a2)
+        sh3add  a2, t0, a2
+        bnez    t1, 1b
+
+        andi    t1, a1, 1
+        beqz    t1, 2f
+
+        sd      zero, (a2)
+        addi    a0, a0, 4
+        addi    a2, a2, 8
+2:
+        vsetvli t0, zero, e64, m8, ta, ma
+        vid.v   v0
+        srai    t1, a1, 1
+        vfcvt.f.xu.v v0, v0
+        fcvt.d.l ft1, t1
+        fsub.d  ft1, ft0, ft1    # ft1 = c - (len / 2)
+        vfadd.vf v0, v0, ft1     # v0[i] = c - (len / 2) + i
+3:
+        vsetvli t0, t1, e64, m8, ta, ma
+        vfmul.vv v16, v0, v0
+        sub     t1, t1, t0
+        vle32.v v8, (a0)
+        fcvt.d.l ft2, t0
+        vfrsub.vf v16, v16, fa1  # v16 = 1. - w * w
+        sh2add  a0, t0, a0
+        vsetvli zero, zero, e32, m4, ta, ma
+        vfwcvt.f.x.v v24, v8
+        vsetvli zero, zero, e64, m8, ta, ma
+        vfadd.vf v0, v0, ft2     # v0 += vl
+        vfmul.vv v8, v24, v16
+        vse64.v v8, (a2)
+        sh3add  a2, t0, a2
+        bnez    t1, 3b
+
+        ret
+endfunc
+#endif
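
The loops are strip-mined in the usual RVV fashion: each vsetvli grants vl = min(remaining, VLMAX) elements, the body processes that many, and the counter and pointers advance by the granted length (the 32-bit samples are loaded with EEW=32 and widened to f64 by vfwcvt.f.x.v). A control-flow model of loop 1 in C, with VLMAX as a made-up constant standing in for whatever vector length the hardware grants:

    #include <stddef.h>
    #include <stdint.h>

    enum { VLMAX = 16 };  /* hypothetical hardware vector length */

    static void welch_first_half_model(const int32_t *data, ptrdiff_t len,
                                       double *w_data)
    {
        double c = 2.0 / (len - 1.0);
        ptrdiff_t remaining = len >> 1;               /* srai t1, a1, 1 */
        ptrdiff_t i = 0;

        while (remaining > 0) {
            ptrdiff_t vl = remaining < VLMAX ? remaining : VLMAX; /* vsetvli */
            for (ptrdiff_t k = 0; k < vl; k++) {      /* one vector iteration */
                double w = c - (i + k) - 1.0;         /* v0, stepped by vfsub.vf */
                w_data[i + k] = data[i + k] * (1.0 - w * w);
            }
            i += vl;                                  /* sh2add a0 / sh3add a2 */
            remaining -= vl;                          /* sub t1, t1, t0 */
        }
    }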