
[FFmpeg-devel,7/7] lavc/me_cmp: R-V V nsse

Message ID CAEa-L+t7ueXXH1+ByiWamOSYm3HdG9qBGh90EVH1ESExCWEG7w@mail.gmail.com
State New
Series [FFmpeg-devel,1/7] lavc/me_cmp: R-V V pix_abs

Checks

Context                          Check    Description
andriy/configure_x86             warning  Failed to apply patch
yinshiyou/configure_loongarch64  warning  Failed to apply patch

Commit Message

flow gg Feb. 6, 2024, 3:56 p.m. UTC

Comments

Rémi Denis-Courmont Feb. 22, 2024, 3:20 p.m. UTC | #1
On Tuesday, 6 February 2024 at 17:56:59 EET, flow gg wrote:
> 

Use 'static' functions where possible.
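
A minimal sketch of what this suggestion amounts to for the nsse16 wrapper (same body as in the patch below; the only change is giving the wrapper internal linkage and defining it above ff_me_cmp_init_riscv() in me_cmp_init.c, so the forward prototype is no longer needed; the nsse8 wrapper would be handled the same way):

    /* Sketch only: identical body to the patch's wrapper, now with
     * internal linkage since it is used only within this file. */
    static int nsse16_rvv_wrapper(MpegEncContext *c, const uint8_t *s1,
                                  const uint8_t *s2, ptrdiff_t stride, int h)
    {
        if (c)
            return ff_nsse16_rvv(c->avctx->nsse_weight, s1, s2, stride, h);
        else
            return ff_nsse16_rvv(8, s1, s2, stride, h);
    }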
flow gg Feb. 22, 2024, 3:41 p.m. UTC | #2
Okay, updated it in the reply

Rémi Denis-Courmont <remi@remlab.net> wrote on Thursday, 22 February 2024 at 23:20:

> On Tuesday, 6 February 2024 at 17:56:59 EET, flow gg wrote:
> >
>
> Use 'static' functions where possible.
>
> --
> レミ・デニ-クールモン
> http://www.remlab.net/

Patch

From 31635394e89318c554a9653bd22791336309951e Mon Sep 17 00:00:00 2001
From: sunyuechi <sunyuechi@iscas.ac.cn>
Date: Tue, 6 Feb 2024 22:51:47 +0800
Subject: [PATCH 7/7] lavc/me_cmp: R-V V nsse

C908:
nsse_0_c: 1990.0
nsse_0_rvv_i32: 572.0
nsse_1_c: 910.0
nsse_1_rvv_i32: 456.0
---
 libavcodec/riscv/me_cmp_init.c |  30 +++++++++
 libavcodec/riscv/me_cmp_rvv.S  | 118 +++++++++++++++++++++++++++++++++
 2 files changed, 148 insertions(+)

diff --git a/libavcodec/riscv/me_cmp_init.c b/libavcodec/riscv/me_cmp_init.c
index 24e78e3eeb..48c0d3d827 100644
--- a/libavcodec/riscv/me_cmp_init.c
+++ b/libavcodec/riscv/me_cmp_init.c
@@ -55,6 +55,15 @@  int ff_vsad8_rvv(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, ptrdif
 int ff_vsad_intra16_rvv(MpegEncContext *c, const uint8_t *s, const uint8_t *dummy, ptrdiff_t stride, int h);
 int ff_vsad_intra8_rvv(MpegEncContext *c, const uint8_t *s, const uint8_t *dummy, ptrdiff_t stride, int h);
 
+int ff_nsse16_rvv(int multiplier, const uint8_t *s1, const uint8_t *s2,
+                    ptrdiff_t stride, int h);
+int nsse16_rvv_wrapper(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2,
+                        ptrdiff_t stride, int h);
+int ff_nsse8_rvv(int multiplier, const uint8_t *s1, const uint8_t *s2,
+                    ptrdiff_t stride, int h);
+int nsse8_rvv_wrapper(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2,
+                        ptrdiff_t stride, int h);
+
 av_cold void ff_me_cmp_init_riscv(MECmpContext *c, AVCodecContext *avctx)
 {
 #if HAVE_RVV
@@ -82,6 +91,27 @@  av_cold void ff_me_cmp_init_riscv(MECmpContext *c, AVCodecContext *avctx)
         c->vsad[1] = ff_vsad8_rvv;
         c->vsad[4] = ff_vsad_intra16_rvv;
         c->vsad[5] = ff_vsad_intra8_rvv;
+
+        c->nsse[0] = nsse16_rvv_wrapper;
+        c->nsse[1] = nsse8_rvv_wrapper;
     }
 #endif
 }
+
+int nsse16_rvv_wrapper(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2,
+                        ptrdiff_t stride, int h)
+{
+    if (c)
+        return ff_nsse16_rvv(c->avctx->nsse_weight, s1, s2, stride, h);
+    else
+        return ff_nsse16_rvv(8, s1, s2, stride, h);
+}
+
+int nsse8_rvv_wrapper(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2,
+                        ptrdiff_t stride, int h)
+{
+    if (c)
+        return ff_nsse8_rvv(c->avctx->nsse_weight, s1, s2, stride, h);
+    else
+        return ff_nsse8_rvv(8, s1, s2, stride, h);
+}
diff --git a/libavcodec/riscv/me_cmp_rvv.S b/libavcodec/riscv/me_cmp_rvv.S
index f32ae6b259..c9ae5bb6fc 100644
--- a/libavcodec/riscv/me_cmp_rvv.S
+++ b/libavcodec/riscv/me_cmp_rvv.S
@@ -407,3 +407,121 @@  endfunc
 func ff_vsad_intra8_rvv, zve32x
         vsad_vsse_intra8 abs
 endfunc
+
+func ff_nsse16_rvv, zve32x
+        .macro squarediff16
+        vsetivli        zero, 16, e8, m1, tu, ma
+        vle8.v          v4, (a1)
+        vle8.v          v12, (a2)
+        vwsubu.vv       v16, v4, v12
+        vsetvli         zero, zero, e16, m2, tu, ma
+        vwmacc.vv       v24, v16, v16
+        .endm
+
+        .macro gradiff16 srcx srcv
+        vsetivli        zero, 16, e8, m1, tu, ma
+        vle8.v          v8, (\srcx)
+        vslide1down.vx  v0, \srcv, t5
+        vslide1down.vx  v16, v8, t5
+        vwsubu.vv       v20, \srcv, v0
+        vwsubu.wv       v0, v20, v8
+        vwaddu.wv       v20, v0, v16
+        vsetivli        zero, 15, e16, m2, tu, ma
+        vneg.v          v0, v20
+        vmax.vv         v0, v20, v0
+        .endm
+
+        csrwi           vxrm, 0
+        vsetivli        t0, 16, e32, m4, ta, ma
+        addi            a4, a4, -1
+        li              t5, 1
+        vmv.v.x         v24, zero
+        vmv.v.x         v28, zero
+1:
+        add             t1, a1, a3
+        add             t2, a2, a3
+        addi            a4, a4, -1
+        squarediff16
+        gradiff16       t1, v4
+        vwaddu.wv       v28, v28, v0
+        gradiff16       t2, v12
+        vwsubu.wv       v28, v28, v0
+        add             a1, a1, a3
+        add             a2, a2, a3
+        bnez            a4, 1b
+
+        squarediff16
+        vsetivli        zero, 16, e32, m4, tu, ma
+        vmv.s.x         v0, zero
+        vmv.s.x         v4, zero
+        vredsum.vs      v0, v24, v0
+        vredsum.vs      v4, v28, v4
+        vmv.x.s         t1, v0
+        vmv.x.s         t2, v4
+        srai            t3, t2, 31
+        xor             t2, t3, t2
+        sub             t2, t2, t3
+        mul             t2, t2, a0
+        add             a0, t2, t1
+
+        ret
+endfunc
+
+func ff_nsse8_rvv, zve32x
+        .macro squarediff8
+        vsetivli        zero, 8, e8, mf2, tu, ma
+        vle8.v          v4, (a1)
+        vle8.v          v12, (a2)
+        vwsubu.vv       v16, v4, v12
+        vsetvli         zero, zero, e16, m1, tu, ma
+        vwmacc.vv       v24, v16, v16
+        .endm
+
+        .macro gradiff8 srcx srcv
+        vsetivli        zero, 8, e8, mf2, tu, ma
+        vle8.v          v8, (\srcx)
+        vslide1down.vx  v0, \srcv, t5
+        vslide1down.vx  v16, v8, t5
+        vwsubu.vv       v20, \srcv, v0
+        vwsubu.wv       v0, v20, v8
+        vwaddu.wv       v20, v0, v16
+        vsetivli        zero, 7, e16, m1, tu, ma
+        vneg.v          v0, v20
+        vmax.vv         v0, v20, v0
+        .endm
+
+        csrwi           vxrm, 0
+        vsetivli        t0, 8, e32, m2, ta, ma
+        addi            a4, a4, -1
+        li              t5, 1
+        vmv.v.x         v24, zero
+        vmv.v.x         v28, zero
+1:
+        add             t1, a1, a3
+        add             t2, a2, a3
+        addi            a4, a4, -1
+        squarediff8
+        gradiff8        t1, v4
+        vwaddu.wv       v28, v28, v0
+        gradiff8        t2, v12
+        vwsubu.wv       v28, v28, v0
+        add             a1, a1, a3
+        add             a2, a2, a3
+        bnez            a4, 1b
+
+        squarediff8
+        vsetivli        zero, 8, e32, m2, tu, ma
+        vmv.s.x         v0, zero
+        vmv.s.x         v4, zero
+        vredsum.vs      v0, v24, v0
+        vredsum.vs      v4, v28, v4
+        vmv.x.s         t1, v0
+        vmv.x.s         t2, v4
+        srai            t3, t2, 31
+        xor             t2, t3, t2
+        sub             t2, t2, t3
+        mul             t2, t2, a0
+        add             a0, t2, t1
+
+        ret
+endfunc
-- 
2.43.0
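
For readers following the assembly, here is a hedged scalar sketch of what ff_nsse16_rvv() computes, based on my reading of the vector code above and of the existing nsse16_c in libavcodec/me_cmp.c: score1 is the plain sum of squared differences, score2 accumulates the difference of the two blocks' 2x2 gradient magnitudes, and the wrapper passes nsse_weight as the multiplier. The function name nsse16_scalar is ad hoc and not part of the patch.

    #include <stddef.h>
    #include <stdint.h>
    #include "libavutil/common.h"   /* FFABS */

    static int nsse16_scalar(int multiplier, const uint8_t *s1, const uint8_t *s2,
                             ptrdiff_t stride, int h)
    {
        int score1 = 0, score2 = 0;

        for (int y = 0; y < h; y++) {
            /* squared pixel differences over the 16-pixel row */
            for (int x = 0; x < 16; x++)
                score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
            /* difference of 2x2 gradient magnitudes; needs the next row */
            if (y + 1 < h) {
                for (int x = 0; x < 15; x++)
                    score2 += FFABS(s1[x] - s1[x + stride] - s1[x + 1] + s1[x + stride + 1]) -
                              FFABS(s2[x] - s2[x + stride] - s2[x + 1] + s2[x + stride + 1]);
            }
            s1 += stride;
            s2 += stride;
        }
        return score1 + FFABS(score2) * multiplier;
    }

This mirrors the final sequence of the assembly, where the gradient sum in t2 is made absolute, multiplied by the a0 argument (the weight supplied by the wrapper), and added to the squared-difference sum in t1.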