diff mbox series

[FFmpeg-devel] lavc/vp8dsp: R-V V put_bilin_h v unroll

Message ID tencent_C1565BD80F4F886AB785C7905C31D75C1906@qq.com
State New
Headers show
Series [FFmpeg-devel] lavc/vp8dsp: R-V V put_bilin_h v unroll | expand

Checks

Context Check Description
yinshiyou/make_loongarch64 success Make finished
yinshiyou/make_fate_loongarch64 success Make fate finished
andriy/make_x86 success Make finished
andriy/make_fate_x86 success Make fate finished

Commit Message

uk7b@foxmail.com May 30, 2024, 3:26 p.m. UTC
From: sunyuechi <sunyuechi@iscas.ac.cn>

Since len < 64, the vector registers are sufficient, so the loop can be
unrolled directly (a4 is always even).

Another benefit of unrolling is that the vertical case needs one load
fewer per iteration than the horizontal case, since the middle row is
reused.
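
For reference, a rough C model of what one unrolled iteration computes
(illustrative only, with made-up names; the VP8 bilinear filter is
(a*(8-frac) + b*frac + 4) >> 3). Processing two output rows at once means
the vertical case touches rows n, n+1 and n+2, so the middle row is loaded
once instead of twice:

    #include <stddef.h>
    #include <stdint.h>

    /* Two output rows per iteration; frac is the mx/my weight (0..7). */
    static void put_bilin_v_2rows(uint8_t *dst, ptrdiff_t dstride,
                                  const uint8_t *src, ptrdiff_t sstride,
                                  int len, int frac)
    {
        const uint8_t *row0 = src;
        const uint8_t *row1 = src + sstride;      /* loaded once, used twice */
        const uint8_t *row2 = src + 2 * sstride;

        for (int x = 0; x < len; x++) {
            dst[x]           = (row0[x] * (8 - frac) + row1[x] * frac + 4) >> 3;
            dst[dstride + x] = (row1[x] * (8 - frac) + row2[x] * frac + 4) >> 3;
        }
    }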

                                 old                 new
                             C908   X60          C908   X60
vp8_put_bilin4_h_c         :    6.2    5.5     :    6.2    5.5
vp8_put_bilin4_h_rvv_i32   :    2.2    2.0     :    1.5    1.5
vp8_put_bilin4_v_c         :    6.5    5.7     :    6.2    5.7
vp8_put_bilin4_v_rvv_i32   :    2.2    2.0     :    1.2    1.5
vp8_put_bilin8_h_c         :   24.2   21.5     :   24.2   21.5
vp8_put_bilin8_h_rvv_i32   :    5.2    4.7     :    3.5    3.5
vp8_put_bilin8_v_c         :   24.5   21.7     :   24.5   21.7
vp8_put_bilin8_v_rvv_i32   :    5.2    4.7     :    3.5    3.2
vp8_put_bilin16_h_c        :   48.0   42.7     :   48.0   42.7
vp8_put_bilin16_h_rvv_i32  :    5.7    5.0     :    5.2    4.5
vp8_put_bilin16_v_c        :   48.2   43.0     :   48.2   42.7
vp8_put_bilin16_v_rvv_i32  :    5.7    5.2     :    4.5    4.2
---
 libavcodec/riscv/vp8dsp_rvv.S | 34 +++++++++++++++++++++++++++++-----
 1 file changed, 29 insertions(+), 5 deletions(-)

Comments

flow gg May 30, 2024, 3:36 p.m. UTC | #1
I directly copied the VP9 modifications over... Since len <= 16, it seems
it can be improved a bit more.

flow gg May 30, 2024, 3:45 p.m. UTC | #2
Well... since scalar registers are limited, the direct unrolling will stay
like this for now. We can handle different lengths separately in the future.

flow gg June 12, 2024, 9:48 a.m. UTC | #3
ping

Rémi Denis-Courmont June 12, 2024, 2:40 p.m. UTC | #4
On Thursday, May 30, 2024, 18:26:53 EEST, uk7b@foxmail.com wrote:
> From: sunyuechi <sunyuechi@iscas.ac.cn>
> 
> Since len < 64, the registers are sufficient, so it can be
> directly unrolled (a4 is even).
> 
> Another benefit of unrolling is that it reduces one load operation
> vertically compared to horizontally.
> 
>                                  old                 new
>                              C908   X60          C908   X60
> vp8_put_bilin4_h_c         :    6.2    5.5     :    6.2    5.5
> vp8_put_bilin4_h_rvv_i32   :    2.2    2.0     :    1.5    1.5
> vp8_put_bilin4_v_c         :    6.5    5.7     :    6.2    5.7
> vp8_put_bilin4_v_rvv_i32   :    2.2    2.0     :    1.2    1.5
> vp8_put_bilin8_h_c         :   24.2   21.5     :   24.2   21.5
> vp8_put_bilin8_h_rvv_i32   :    5.2    4.7     :    3.5    3.5
> vp8_put_bilin8_v_c         :   24.5   21.7     :   24.5   21.7
> vp8_put_bilin8_v_rvv_i32   :    5.2    4.7     :    3.5    3.2
> vp8_put_bilin16_h_c        :   48.0   42.7     :   48.0   42.7
> vp8_put_bilin16_h_rvv_i32  :    5.7    5.0     :    5.2    4.5
> vp8_put_bilin16_v_c        :   48.2   43.0     :   48.2   42.7
> vp8_put_bilin16_v_rvv_i32  :    5.7    5.2     :    4.5    4.2
> ---
>  libavcodec/riscv/vp8dsp_rvv.S | 34 +++++++++++++++++++++++++++++-----
>  1 file changed, 29 insertions(+), 5 deletions(-)
> 
> diff --git a/libavcodec/riscv/vp8dsp_rvv.S b/libavcodec/riscv/vp8dsp_rvv.S
> index 3360a38cac..5bea6cba9c 100644
> --- a/libavcodec/riscv/vp8dsp_rvv.S
> +++ b/libavcodec/riscv/vp8dsp_rvv.S
> @@ -172,11 +172,35 @@ func ff_put_vp8_bilin4_\type\()_rvv, zve32x
>          li              t4, 4
>          sub             t1, t1, \mn
>  1:
> -        addi            a4, a4, -1
> -        bilin_load      v0, \type, \mn

Does this not render the type parameter of bilin_load useless (always h)?
(Not a blocker for this patch.)

> -        vse8.v          v0, (a0)
> -        add             a2, a2, a3
> -        add             a0, a0, a1
> +        add             t0, a2, a3
> +        add             t2, a0, a1
> +        addi            a4, a4, -2
> +.ifc \type,v
> +        add             t3, t0, a3
> +.else
> +        addi            t5, a2, 1
> +        addi            t3, t0, 1
> +        vle8.v          v2, (t5)

Not sure if I already asked this but is this really faster than slide1? 
Normally we want to minimise the work of the memory bus.

> +.endif
> +        vle8.v          v0, (a2)
> +        vle8.v          v4, (t0)
> +        vle8.v          v6, (t3)
> +        vwmulu.vx       v28, v0, t1
> +        vwmulu.vx       v26, v4, t1
> +.ifc \type,v
> +        vwmaccu.vx      v28, \mn, v4
> +.else
> +        vwmaccu.vx      v28, \mn, v2
> +.endif
> +        vwmaccu.vx      v26, \mn, v6
> +        vwaddu.wx       v24, v28, t4
> +        vwaddu.wx       v22, v26, t4
> +        vnsra.wi        v30, v24, 3
> +        vnsra.wi        v0, v22, 3
> +        vse8.v          v30, (a0)
> +        vse8.v          v0, (t2)
> +        add             a2, t0, a3
> +        add             a0, t2, a1
>          bnez            a4, 1b
> 
>          ret
flow gg June 12, 2024, 3:22 p.m. UTC | #5
> Does this not render the type parameter of bilin_load useless (always h)?
> (Not a blocker for this patch.)

Yes, this was needed in the initial version, but it is no longer required.
I just sent a patch.

> Not sure if I already asked this but is this really faster than slide1?
> Normally we want to minimise the work of the memory bus.

Originally it used a slide, but based on your review it was changed to a
load, which should be better.

review: "Can't we skip the slide and just load the vector at a2+1? Also
then, we can keep VL=len and halve the multipler."
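
For anyone following along, a minimal sketch of the two alternatives for the
horizontal load, written with the RVV C intrinsics (v1.0 __riscv_ naming)
purely as an illustration under those assumptions; the patch itself is
handwritten assembly and corresponds to the second variant:

    #include <riscv_vector.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Variant A: load len+1 bytes once, slide down to obtain src[x+1].
     * Needs vl = len + 1, which can push the operation into a larger
     * register group (LMUL). */
    static vuint8m1_t h_pair_slide(const uint8_t *src, size_t len,
                                   vuint8m1_t *right)
    {
        vuint8m1_t v = __riscv_vle8_v_u8m1(src, len + 1);
        *right = __riscv_vslide1down_vx_u8m1(v, 0, len + 1);
        return v;
    }

    /* Variant B: two overlapping loads at src and src + 1, both with
     * vl = len, so the register group stays small.  This matches the
     * assembly's vle8.v at (a2) and at (a2 + 1). */
    static vuint8m1_t h_pair_loads(const uint8_t *src, size_t len,
                                   vuint8m1_t *right)
    {
        *right = __riscv_vle8_v_u8m1(src + 1, len);
        return __riscv_vle8_v_u8m1(src, len);
    }

Which variant is faster depends on whether the extra memory access costs
more than the vslide1down on a given core.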

diff mbox series

Patch

diff --git a/libavcodec/riscv/vp8dsp_rvv.S b/libavcodec/riscv/vp8dsp_rvv.S
index 3360a38cac..5bea6cba9c 100644
--- a/libavcodec/riscv/vp8dsp_rvv.S
+++ b/libavcodec/riscv/vp8dsp_rvv.S
@@ -172,11 +172,35 @@  func ff_put_vp8_bilin4_\type\()_rvv, zve32x
         li              t4, 4
         sub             t1, t1, \mn
 1:
-        addi            a4, a4, -1
-        bilin_load      v0, \type, \mn
-        vse8.v          v0, (a0)
-        add             a2, a2, a3
-        add             a0, a0, a1
+        add             t0, a2, a3
+        add             t2, a0, a1
+        addi            a4, a4, -2
+.ifc \type,v
+        add             t3, t0, a3
+.else
+        addi            t5, a2, 1
+        addi            t3, t0, 1
+        vle8.v          v2, (t5)
+.endif
+        vle8.v          v0, (a2)
+        vle8.v          v4, (t0)
+        vle8.v          v6, (t3)
+        vwmulu.vx       v28, v0, t1
+        vwmulu.vx       v26, v4, t1
+.ifc \type,v
+        vwmaccu.vx      v28, \mn, v4
+.else
+        vwmaccu.vx      v28, \mn, v2
+.endif
+        vwmaccu.vx      v26, \mn, v6
+        vwaddu.wx       v24, v28, t4
+        vwaddu.wx       v22, v26, t4
+        vnsra.wi        v30, v24, 3
+        vnsra.wi        v0, v22, 3
+        vse8.v          v30, (a0)
+        vse8.v          v0, (t2)
+        add             a2, t0, a3
+        add             a0, t2, a1
         bnez            a4, 1b
 
         ret