[FFmpeg-devel] Moves yuv2yuvX_sse3 to yasm, unrolls main loop and other small optimizations for ~20% speedup.

Message ID 20201027085507.3286201-1-alankelly@google.com
State Superseded
Series [FFmpeg-devel] Moves yuv2yuvX_sse3 to yasm, unrolls main loop and other small optimizations for ~20% speedup.

Checks

Context Check Description
andriy/x86_make success Make finished
andriy/x86_make_fate success Make fate finished
andriy/PPC64_make success Make finished
andriy/PPC64_make_fate success Make fate finished

Commit Message

Alan Kelly Oct. 27, 2020, 8:55 a.m. UTC
---
 libswscale/x86/Makefile     |   1 +
 libswscale/x86/swscale.c    |  75 ++++----------------------
 libswscale/x86/yuv2yuvX.asm | 105 ++++++++++++++++++++++++++++++++++++
 3 files changed, 116 insertions(+), 65 deletions(-)
 create mode 100644 libswscale/x86/yuv2yuvX.asm

Comments

Alan Kelly Oct. 27, 2020, 9:06 a.m. UTC | #1
Apologies for the multiple threads; my git send-email was misconfigured.
This has been fixed.

This code has been tested with AVX2 and gives a significant speedup there.
However, until the ff_hscale* functions are ported to AVX2, the AVX2 path
should not be enabled: it results in an overall slowdown of swscale,
probably due to CPU frequency scaling.
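
As a minimal sketch, assuming the usual libswscale dispatch pattern, the
AVX2 kernel could be kept behind a disabled branch until the ff_hscale*
ports land. The dispatch function and the yuv2yuvX_avx2 wrapper below are
hypothetical; only av_get_cpu_flags() and the EXTERNAL_* macros are
existing FFmpeg API (libavutil/x86/cpu.h):

    #include "libavutil/cpu.h"
    #include "libavutil/x86/cpu.h"
    #include "libswscale/swscale_internal.h"

    /* Hypothetical dispatch sketch: register the SSE3 wrapper and keep
     * the AVX2 wrapper compiled out until ff_hscale* has AVX2 versions. */
    static void init_yuv2yuvX_dispatch(SwsContext *c)
    {
        int cpu_flags = av_get_cpu_flags();
        if (EXTERNAL_SSE3(cpu_flags))
            c->yuv2planeX = yuv2yuvX_sse3;
    #if 0 /* re-enable once ff_hscale* is ported to AVX2 */
        if (EXTERNAL_AVX2_FAST(cpu_flags))
            c->yuv2planeX = yuv2yuvX_avx2;
    #endif
    }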

A checkasm test will follow in a separate patch.
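
For context, such a test would likely follow the standard checkasm
pattern. This fragment is a hypothetical sketch (it assumes the
tests/checkasm harness plus pre-filled filter, src, dither, dst_ref and
dst_new buffers), not the actual upcoming patch:

    /* Compare the C reference against the asm version through the
     * yuv2planeX function pointer and fail on any output difference. */
    declare_func(void, const int16_t *filter, int filterSize,
                 const int16_t **src, uint8_t *dest, int dstW,
                 const uint8_t *dither, int offset);

    if (check_func(ctx->yuv2planeX, "yuv2yuvX_%d", dstW)) {
        call_ref(filter, filterSize, src, dst_ref, dstW, dither, 0);
        call_new(filter, filterSize, src, dst_new, dstW, dither, 0);
        if (memcmp(dst_ref, dst_new, dstW))
            fail();
        bench_new(filter, filterSize, src, dst_new, dstW, dither, 0);
    }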

Carl Eugen Hoyos Oct. 31, 2020, 12:01 p.m. UTC | #2
On Tue., Oct. 27, 2020 at 09:56, Alan Kelly
<alankelly-at-google.com@ffmpeg.org> wrote:

> --- /dev/null
> +++ b/libswscale/x86/yuv2yuvX.asm
> @@ -0,0 +1,105 @@
> +;******************************************************************************
> +;* x86-optimized yuv2yuvX
> +;* Copyright 2020 Google LLC

Either the commit message ("move a function") or this
copyright statement is wrong; please fix this.

Please do not commit as-is...

Carl Eugen
Alan Kelly Nov. 6, 2020, 7:56 a.m. UTC | #3
The function was rewritten in asm; the code is heavily derived from the
original, the algorithm remains unchanged, and only the implementation is
optimized. Would you agree to adding the copyright from swscale.c:
* Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
to this file, so that it carries both copyrights? Thank you.


Carl Eugen Hoyos Nov. 10, 2020, 9:49 p.m. UTC | #4
No real opinion here, but your argument sounds solid.

Thank you, Carl Eugen
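
A note on the wrapper logic in the patch below: the rewritten
yuv2yuvX_sse3() no longer contains the kernel itself. It splits each
output row into a 32-pixel bulk handled by the new asm kernel (the SSE3
build writes two 16-byte non-temporal stores per outer-loop iteration)
and a tail of up to 31 pixels that falls back to yuv2yuvX_mmxext. A
standalone sketch of that arithmetic:

    #include <stdio.h>

    /* Mirrors the split in the new yuv2yuvX_sse3 wrapper: the asm kernel
     * consumes 32 output bytes per outer-loop iteration, so any shorter
     * tail is left for the MMXEXT fallback. */
    int main(void)
    {
        int dstW            = 100;              /* example row width */
        int remainder       = dstW % 32;        /* 4 tail pixels */
        int pixelsProcessed = dstW - remainder; /* 96 pixels for the asm kernel */

        printf("asm bulk: %d pixels, mmxext tail: %d pixels\n",
               pixelsProcessed, remainder);
        return 0;
    }
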
Patch

diff --git a/libswscale/x86/Makefile b/libswscale/x86/Makefile
index 831d5359aa..bfe383364e 100644
--- a/libswscale/x86/Makefile
+++ b/libswscale/x86/Makefile
@@ -13,3 +13,4 @@  X86ASM-OBJS                     += x86/input.o                          \
                                    x86/scale.o                          \
                                    x86/rgb_2_rgb.o                      \
                                    x86/yuv_2_rgb.o                      \
+                                   x86/yuv2yuvX.o                       \
diff --git a/libswscale/x86/swscale.c b/libswscale/x86/swscale.c
index 3160fedf04..fec9fa22e0 100644
--- a/libswscale/x86/swscale.c
+++ b/libswscale/x86/swscale.c
@@ -197,80 +197,25 @@  void ff_updateMMXDitherTables(SwsContext *c, int dstY)
 }
 
 #if HAVE_MMXEXT
+void ff_yuv2yuvX_sse3(const int16_t *filter, int filterSize,
+                           uint8_t *dest, int dstW,
+                           const uint8_t *dither, int offset);
+
 static void yuv2yuvX_sse3(const int16_t *filter, int filterSize,
                            const int16_t **src, uint8_t *dest, int dstW,
                            const uint8_t *dither, int offset)
 {
+    int remainder = (dstW % 32);
+    int pixelsProcessed = dstW - remainder;
     if(((uintptr_t)dest) & 15){
         yuv2yuvX_mmxext(filter, filterSize, src, dest, dstW, dither, offset);
         return;
     }
-    filterSize--;
-#define MAIN_FUNCTION \
-        "pxor       %%xmm0, %%xmm0 \n\t" \
-        "punpcklbw  %%xmm0, %%xmm3 \n\t" \
-        "movd           %4, %%xmm1 \n\t" \
-        "punpcklwd  %%xmm1, %%xmm1 \n\t" \
-        "punpckldq  %%xmm1, %%xmm1 \n\t" \
-        "punpcklqdq %%xmm1, %%xmm1 \n\t" \
-        "psllw          $3, %%xmm1 \n\t" \
-        "paddw      %%xmm1, %%xmm3 \n\t" \
-        "psraw          $4, %%xmm3 \n\t" \
-        "movdqa     %%xmm3, %%xmm4 \n\t" \
-        "movdqa     %%xmm3, %%xmm7 \n\t" \
-        "movl           %3, %%ecx  \n\t" \
-        "mov                                 %0, %%"FF_REG_d"        \n\t"\
-        "mov                        (%%"FF_REG_d"), %%"FF_REG_S"     \n\t"\
-        ".p2align                             4             \n\t" /* FIXME Unroll? */\
-        "1:                                                 \n\t"\
-        "movddup                  8(%%"FF_REG_d"), %%xmm0   \n\t" /* filterCoeff */\
-        "movdqa              (%%"FF_REG_S", %%"FF_REG_c", 2), %%xmm2 \n\t" /* srcData */\
-        "movdqa            16(%%"FF_REG_S", %%"FF_REG_c", 2), %%xmm5 \n\t" /* srcData */\
-        "add                                $16, %%"FF_REG_d"        \n\t"\
-        "mov                        (%%"FF_REG_d"), %%"FF_REG_S"     \n\t"\
-        "test                         %%"FF_REG_S", %%"FF_REG_S"     \n\t"\
-        "pmulhw                           %%xmm0, %%xmm2      \n\t"\
-        "pmulhw                           %%xmm0, %%xmm5      \n\t"\
-        "paddw                            %%xmm2, %%xmm3      \n\t"\
-        "paddw                            %%xmm5, %%xmm4      \n\t"\
-        " jnz                                1b             \n\t"\
-        "psraw                               $3, %%xmm3      \n\t"\
-        "psraw                               $3, %%xmm4      \n\t"\
-        "packuswb                         %%xmm4, %%xmm3      \n\t"\
-        "movntdq                          %%xmm3, (%1, %%"FF_REG_c") \n\t"\
-        "add                         $16, %%"FF_REG_c"        \n\t"\
-        "cmp                          %2, %%"FF_REG_c"        \n\t"\
-        "movdqa                   %%xmm7, %%xmm3            \n\t" \
-        "movdqa                   %%xmm7, %%xmm4            \n\t" \
-        "mov                                 %0, %%"FF_REG_d"        \n\t"\
-        "mov                        (%%"FF_REG_d"), %%"FF_REG_S"     \n\t"\
-        "jb                                  1b             \n\t"
-
-    if (offset) {
-        __asm__ volatile(
-            "movq          %5, %%xmm3  \n\t"
-            "movdqa    %%xmm3, %%xmm4  \n\t"
-            "psrlq        $24, %%xmm3  \n\t"
-            "psllq        $40, %%xmm4  \n\t"
-            "por       %%xmm4, %%xmm3  \n\t"
-            MAIN_FUNCTION
-              :: "g" (filter),
-              "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
-              "m"(filterSize), "m"(((uint64_t *) dither)[0])
-              : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , "%xmm4" , "%xmm5" , "%xmm7" ,)
-                "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_c
-              );
-    } else {
-        __asm__ volatile(
-            "movq          %5, %%xmm3   \n\t"
-            MAIN_FUNCTION
-              :: "g" (filter),
-              "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
-              "m"(filterSize), "m"(((uint64_t *) dither)[0])
-              : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , "%xmm4" , "%xmm5" , "%xmm7" ,)
-                "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_c
-              );
+    ff_yuv2yuvX_sse3(filter, filterSize - 1, dest - offset, pixelsProcessed + offset, dither, offset);
+    if(remainder > 0){
+      yuv2yuvX_mmxext(filter, filterSize, src, dest + pixelsProcessed, remainder, dither, offset + pixelsProcessed);
     }
+    return;
 }
 #endif
 
diff --git a/libswscale/x86/yuv2yuvX.asm b/libswscale/x86/yuv2yuvX.asm
new file mode 100644
index 0000000000..84727de599
--- /dev/null
+++ b/libswscale/x86/yuv2yuvX.asm
@@ -0,0 +1,105 @@ 
+;******************************************************************************
+;* x86-optimized yuv2yuvX
+;* Copyright 2020 Google LLC
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+;-----------------------------------------------------------------------------
+; yuv2yuvX
+;
+; void ff_yuv2yuvX_<opt>(const int16_t *filter, int filterSize,
+;                        uint8_t *dest, int dstW,
+;                        const uint8_t *dither, int offset);
+;
+;-----------------------------------------------------------------------------
+
+%macro YUV2YUVX_FUNC 0
+cglobal yuv2yuvX, 6, 7, 16, filter, rsi, dest, dstW, dither, offset, src
+%if ARCH_X86_64
+    movsxd               dstWq, dstWd
+    movsxd               offsetq, offsetd
+%endif ; x86-64
+    movq                 xmm3, [ditherq]
+    cmp                  offsetd, 0
+    jz                   .offset
+
+    ; offset != 0 path.
+    psrlq                m5, m3, $18
+    psllq                m3, m3, $28
+    por                  m3, m3, m5
+
+.offset:
+%if cpuflag(avx2)
+    vperm2i128           m3, m3, m3, 0
+%endif ; avx2
+%if ARCH_X86_64
+    movq                 xmm1, rsiq
+%else
+    movd                 mm1, rsi
+%endif
+    vpbroadcastw         m1, xmm1
+    pxor                 m0, m0, m0
+    mov                  rsiq, filterq
+    mov                  srcq, [rsiq]
+    punpcklbw            m3, m0
+    psllw                m1, m1, 3
+    paddw                m3, m3, m1
+    psraw                m7, m3, 4
+.outerloop:
+    mova                 m4, m7
+    mova                 m3, m7
+    mova                 m6, m7
+    mova                 m1, m7
+.loop:
+    vpbroadcastq         m0, [rsiq + 8]
+    pmulhw               m2, m0, [srcq + offsetq * 2]
+    pmulhw               m5, m0, [srcq + offsetq * 2 + mmsize]
+    paddw                m3, m3, m2
+    paddw                m4, m4, m5
+    pmulhw               m2, m0, [srcq + offsetq * 2 + 2 * mmsize]
+    pmulhw               m5, m0, [srcq + offsetq * 2 + 3 * mmsize]
+    paddw                m6, m6, m2
+    paddw                m1, m1, m5
+    add                  rsiq, $10
+    mov                  srcq, [rsiq]
+    test                 srcd, srcd
+    jnz                  .loop
+    psraw                m3, m3, 3
+    psraw                m4, m4, 3
+    psraw                m6, m6, 3
+    psraw                m1, m1, 3
+    packuswb             m3, m3, m4
+    packuswb             m6, m6, m1
+    mov                  srcq, [filterq]
+    movntdq              [destq + offsetq], m3
+    movntdq              [destq + offsetq + mmsize], m6
+    add                  offsetq, mmsize * 2
+    mov                  rsiq, filterq
+    cmp                  offsetq, dstWq
+    jb                  .outerloop
+    REP_RET
+%endmacro
+
+INIT_XMM sse3
+YUV2YUVX_FUNC
+INIT_YMM avx2
+YUV2YUVX_FUNC