Message ID | 20201217104216.1895963-1-alankelly@google.com
---|---
State | Superseded
Series | [FFmpeg-devel] Moves yuv2yuvX_sse3 to yasm, unrolls main loop and other small optimizations for ~20% speedup.
Context | Check | Description |
---|---|---|
andriy/x86_make | success | Make finished |
andriy/x86_make_fate | success | Make fate finished |
andriy/PPC64_make | success | Make finished |
andriy/PPC64_make_fate | success | Make fate finished |
Ping!

On Thu, Dec 17, 2020 at 11:42 AM Alan Kelly <alankelly@google.com> wrote:
> ---
> Fixes memory alignment problem in checkasm-sw_scale
> Tested on Linux 32 and 64 bit and mingw32
>
>  libswscale/x86/Makefile     |   1 +
>  libswscale/x86/swscale.c    | 106 +++++++++-----------------------
>  libswscale/x86/yuv2yuvX.asm | 117 ++++++++++++++++++++++++++++++++++++
>  tests/checkasm/sw_scale.c   |  98 ++++++++++++++++++++++++++++++
>  4 files changed, 246 insertions(+), 76 deletions(-)
>  create mode 100644 libswscale/x86/yuv2yuvX.asm
>
> [patch quoted in full; see the patch at the bottom of this page]
On Tue, Jan 05, 2021 at 01:31:25PM +0100, Alan Kelly wrote:
> Ping!
crashes (due to alignment, I think)
(gdb) disassemble $rip-32,$rip+32
Dump of assembler code from 0x5555555730a1 to 0x5555555730e1:
0x00005555555730a1 <ff_yuv2yuvX_avx2+161>: int $0x71
0x00005555555730a3 <ff_yuv2yuvX_avx2+163>: out %al,$0x3
0x00005555555730a5 <ff_yuv2yuvX_avx2+165>: vpsraw $0x3,%ymm1,%ymm1
0x00005555555730aa <ff_yuv2yuvX_avx2+170>: vpackuswb %ymm4,%ymm3,%ymm3
0x00005555555730ae <ff_yuv2yuvX_avx2+174>: vpackuswb %ymm1,%ymm6,%ymm6
0x00005555555730b2 <ff_yuv2yuvX_avx2+178>: mov (%rdi),%rdx
0x00005555555730b5 <ff_yuv2yuvX_avx2+181>: vpermq $0xd8,%ymm3,%ymm3
0x00005555555730bb <ff_yuv2yuvX_avx2+187>: vpermq $0xd8,%ymm6,%ymm6
=> 0x00005555555730c1 <ff_yuv2yuvX_avx2+193>: vmovdqa %ymm3,(%rcx,%rax,1)
0x00005555555730c6 <ff_yuv2yuvX_avx2+198>: vmovdqa %ymm6,0x20(%rcx,%rax,1)
0x00005555555730cc <ff_yuv2yuvX_avx2+204>: add $0x40,%rax
0x00005555555730d0 <ff_yuv2yuvX_avx2+208>: mov %rdi,%rsi
0x00005555555730d3 <ff_yuv2yuvX_avx2+211>: cmp %r8,%rax
0x00005555555730d6 <ff_yuv2yuvX_avx2+214>: jb 0x55555557304d <ff_yuv2yuvX_avx2+77>
0x00005555555730dc <ff_yuv2yuvX_avx2+220>: vzeroupper
0x00005555555730df <ff_yuv2yuvX_avx2+223>: retq
0x00005555555730e0 <yuv2rgb_c_48+0>: push %r15
End of assembler dump.
(gdb) info all-registers
rax 0x0 0
rbx 0x0 0
rcx 0x55555583f470 93824995292272
[...]
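
The register dump pins down the cause: the faulting instruction is vmovdqa with a ymm destination, which requires 32-byte alignment, while the C wrapper only rejects destinations failing a 16-byte check (`((uintptr_t)dest) & 15`). The dumped rcx (the dest pointer) ends in 0x470: 16-byte aligned, so the wrapper's check passes, but not 32-byte aligned, so the AVX2 store faults. A minimal C sketch of the mismatch, using the address from the dump above:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* destq (rcx) at the time of the fault, from the gdb dump above */
    uintptr_t dest = 0x55555583f470;

    /* The wrapper's check: prints 0, so the SIMD path is taken */
    printf("dest & 15 = %u\n", (unsigned)(dest & 15));

    /* What a 32-byte vmovdqa actually requires: prints 16, i.e. misaligned */
    printf("dest & 31 = %u\n", (unsigned)(dest & 31));
    return 0;
}
```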
Thanks for your patience with this, I have replaced mova with movdqu -
movu generated a compile error on ssse3. What system did this crash on?

On Wed, Jan 6, 2021 at 9:10 PM Michael Niedermayer <michael@niedermayer.cc> wrote:
> On Tue, Jan 05, 2021 at 01:31:25PM +0100, Alan Kelly wrote:
> > Ping!
>
> crashes (due to alignment, I think)
>
> [gdb disassembly and register dump quoted in full above]
On Thu, Jan 07, 2021 at 10:39:56AM +0100, Alan Kelly wrote:
> Thanks for your patience with this, I have replaced mova with movdqu - movu
> generated a compile error on ssse3. What system did this crash on?

AMD Ryzen 9 3950X on linux

[...]
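
The mova-to-movdqu change maps onto the aligned versus unaligned store distinction in x86: vmovdqa faults on a misaligned address, vmovdqu does not, and on recent cores an unaligned store to an address that happens to be aligned typically carries no penalty. A minimal intrinsics sketch of the two options (illustration only; the patch itself is written in NASM syntax):

```c
#include <immintrin.h>
#include <stdint.h>

/* Sketch: write one 32-byte vector of output pixels to dest. */
static void store_row(uint8_t *dest, __m256i pixels)
{
    /* vmovdqa: #GP fault if dest is not 32-byte aligned */
    /* _mm256_store_si256((__m256i *)dest, pixels); */

    /* vmovdqu: no alignment requirement; the replacement Alan describes */
    _mm256_storeu_si256((__m256i *)dest, pixels);
}
```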
diff --git a/libswscale/x86/Makefile b/libswscale/x86/Makefile
index 831d5359aa..bfe383364e 100644
--- a/libswscale/x86/Makefile
+++ b/libswscale/x86/Makefile
@@ -13,3 +13,4 @@ X86ASM-OBJS                     += x86/input.o                          \
                                    x86/scale.o                          \
                                    x86/rgb_2_rgb.o                      \
                                    x86/yuv_2_rgb.o                      \
+                                   x86/yuv2yuvX.o                       \
diff --git a/libswscale/x86/swscale.c b/libswscale/x86/swscale.c
index 3160fedf04..8cd8713705 100644
--- a/libswscale/x86/swscale.c
+++ b/libswscale/x86/swscale.c
@@ -197,81 +197,30 @@ void ff_updateMMXDitherTables(SwsContext *c, int dstY)
 }
 
 #if HAVE_MMXEXT
-static void yuv2yuvX_sse3(const int16_t *filter, int filterSize,
-                          const int16_t **src, uint8_t *dest, int dstW,
-                          const uint8_t *dither, int offset)
-{
-    if(((uintptr_t)dest) & 15){
-        yuv2yuvX_mmxext(filter, filterSize, src, dest, dstW, dither, offset);
-        return;
-    }
-    filterSize--;
-#define MAIN_FUNCTION \
-        "pxor       %%xmm0, %%xmm0     \n\t" \
-        "punpcklbw  %%xmm0, %%xmm3     \n\t" \
-        "movd           %4, %%xmm1     \n\t" \
-        "punpcklwd  %%xmm1, %%xmm1     \n\t" \
-        "punpckldq  %%xmm1, %%xmm1     \n\t" \
-        "punpcklqdq %%xmm1, %%xmm1     \n\t" \
-        "psllw          $3, %%xmm1     \n\t" \
-        "paddw      %%xmm1, %%xmm3     \n\t" \
-        "psraw          $4, %%xmm3     \n\t" \
-        "movdqa     %%xmm3, %%xmm4     \n\t" \
-        "movdqa     %%xmm3, %%xmm7     \n\t" \
-        "movl           %3, %%ecx      \n\t" \
-        "mov        %0, %%"FF_REG_d"                  \n\t" \
-        "mov        (%%"FF_REG_d"), %%"FF_REG_S"      \n\t" \
-        ".p2align   4                  \n\t" /* FIXME Unroll? */ \
-        "1:                            \n\t" \
-        "movddup    8(%%"FF_REG_d"), %%xmm0           \n\t" /* filterCoeff */ \
-        "movdqa     (%%"FF_REG_S", %%"FF_REG_c", 2), %%xmm2   \n\t" /* srcData */ \
-        "movdqa     16(%%"FF_REG_S", %%"FF_REG_c", 2), %%xmm5 \n\t" /* srcData */ \
-        "add        $16, %%"FF_REG_d"                 \n\t" \
-        "mov        (%%"FF_REG_d"), %%"FF_REG_S"      \n\t" \
-        "test       %%"FF_REG_S", %%"FF_REG_S"        \n\t" \
-        "pmulhw     %%xmm0, %%xmm2     \n\t" \
-        "pmulhw     %%xmm0, %%xmm5     \n\t" \
-        "paddw      %%xmm2, %%xmm3     \n\t" \
-        "paddw      %%xmm5, %%xmm4     \n\t" \
-        " jnz           1b             \n\t" \
-        "psraw          $3, %%xmm3     \n\t" \
-        "psraw          $3, %%xmm4     \n\t" \
-        "packuswb   %%xmm4, %%xmm3     \n\t" \
-        "movntdq    %%xmm3, (%1, %%"FF_REG_c")        \n\t" \
-        "add        $16, %%"FF_REG_c"                 \n\t" \
-        "cmp        %2, %%"FF_REG_c"                  \n\t" \
-        "movdqa     %%xmm7, %%xmm3     \n\t" \
-        "movdqa     %%xmm7, %%xmm4     \n\t" \
-        "mov        %0, %%"FF_REG_d"                  \n\t" \
-        "mov        (%%"FF_REG_d"), %%"FF_REG_S"      \n\t" \
-        "jb             1b             \n\t"
-
-    if (offset) {
-        __asm__ volatile(
-            "movq          %5, %%xmm3  \n\t"
-            "movdqa    %%xmm3, %%xmm4  \n\t"
-            "psrlq        $24, %%xmm3  \n\t"
-            "psllq        $40, %%xmm4  \n\t"
-            "por       %%xmm4, %%xmm3  \n\t"
-            MAIN_FUNCTION
-              :: "g" (filter),
-              "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
-              "m"(filterSize), "m"(((uint64_t *) dither)[0])
-              : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , "%xmm4" , "%xmm5" , "%xmm7" ,)
-                "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_c
-              );
-    } else {
-        __asm__ volatile(
-            "movq          %5, %%xmm3  \n\t"
-            MAIN_FUNCTION
-              :: "g" (filter),
-              "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
-              "m"(filterSize), "m"(((uint64_t *) dither)[0])
-              : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , "%xmm4" , "%xmm5" , "%xmm7" ,)
-                "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_c
-              );
-    }
+#define YUV2YUVX_FUNC(opt, step) \
+void ff_yuv2yuvX_ ##opt(const int16_t *filter, long filterSize, const int16_t **src, \
+                        uint8_t *dest, int dstW, \
+                        const uint8_t *dither, int offset); \
+static void yuv2yuvX_ ##opt(const int16_t *filter, int filterSize, \
+                            const int16_t **src, uint8_t *dest, int dstW, \
+                            const uint8_t *dither, int offset) \
+{ \
+    int remainder = (dstW % step); \
+    int pixelsProcessed = dstW - remainder; \
+    if(((uintptr_t)dest) & 15){ \
+        yuv2yuvX_mmx(filter, filterSize, src, dest, dstW, dither, offset); \
+        return; \
+    } \
+    ff_yuv2yuvX_ ##opt(filter, filterSize - 1, src, dest - offset, pixelsProcessed + offset, dither, offset); \
+    if(remainder > 0){ \
+      yuv2yuvX_mmx(filter, filterSize, src, dest + pixelsProcessed, remainder, dither, offset + pixelsProcessed); \
+    } \
+    return; \
 }
+
+YUV2YUVX_FUNC(sse3, 32)
+YUV2YUVX_FUNC(avx2, 64)
+
 #endif
 
 #endif /* HAVE_INLINE_ASM */
@@ -402,9 +351,14 @@ av_cold void ff_sws_init_swscale_x86(SwsContext *c)
 #if HAVE_MMXEXT_INLINE
     if (INLINE_MMXEXT(cpu_flags))
         sws_init_swscale_mmxext(c);
-    if (cpu_flags & AV_CPU_FLAG_SSE3){
-        if(c->use_mmx_vfilter && !(c->flags & SWS_ACCURATE_RND))
+    if (cpu_flags & AV_CPU_FLAG_AVX2){
+        if(c->use_mmx_vfilter && !(c->flags & SWS_ACCURATE_RND)){
+            c->yuv2planeX = yuv2yuvX_avx2;
+        }
+    } else if (cpu_flags & AV_CPU_FLAG_SSE3){
+        if(c->use_mmx_vfilter && !(c->flags & SWS_ACCURATE_RND)){
             c->yuv2planeX = yuv2yuvX_sse3;
+        }
     }
 #endif
 
diff --git a/libswscale/x86/yuv2yuvX.asm b/libswscale/x86/yuv2yuvX.asm
new file mode 100644
index 0000000000..899b84c50b
--- /dev/null
+++ b/libswscale/x86/yuv2yuvX.asm
@@ -0,0 +1,117 @@
+;******************************************************************************
+;* x86-optimized yuv2yuvX
+;* Copyright 2020 Google LLC
+;* Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+;-----------------------------------------------------------------------------
+; yuv2yuvX
+;
+; void ff_yuv2yuvX_<opt>(const int16_t *filter, int filterSize,
+;                        uint8_t *dest, int dstW,
+;                        const uint8_t *dither, int offset);
+;
+;-----------------------------------------------------------------------------
+
+%macro YUV2YUVX_FUNC 0
+cglobal yuv2yuvX, 7, 7, 8, filter, filterSize, src, dest, dstW, dither, offset
+%if ARCH_X86_64
+    movsxd               dstWq, dstWd
+    movsxd             offsetq, offsetd
+%endif ; x86-64
+    movddup                 m0, [filterq + 8]
+%if cpuflag(avx2)
+    vpbroadcastq            m3, [ditherq]
+%else
+    movq                  xmm3, [ditherq]
+%endif ; avx2
+    cmp                offsetd, 0
+    jz                 .offset
+
+    ; offset != 0 path.
+    psrlq                   m5, m3, $18
+    psllq                   m3, m3, $28
+    por                     m3, m3, m5
+
+.offset:
+    movd                  xmm1, filterSized
+%if cpuflag(avx2)
+    vpbroadcastw            m1, xmm1
+%else
+    pshuflw                 m1, m1, q0000
+    punpcklqdq              m1, m1
+%endif ; avx2
+    pxor                    m0, m0, m0
+    mov            filterSizeq, filterq
+    mov                   srcq, [filterSizeq]
+    punpcklbw               m3, m0
+    psllw                   m1, m1, 3
+    paddw                   m3, m3, m1
+    psraw                   m7, m3, 4
+.outerloop:
+    mova                    m4, m7
+    mova                    m3, m7
+    mova                    m6, m7
+    mova                    m1, m7
+.loop:
+%if cpuflag(avx2)
+    vpbroadcastq            m0, [filterSizeq + 8]
+%else
+    movddup                 m0, [filterSizeq + 8]
+%endif
+    pmulhw                  m2, m0, [srcq + offsetq * 2]
+    pmulhw                  m5, m0, [srcq + offsetq * 2 + mmsize]
+    paddw                   m3, m3, m2
+    paddw                   m4, m4, m5
+    pmulhw                  m2, m0, [srcq + offsetq * 2 + 2 * mmsize]
+    pmulhw                  m5, m0, [srcq + offsetq * 2 + 3 * mmsize]
+    paddw                   m6, m6, m2
+    paddw                   m1, m1, m5
+    add            filterSizeq, $10
+    mov                   srcq, [filterSizeq]
+    test                  srcq, srcq
+    jnz .loop
+    psraw                   m3, m3, 3
+    psraw                   m4, m4, 3
+    psraw                   m6, m6, 3
+    psraw                   m1, m1, 3
+    packuswb                m3, m3, m4
+    packuswb                m6, m6, m1
+    mov                   srcq, [filterq]
+%if cpuflag(avx2)
+    vpermq                  m3, m3, 216
+    vpermq                  m6, m6, 216
+%endif
+    mova    [destq + offsetq], m3
+    mova    [destq + offsetq + mmsize], m6
+    add                offsetq, mmsize * 2
+    mov            filterSizeq, filterq
+    cmp                offsetq, dstWq
+    jb .outerloop
+    REP_RET
+%endmacro
+
+INIT_XMM sse3
+YUV2YUVX_FUNC
+INIT_YMM avx2
+YUV2YUVX_FUNC
diff --git a/tests/checkasm/sw_scale.c b/tests/checkasm/sw_scale.c
index 9efa2b4def..3bc6066f35 100644
--- a/tests/checkasm/sw_scale.c
+++ b/tests/checkasm/sw_scale.c
@@ -37,6 +37,102 @@
 
 #define SRC_PIXELS 128
 
+
+// This reference function is the same approximate algorithm employed by the
+// SIMD functions
+static void ref_function(const int16_t *filter, int filterSize,
+                         const int16_t **src, uint8_t *dest, int dstW,
+                         const uint8_t *dither, int offset)
+{
+    int i, d;
+    d = ((filterSize - 1) * 8 + dither[0]) >> 4;
+    for (i=0; i<dstW; i++) {
+        int16_t val = d;
+        int j;
+        union {
+            int val;
+            int16_t v[2];
+        } t;
+        for (j=0; j<filterSize; j++){
+            t.val = (int)src[j][i + offset] * (int)filter[j];
+            val += t.v[1];
+        }
+        dest[i]= av_clip_uint8(val>>3);
+    }
+}
+
+static void check_yuv2yuvX(void)
+{
+    struct SwsContext *ctx;
+    int fsi, osi, i, j;
+#define LARGEST_FILTER 16
+#define FILTER_SIZES 4
+    static const int filter_sizes[FILTER_SIZES] = {1, 4, 8, 16};
+
+    declare_func_emms(AV_CPU_FLAG_MMX, void, const int16_t *filter,
+                      int filterSize, const int16_t **src, uint8_t *dest,
+                      int dstW, const uint8_t *dither, int offset);
+
+    int dstW = SRC_PIXELS;
+    const int16_t **src;
+    LOCAL_ALIGNED_32(int16_t, src_pixels, [LARGEST_FILTER * SRC_PIXELS]);
+    LOCAL_ALIGNED_32(int16_t, filter_coeff, [LARGEST_FILTER]);
+    LOCAL_ALIGNED_32(uint8_t, dst0, [SRC_PIXELS]);
+    LOCAL_ALIGNED_32(uint8_t, dst1, [SRC_PIXELS]);
+    LOCAL_ALIGNED_32(uint8_t, dither, [SRC_PIXELS]);
+    union VFilterData{
+        const int16_t *src;
+        uint16_t coeff[8];
+    } *vFilterData;
+    uint8_t d_val = rnd();
+    randomize_buffers(filter_coeff, LARGEST_FILTER);
+    randomize_buffers(src_pixels, LARGEST_FILTER * SRC_PIXELS);
+    ctx = sws_alloc_context();
+    if (sws_init_context(ctx, NULL, NULL) < 0)
+        fail();
+
+    ff_getSwsFunc(ctx);
+    for(i = 0; i < SRC_PIXELS; ++i){
+        dither[i] = d_val;
+    }
+    for(osi = 0; osi < 64; osi += 16){
+        for(fsi = 0; fsi < FILTER_SIZES; ++fsi){
+            src = av_malloc(sizeof(int16_t*) * filter_sizes[fsi]);
+            vFilterData = av_malloc((filter_sizes[fsi] + 2) * sizeof(union VFilterData));
+            memset(vFilterData, 0, (filter_sizes[fsi] + 2) * sizeof(union VFilterData));
+            for(i = 0; i < filter_sizes[fsi]; ++i){
+                src[i] = &src_pixels[i * SRC_PIXELS];
+                vFilterData[i].src = src[i];
+                for(j = 0; j < 4; ++j){
+                    vFilterData[i].coeff[j + 4] = filter_coeff[i];
+                }
+            }
+            if (check_func(ctx->yuv2planeX, "yuv2yuvX_%d_%d", filter_sizes[fsi], osi)){
+                memset(dst0, 0, SRC_PIXELS * sizeof(dst0[0]));
+                memset(dst1, 0, SRC_PIXELS * sizeof(dst1[0]));
+
+                // The reference function is not the scalar function selected when mmx
+                // is deactivated as the SIMD functions do not give the same result as
+                // the scalar ones due to rounding. The SIMD functions are activated by
+                // the flag SWS_ACCURATE_RND
+                ref_function(&filter_coeff[0], filter_sizes[fsi], src, dst0, dstW - osi, dither, osi);
+                // There's no point in calling new for the reference function
+                if(ctx->use_mmx_vfilter){
+                    call_new((const int16_t*)vFilterData, filter_sizes[fsi], src, dst1, dstW - osi, dither, osi);
+                    if (memcmp(dst0, dst1, SRC_PIXELS * sizeof(dst0[0]))){
+                        fail();
+                    }
+                    bench_new((const int16_t*)vFilterData, filter_sizes[fsi], src, dst1, dstW - osi, dither, osi);
+                }
+            }
+            free(src);
+            free(vFilterData);
+        }
+    }
+    sws_freeContext(ctx);
+#undef FILTER_SIZES
+}
+
 static void check_hscale(void)
 {
 #define MAX_FILTER_WIDTH 40
@@ -131,4 +227,6 @@ void checkasm_check_sw_scale(void)
 {
     check_hscale();
     report("hscale");
+    check_yuv2yuvX();
+    report("yuv2yuvX");
 }
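
As a reading aid, the wrapper generated by YUV2YUVX_FUNC above follows a standard vector-body/scalar-tail split: the widest multiple of the SIMD step goes to the assembly kernel, and the leftover pixels fall back to the MMX routine. A minimal C sketch of the same pattern, with hypothetical function and type names:

```c
#include <stdint.h>

typedef void (*yuv2planeX_fn)(const int16_t *filter, int filterSize,
                              const int16_t **src, uint8_t *dest, int dstW,
                              const uint8_t *dither, int offset);

/* Hypothetical dispatcher: run `fast` on the largest multiple of `step`
 * pixels, then finish the tail with `fallback`, as YUV2YUVX_FUNC does. */
static void dispatch(yuv2planeX_fn fast, yuv2planeX_fn fallback, int step,
                     const int16_t *filter, int filterSize,
                     const int16_t **src, uint8_t *dest, int dstW,
                     const uint8_t *dither, int offset)
{
    int remainder       = dstW % step;
    int pixelsProcessed = dstW - remainder;

    fast(filter, filterSize, src, dest, pixelsProcessed, dither, offset);
    if (remainder > 0)  /* tail pixels that don't fill a full vector */
        fallback(filter, filterSize, src, dest + pixelsProcessed,
                 remainder, dither, offset + pixelsProcessed);
}
```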