From patchwork Thu Nov 12 08:33:18 2020
X-Patchwork-Submitter: Alan Kelly
X-Patchwork-Id: 23592
Date: Thu, 12 Nov 2020 09:33:18 +0100
Message-Id: <20201112083318.1515486-1-alankelly@google.com>
From: Alan Kelly
To: ffmpeg-devel@ffmpeg.org
Subject: [FFmpeg-devel] [PATCH] Moves yuv2yuvX_sse3 to yasm, unrolls main loop and other small optimizations for ~20% speedup.
Cc: Alan Kelly

---
It now works on x86-32

 libswscale/x86/Makefile     |   1 +
 libswscale/x86/swscale.c    |  75 ++++--------------------
 libswscale/x86/yuv2yuvX.asm | 110 ++++++++++++++++++++++++++++++++++++
 3 files changed, 121 insertions(+), 65 deletions(-)
 create mode 100644 libswscale/x86/yuv2yuvX.asm

diff --git a/libswscale/x86/Makefile b/libswscale/x86/Makefile
index 831d5359aa..bfe383364e 100644
--- a/libswscale/x86/Makefile
+++ b/libswscale/x86/Makefile
@@ -13,3 +13,4 @@ X86ASM-OBJS                     += x86/input.o                          \
                                    x86/scale.o                          \
                                    x86/rgb_2_rgb.o                      \
                                    x86/yuv_2_rgb.o                      \
+                                   x86/yuv2yuvX.o                       \
diff --git a/libswscale/x86/swscale.c b/libswscale/x86/swscale.c
index 3160fedf04..758c8e540f 100644
--- a/libswscale/x86/swscale.c
+++ b/libswscale/x86/swscale.c
@@ -197,80 +197,25 @@ void ff_updateMMXDitherTables(SwsContext *c, int dstY)
 }
 
 #if HAVE_MMXEXT
+void ff_yuv2yuvX_sse3(const int16_t *filter, long filterSize,
+                      uint8_t *dest, int dstW,
+                      const uint8_t *dither, int offset);
+
 static void yuv2yuvX_sse3(const int16_t *filter, int filterSize,
                           const int16_t **src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset)
 {
+    int remainder = (dstW % 32);
+    int pixelsProcessed = dstW - remainder;
     if(((uintptr_t)dest) & 15){
         yuv2yuvX_mmxext(filter, filterSize, src, dest, dstW, dither, offset);
         return;
     }
-    filterSize--;
-#define MAIN_FUNCTION \
-        "pxor       %%xmm0, %%xmm0              \n\t" \
-        "punpcklbw  %%xmm0, %%xmm3              \n\t" \
-        "movd           %4, %%xmm1              \n\t" \
-        "punpcklwd  %%xmm1, %%xmm1              \n\t" \
-        "punpckldq  %%xmm1, %%xmm1              \n\t" \
-        "punpcklqdq %%xmm1, %%xmm1              \n\t" \
-        "psllw          $3, %%xmm1              \n\t" \
-        "paddw      %%xmm1, %%xmm3              \n\t" \
-        "psraw          $4, %%xmm3              \n\t" \
-        "movdqa     %%xmm3, %%xmm4              \n\t" \
-        "movdqa     %%xmm3, %%xmm7              \n\t" \
-        "movl           %3, %%ecx               \n\t" \
-        "mov                 %0, %%"FF_REG_d"   \n\t"\
-        "mov    (%%"FF_REG_d"), %%"FF_REG_S"    \n\t"\
-        ".p2align             4                 \n\t" /* FIXME Unroll? */\
-        "1:                                     \n\t"\
-        "movddup  8(%%"FF_REG_d"), %%xmm0       \n\t" /* filterCoeff */\
-        "movdqa   (%%"FF_REG_S", %%"FF_REG_c", 2), %%xmm2    \n\t" /* srcData */\
-        "movdqa 16(%%"FF_REG_S", %%"FF_REG_c", 2), %%xmm5    \n\t" /* srcData */\
-        "add                $16, %%"FF_REG_d"   \n\t"\
-        "mov    (%%"FF_REG_d"), %%"FF_REG_S"    \n\t"\
-        "test     %%"FF_REG_S", %%"FF_REG_S"    \n\t"\
-        "pmulhw     %%xmm0, %%xmm2              \n\t"\
-        "pmulhw     %%xmm0, %%xmm5              \n\t"\
-        "paddw      %%xmm2, %%xmm3              \n\t"\
-        "paddw      %%xmm5, %%xmm4              \n\t"\
-        " jnz                1b                 \n\t"\
-        "psraw          $3, %%xmm3              \n\t"\
-        "psraw          $3, %%xmm4              \n\t"\
-        "packuswb   %%xmm4, %%xmm3              \n\t"\
-        "movntdq    %%xmm3, (%1, %%"FF_REG_c")  \n\t"\
-        "add           $16, %%"FF_REG_c"        \n\t"\
-        "cmp            %2, %%"FF_REG_c"        \n\t"\
-        "movdqa     %%xmm7, %%xmm3              \n\t" \
-        "movdqa     %%xmm7, %%xmm4              \n\t" \
-        "mov                 %0, %%"FF_REG_d"   \n\t"\
-        "mov    (%%"FF_REG_d"), %%"FF_REG_S"    \n\t"\
-        "jb                  1b                 \n\t"
-
-    if (offset) {
-        __asm__ volatile(
-                "movq          %5, %%xmm3  \n\t"
-                "movdqa    %%xmm3, %%xmm4  \n\t"
-                "psrlq        $24, %%xmm3  \n\t"
-                "psllq        $40, %%xmm4  \n\t"
-                "por       %%xmm4, %%xmm3  \n\t"
-                MAIN_FUNCTION
-                  :: "g" (filter),
-                  "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
-                  "m"(filterSize), "m"(((uint64_t *) dither)[0])
-                  : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , "%xmm4" , "%xmm5" , "%xmm7" ,)
-                    "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_c
-                  );
-    } else {
-        __asm__ volatile(
-                "movq          %5, %%xmm3  \n\t"
-                MAIN_FUNCTION
-                  :: "g" (filter),
-                  "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
-                  "m"(filterSize), "m"(((uint64_t *) dither)[0])
-                  : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , "%xmm4" , "%xmm5" , "%xmm7" ,)
-                    "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_c
-                  );
+    ff_yuv2yuvX_sse3(filter, filterSize - 1, dest - offset, pixelsProcessed + offset, dither, offset);
+    if(remainder > 0){
+      yuv2yuvX_mmxext(filter, filterSize, src, dest + pixelsProcessed, remainder, dither, offset + pixelsProcessed);
     }
+    return;
 }
 #endif
diff --git a/libswscale/x86/yuv2yuvX.asm b/libswscale/x86/yuv2yuvX.asm
new file mode 100644
index 0000000000..c7ab6129d9
--- /dev/null
+++ b/libswscale/x86/yuv2yuvX.asm
@@ -0,0 +1,110 @@
+;******************************************************************************
+;* x86-optimized yuv2yuvX
+;* Copyright 2020 Google LLC
+;* Copyright (C) 2001-2011 Michael Niedermayer
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+;-----------------------------------------------------------------------------
+; yuv2yuvX
+;
+; void ff_yuv2yuvX_<opt>(const int16_t *filter, int filterSize,
+;                        uint8_t *dest, int dstW,
+;                        const uint8_t *dither, int offset);
+;
+;-----------------------------------------------------------------------------
+
+%macro YUV2YUVX_FUNC 0
+cglobal yuv2yuvX, 6, 7, 16, filter, filterSize, dest, dstW, dither, offset, src
+%if ARCH_X86_64
+%define ptr_size 8
+    movsxd              dstWq, dstWd
+    movsxd            offsetq, offsetd
+%else
+%define ptr_size 4
+%endif ; x86-64
+    movddup                m0, [filterq + ptr_size]
+    movq                 xmm3, [ditherq]
+    cmp               offsetd, 0
+    jz .offset
+
+    ; offset != 0 path.
+    psrlq                  m5, m3, $18
+    psllq                  m3, m3, $28
+    por                    m3, m3, m5
+
+.offset:
+    movd                 xmm1, filterSized
+%if cpuflag(avx2)
+    vperm2i128             m3, m3, m3, 0
+    vpbroadcastw           m1, xmm1
+%endif ; avx2
+    pxor                   m0, m0, m0
+    mov           filterSizeq, filterq
+    mov                  srcq, [filterSizeq]
+    punpcklbw              m3, m0
+    psllw                  m1, m1, 3
+    paddw                  m3, m3, m1
+    psraw                  m7, m3, 4
+.outerloop:
+    mova                   m4, m7
+    mova                   m3, m7
+    mova                   m6, m7
+    mova                   m1, m7
+.loop:
+%if cpuflag(avx2)
+    vpbroadcastq           m0, [filterSizeq + ptr_size]
+%else
+    movddup                m0, [filterSizeq + ptr_size]
+%endif
+    pmulhw                 m2, m0, [srcq + offsetq * 2]
+    pmulhw                 m5, m0, [srcq + offsetq * 2 + mmsize]
+    paddw                  m3, m3, m2
+    paddw                  m4, m4, m5
+    pmulhw                 m2, m0, [srcq + offsetq * 2 + 2 * mmsize]
+    pmulhw                 m5, m0, [srcq + offsetq * 2 + 3 * mmsize]
+    paddw                  m6, m6, m2
+    paddw                  m1, m1, m5
+    add           filterSizeq, 2 * ptr_size
+    mov                  srcq, [filterSizeq]
+    test                 srcd, srcd
+    jnz .loop
+    psraw                  m3, m3, 3
+    psraw                  m4, m4, 3
+    psraw                  m6, m6, 3
+    psraw                  m1, m1, 3
+    packuswb               m3, m3, m4
+    packuswb               m6, m6, m1
+    mov                  srcq, [filterq]
+    movntdq                [destq + offsetq], m3
+    movntdq                [destq + offsetq + mmsize], m6
+    add               offsetq, mmsize * 2
+    mov           filterSizeq, filterq
+    cmp               offsetq, dstWq
+    jb .outerloop
+    REP_RET
+%endmacro
+
+INIT_XMM sse3
+YUV2YUVX_FUNC
+INIT_YMM avx2
+YUV2YUVX_FUNC
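
Note (illustrative, not part of the patch): the swscale.c hunk keeps the existing MMXEXT routine as a fallback, both for destinations that are not 16-byte aligned and for the tail pixels that the 32-pixel-wide SSE3 loop cannot cover. Below is a minimal, self-contained sketch of that dispatch; yuv2yuvX_sse3_dispatch() is an invented name, and yuv2yuvX_mmxext() is really a static function generated inside swscale.c, declared here only so the sketch stands on its own.

#include <stdint.h>

void ff_yuv2yuvX_sse3(const int16_t *filter, long filterSize,
                      uint8_t *dest, int dstW,
                      const uint8_t *dither, int offset);
void yuv2yuvX_mmxext(const int16_t *filter, int filterSize,
                     const int16_t **src, uint8_t *dest, int dstW,
                     const uint8_t *dither, int offset);

static void yuv2yuvX_sse3_dispatch(const int16_t *filter, int filterSize,
                                   const int16_t **src, uint8_t *dest,
                                   int dstW, const uint8_t *dither, int offset)
{
    /* The SSE3 outer loop writes 32 output pixels per iteration with
     * non-temporal stores, hence the multiple-of-32 split below. */
    int remainder       = dstW % 32;
    int pixelsProcessed = dstW - remainder;

    if (((uintptr_t)dest) & 15) {
        /* movntdq needs an aligned destination; fall back entirely. */
        yuv2yuvX_mmxext(filter, filterSize, src, dest, dstW, dither, offset);
        return;
    }
    /* The asm indexes dest by offsetq and loops until offsetq reaches dstWq,
     * so the base pointer is rewound by offset and the bound widened by
     * offset; filterSize - 1 matches the filterSize-- of the old inline asm. */
    ff_yuv2yuvX_sse3(filter, filterSize - 1, dest - offset,
                     pixelsProcessed + offset, dither, offset);
    if (remainder > 0)
        /* Leftover (dstW % 32) pixels go through the existing MMXEXT path. */
        yuv2yuvX_mmxext(filter, filterSize, src, dest + pixelsProcessed,
                        remainder, dither, offset + pixelsProcessed);
}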