
[FFmpeg-devel] swscale_unscaled: fix and speed up DITHER_COPY macro for x86 with SSE2

Message ID d6e32949-1b35-ccb2-34c6-0ae340c4c65c@poczta.onet.pl
State New

Commit Message

Mateusz Sept. 22, 2017, 3:23 p.m. UTC
New version of the patch -- now it uses the same logic independent of the target bitdepth.

For x86_64 it is much faster than the current code (with perfect quality); for x86_32 it is fast
if you add to configure: --extra-cflags="-msse2"
(for x86_32 with the default configure options it is slower than the current code, but with better quality).
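
To make the review easier, here is a standalone scalar sketch of the per-sample
conversion the rewritten DITHER_COPY performs in its 'shiftonly' path (this helper
is only an illustration, not part of the patch; it assumes src_depth > dst_depth):

/* one sample of the new dither-then-shift logic ('shiftonly' path) */
static unsigned dither_copy_sample(unsigned src, unsigned dither,
                                   unsigned src_depth, unsigned dst_depth)
{
    unsigned shift = src_depth - dst_depth;
    unsigned tmp   = (src + dither) >> shift;
    /* after adding the dither, tmp can reach 1<<dst_depth; subtracting
     * tmp>>dst_depth folds that overflow back to the destination maximum,
     * which is what makes the old dither_scale[] multiply table unnecessary */
    return tmp - (tmp >> dst_depth);
}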

Please review/test.

Mateusz
From 8eaa76fc82550f62f1a22e9388a51dc61c031a2c Mon Sep 17 00:00:00 2001
From: Mateusz <mateuszb@poczta.onet.pl>
Date: Fri, 22 Sep 2017 14:54:53 +0200
Subject: [PATCH] swscale_unscaled: fix and speed up DITHER_COPY macro for x86
 with SSE2

---
 libswscale/swscale_unscaled.c | 220 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 185 insertions(+), 35 deletions(-)

Comments

James Almer Sept. 22, 2017, 3:47 p.m. UTC | #1
On 9/22/2017 12:23 PM, Mateusz wrote:
> New version of the patch -- now it uses the same logic independent of the target bitdepth.
> 
> For x86_64 it is much faster than the current code (with perfect quality); for x86_32 it is fast
> if you add to configure: --extra-cflags="-msse2"
> (for x86_32 with the default configure options it is slower than the current code, but with better quality).
> 
> Please review/test.
> 
> Mateusz

We don't accept intrinsics, or new arch specific code outside of arch
specific folders.

Either write this in NASM syntax, or if it *really* needs to be inlined,
use __asm__() inline blocks. But whichever you use, it needs to go in
the x86/ folder.
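
For reference, an __asm__() inline block has this general shape -- a trivial
16-bit byte swap just to show the form; the actual dither loop would obviously
be much more involved:

#include <stdint.h>

static inline uint16_t bswap16_inline(uint16_t x)
{
    /* rotating the 16-bit register by 8 swaps its two bytes */
    __asm__("rorw $8, %w0" : "+r"(x));
    return x;
}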
Mateusz Sept. 22, 2017, 5:06 p.m. UTC | #2
W dniu 2017-09-22 o 17:47, James Almer pisze:
> On 9/22/2017 12:23 PM, Mateusz wrote:
>> New version of the patch -- now it uses the same logic independent of the target bitdepth.
>>
>> For x86_64 it is much faster than the current code (with perfect quality); for x86_32 it is fast
>> if you add to configure: --extra-cflags="-msse2"
>> (for x86_32 with the default configure options it is slower than the current code, but with better quality).
>>
>> Please review/test.
>>
>> Mateusz
> 
> We don't accept intrinsics, or new arch specific code outside of arch
> specific folders.
> 
> Either write this in NASM syntax, or if it *really* needs to be inlined,
> use __asm__() inline blocks. But whichever you use, it needs to go in
> the x86/ folder.

Thank you for the information! I'm starting to learn NASM syntax (it could take months).
James Almer Sept. 22, 2017, 5:28 p.m. UTC | #3
On 9/22/2017 2:06 PM, Mateusz wrote:
> W dniu 2017-09-22 o 17:47, James Almer pisze:
>> On 9/22/2017 12:23 PM, Mateusz wrote:
>>> New version of the patch -- now it uses the same logic independent of the target bitdepth.
>>>
>>> For x86_64 it is much faster than the current code (with perfect quality); for x86_32 it is fast
>>> if you add to configure: --extra-cflags="-msse2"
>>> (for x86_32 with the default configure options it is slower than the current code, but with better quality).
>>>
>>> Please review/test.
>>>
>>> Mateusz
>>
>> We don't accept intrinsics, or new arch specific code outside of arch
>> specific folders.
>>
>> Either write this in NASM syntax, or if it *really* needs to be inlined,
>> use __asm__() inline blocks. But whichever you use, it needs to go in
>> the x86/ folder.
> 
> Thank you for the information! I'm starting to learn NASM syntax (it could take months).

https://blogs.gnome.org/rbultje/2017/07/14/writing-x86-simd-using-x86inc-asm/

Give that a read. It's a tutorial for handwritten ASM written in NASM
syntax using the x86inc.asm helper we use in our codebase. It simplifies
the work considerably.
Of course, you can also take a look at existing asm functions in the
project.
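
Also note that whichever form you pick, the dispatch happens at run time from the
CPU flags, not through a compile-time -msse2 check: the C fallback stays where it
is and the SSE2 version ends up under libswscale/x86/. Roughly like this (the
function names below are made up for illustration; av_get_cpu_flags() and
AV_CPU_FLAG_SSE2 are the real libavutil API, and ARCH_X86 comes from config.h):

#include <stdint.h>
#include "libavutil/cpu.h"

void ff_dither_copy_c   (uint8_t *dst, const uint16_t *src, int len);
void ff_dither_copy_sse2(uint8_t *dst, const uint16_t *src, int len); /* would live in libswscale/x86/ */

typedef void (*dither_copy_fn)(uint8_t *dst, const uint16_t *src, int len);

static dither_copy_fn select_dither_copy(void)
{
#if ARCH_X86
    /* run-time CPU detection -- no special compiler flags needed for the C code */
    if (av_get_cpu_flags() & AV_CPU_FLAG_SSE2)
        return ff_dither_copy_sse2;
#endif
    return ff_dither_copy_c;
}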

Patch

diff --git a/libswscale/swscale_unscaled.c b/libswscale/swscale_unscaled.c
index ef36aec..cd3e917 100644
--- a/libswscale/swscale_unscaled.c
+++ b/libswscale/swscale_unscaled.c
@@ -35,6 +35,10 @@ 
 #include "libavutil/avassert.h"
 #include "libavutil/avconfig.h"
 
+#if ARCH_X86_64 || (ARCH_X86_32 && defined(__SSE2__))
+#include <emmintrin.h>
+#endif
+
 DECLARE_ALIGNED(8, static const uint8_t, dithers)[8][8][8]={
 {
   {   0,  1,  0,  1,  0,  1,  0,  1,},
@@ -110,24 +114,6 @@  DECLARE_ALIGNED(8, static const uint8_t, dithers)[8][8][8]={
   { 112, 16,104,  8,118, 22,110, 14,},
 }};
 
-static const uint16_t dither_scale[15][16]={
-{    2,    3,    3,    5,    5,    5,    5,    5,    5,    5,    5,    5,    5,    5,    5,    5,},
-{    2,    3,    7,    7,   13,   13,   25,   25,   25,   25,   25,   25,   25,   25,   25,   25,},
-{    3,    3,    4,   15,   15,   29,   57,   57,   57,  113,  113,  113,  113,  113,  113,  113,},
-{    3,    4,    4,    5,   31,   31,   61,  121,  241,  241,  241,  241,  481,  481,  481,  481,},
-{    3,    4,    5,    5,    6,   63,   63,  125,  249,  497,  993,  993,  993,  993,  993, 1985,},
-{    3,    5,    6,    6,    6,    7,  127,  127,  253,  505, 1009, 2017, 4033, 4033, 4033, 4033,},
-{    3,    5,    6,    7,    7,    7,    8,  255,  255,  509, 1017, 2033, 4065, 8129,16257,16257,},
-{    3,    5,    6,    8,    8,    8,    8,    9,  511,  511, 1021, 2041, 4081, 8161,16321,32641,},
-{    3,    5,    7,    8,    9,    9,    9,    9,   10, 1023, 1023, 2045, 4089, 8177,16353,32705,},
-{    3,    5,    7,    8,   10,   10,   10,   10,   10,   11, 2047, 2047, 4093, 8185,16369,32737,},
-{    3,    5,    7,    8,   10,   11,   11,   11,   11,   11,   12, 4095, 4095, 8189,16377,32753,},
-{    3,    5,    7,    9,   10,   12,   12,   12,   12,   12,   12,   13, 8191, 8191,16381,32761,},
-{    3,    5,    7,    9,   10,   12,   13,   13,   13,   13,   13,   13,   14,16383,16383,32765,},
-{    3,    5,    7,    9,   10,   12,   14,   14,   14,   14,   14,   14,   14,   15,32767,32767,},
-{    3,    5,    7,    9,   11,   12,   14,   15,   15,   15,   15,   15,   15,   15,   16,65535,},
-};
-
 
 static void fillPlane(uint8_t *plane, int stride, int width, int height, int y,
                       uint8_t val)
@@ -1502,24 +1488,164 @@  static int packedCopyWrapper(SwsContext *c, const uint8_t *src[],
 }
 
 #define DITHER_COPY(dst, dstStride, src, srcStride, bswap, dbswap)\
-    uint16_t scale= dither_scale[dst_depth-1][src_depth-1];\
-    int shift= src_depth-dst_depth + dither_scale[src_depth-2][dst_depth-1];\
-    for (i = 0; i < height; i++) {\
-        const uint8_t *dither= dithers[src_depth-9][i&7];\
-        for (j = 0; j < length-7; j+=8){\
-            dst[j+0] = dbswap((bswap(src[j+0]) + dither[0])*scale>>shift);\
-            dst[j+1] = dbswap((bswap(src[j+1]) + dither[1])*scale>>shift);\
-            dst[j+2] = dbswap((bswap(src[j+2]) + dither[2])*scale>>shift);\
-            dst[j+3] = dbswap((bswap(src[j+3]) + dither[3])*scale>>shift);\
-            dst[j+4] = dbswap((bswap(src[j+4]) + dither[4])*scale>>shift);\
-            dst[j+5] = dbswap((bswap(src[j+5]) + dither[5])*scale>>shift);\
-            dst[j+6] = dbswap((bswap(src[j+6]) + dither[6])*scale>>shift);\
-            dst[j+7] = dbswap((bswap(src[j+7]) + dither[7])*scale>>shift);\
+    unsigned shift= src_depth-dst_depth, tmp;\
+    if (shiftonly) {\
+        for (i = 0; i < height; i++) {\
+            const uint8_t *dither= dithers[shift-1][i&7];\
+            for (j = 0; j < length-7; j+=8) {\
+                tmp = (bswap(src[j+0]) + dither[0])>>shift; dst[j+0] = dbswap(tmp - (tmp>>dst_depth));\
+                tmp = (bswap(src[j+1]) + dither[1])>>shift; dst[j+1] = dbswap(tmp - (tmp>>dst_depth));\
+                tmp = (bswap(src[j+2]) + dither[2])>>shift; dst[j+2] = dbswap(tmp - (tmp>>dst_depth));\
+                tmp = (bswap(src[j+3]) + dither[3])>>shift; dst[j+3] = dbswap(tmp - (tmp>>dst_depth));\
+                tmp = (bswap(src[j+4]) + dither[4])>>shift; dst[j+4] = dbswap(tmp - (tmp>>dst_depth));\
+                tmp = (bswap(src[j+5]) + dither[5])>>shift; dst[j+5] = dbswap(tmp - (tmp>>dst_depth));\
+                tmp = (bswap(src[j+6]) + dither[6])>>shift; dst[j+6] = dbswap(tmp - (tmp>>dst_depth));\
+                tmp = (bswap(src[j+7]) + dither[7])>>shift; dst[j+7] = dbswap(tmp - (tmp>>dst_depth));\
+            }\
+            for (; j < length; j++) {\
+                tmp = (bswap(src[j]) + dither[j&7])>>shift; dst[j] = dbswap(tmp - (tmp>>dst_depth));\
+            }\
+            dst += dstStride;\
+            src += srcStride;\
+        }\
+    } else {\
+        for (i = 0; i < height; i++) {\
+            const uint8_t *dither= dithers[shift-1][i&7];\
+            for (j = 0; j < length-7; j+=8) {\
+                tmp = bswap(src[j+0]); dst[j+0] = dbswap((tmp - (tmp>>dst_depth) + dither[0])>>shift);\
+                tmp = bswap(src[j+1]); dst[j+1] = dbswap((tmp - (tmp>>dst_depth) + dither[1])>>shift);\
+                tmp = bswap(src[j+2]); dst[j+2] = dbswap((tmp - (tmp>>dst_depth) + dither[2])>>shift);\
+                tmp = bswap(src[j+3]); dst[j+3] = dbswap((tmp - (tmp>>dst_depth) + dither[3])>>shift);\
+                tmp = bswap(src[j+4]); dst[j+4] = dbswap((tmp - (tmp>>dst_depth) + dither[4])>>shift);\
+                tmp = bswap(src[j+5]); dst[j+5] = dbswap((tmp - (tmp>>dst_depth) + dither[5])>>shift);\
+                tmp = bswap(src[j+6]); dst[j+6] = dbswap((tmp - (tmp>>dst_depth) + dither[6])>>shift);\
+                tmp = bswap(src[j+7]); dst[j+7] = dbswap((tmp - (tmp>>dst_depth) + dither[7])>>shift);\
+            }\
+            for (; j < length; j++) {\
+                tmp = bswap(src[j]); dst[j] = dbswap((tmp - (tmp>>dst_depth) + dither[j&7])>>shift);\
+            }\
+            dst += dstStride;\
+            src += srcStride;\
+        }\
+    }
+
+#define MM_BSWAP16(n) _mm_or_si128(_mm_srli_epi16(n, 8), _mm_slli_epi16(n, 8))
+
+#define DITHER_COPY_X64_1(dst, dstStride, src, srcStride, bswap, mbswap)\
+    unsigned shift= src_depth-8, tmp;\
+    __m128i A0, A1, D0;\
+    if (shiftonly) {\
+        for (i = 0; i < height; i++) {\
+            const uint8_t *dither= dithers[shift-1][i&7];\
+            D0 = _mm_loadl_epi64((__m128i const*)dither);\
+            D0 = _mm_unpacklo_epi8(D0, _mm_setzero_si128());\
+            for (j = 0; j < length-15; j+=16) {\
+                A0 = _mm_loadu_si128((__m128i const*)(src + j));\
+                A1 = _mm_loadu_si128((__m128i const*)(src + j+8));\
+                A0 = mbswap(A0);\
+                A1 = mbswap(A1);\
+                A0 = _mm_adds_epu16(A0, D0);\
+                A1 = _mm_adds_epu16(A1, D0);\
+                A0 = _mm_srli_epi16(A0, shift);\
+                A1 = _mm_srli_epi16(A1, shift);\
+                A0 = _mm_packus_epi16(A0, A1);\
+                _mm_storeu_si128((__m128i*)(dst + j), A0);\
+            }\
+            if (j < length-7) {\
+                A0 = _mm_loadu_si128((__m128i const*)(src + j));\
+                A0 = mbswap(A0);\
+                A0 = _mm_adds_epu16(A0, D0);\
+                A0 = _mm_srli_epi16(A0, shift);\
+                A0 = _mm_packus_epi16(A0, A0);\
+                _mm_storel_epi64((__m128i*)(dst + j), A0);\
+                j += 8;\
+            }\
+            for (; j < length; j++) {\
+                tmp = (bswap(src[j]) + dither[j&7])>>shift; dst[j] = tmp - (tmp>>8);\
+            }\
+            dst += dstStride;\
+            src += srcStride;\
+        }\
+    } else {\
+        for (i = 0; i < height; i++) {\
+            const uint8_t *dither= dithers[shift-1][i&7];\
+            D0 = _mm_loadl_epi64((__m128i const*)dither);\
+            D0 = _mm_unpacklo_epi8(D0, _mm_setzero_si128());\
+            for (j = 0; j < length-15; j+=16) {\
+                A0 = _mm_loadu_si128((__m128i const*)(src + j));\
+                A1 = _mm_loadu_si128((__m128i const*)(src + j+8));\
+                A0 = mbswap(A0);\
+                A1 = mbswap(A1);\
+                A0 = _mm_sub_epi16(A0, _mm_srli_epi16(A0, 8));\
+                A1 = _mm_sub_epi16(A1, _mm_srli_epi16(A1, 8));\
+                A0 = _mm_add_epi16(A0, D0);\
+                A1 = _mm_add_epi16(A1, D0);\
+                A0 = _mm_srli_epi16(A0, shift);\
+                A1 = _mm_srli_epi16(A1, shift);\
+                A0 = _mm_packus_epi16(A0, A1);\
+                _mm_storeu_si128((__m128i*)(dst + j), A0);\
+            }\
+            if (j < length-7) {\
+                A0 = _mm_loadu_si128((__m128i const*)(src + j));\
+                A0 = mbswap(A0);\
+                A0 = _mm_sub_epi16(A0, _mm_srli_epi16(A0, 8));\
+                A0 = _mm_add_epi16(A0, D0);\
+                A0 = _mm_srli_epi16(A0, shift);\
+                A0 = _mm_packus_epi16(A0, A0);\
+                _mm_storel_epi64((__m128i*)(dst + j), A0);\
+                j += 8;\
+            }\
+            for (; j < length; j++) {\
+                tmp = bswap(src[j]); dst[j] = (tmp - (tmp>>8) + dither[j&7])>>shift;\
+            }\
+            dst += dstStride;\
+            src += srcStride;\
+        }\
+    }
+
+#define DITHER_COPY_X64_2(dst, dstStride, src, srcStride, bswap, dbswap, mbswap, mdbswap)\
+    unsigned shift= src_depth-dst_depth, tmp;\
+    __m128i A0, D0;\
+    if (shiftonly) {\
+        for (i = 0; i < height; i++) {\
+            const uint8_t *dither= dithers[shift-1][i&7];\
+            D0 = _mm_loadl_epi64((__m128i const*)dither);\
+            D0 = _mm_unpacklo_epi8(D0, _mm_setzero_si128());\
+            for (j = 0; j < length-7; j+=8) {\
+                A0 = _mm_loadu_si128((__m128i const*)(src + j));\
+                A0 = mbswap(A0);\
+                A0 = _mm_adds_epu16(A0, D0);\
+                A0 = _mm_srli_epi16(A0, shift);\
+                A0 = _mm_sub_epi16(A0, _mm_srli_epi16(A0, dst_depth));\
+                A0 = mdbswap(A0);\
+                _mm_storeu_si128((__m128i*)(dst + j), A0);\
+            }\
+            for (; j < length; j++) {\
+                tmp = (bswap(src[j]) + dither[j&7])>>shift; dst[j] = dbswap(tmp - (tmp>>dst_depth));\
+            }\
+            dst += dstStride;\
+            src += srcStride;\
+        }\
+    } else {\
+        for (i = 0; i < height; i++) {\
+            const uint8_t *dither= dithers[shift-1][i&7];\
+            D0 = _mm_loadl_epi64((__m128i const*)dither);\
+            D0 = _mm_unpacklo_epi8(D0, _mm_setzero_si128());\
+            for (j = 0; j < length-7; j+=8) {\
+                A0 = _mm_loadu_si128((__m128i const*)(src + j));\
+                A0 = mbswap(A0);\
+                A0 = _mm_sub_epi16(A0, _mm_srli_epi16(A0, dst_depth));\
+                A0 = _mm_add_epi16(A0, D0);\
+                A0 = _mm_srli_epi16(A0, shift);\
+                A0 = mdbswap(A0);\
+                _mm_storeu_si128((__m128i*)(dst + j), A0);\
+            }\
+            for (; j < length; j++) {\
+                tmp = bswap(src[j]); dst[j] = dbswap((tmp - (tmp>>dst_depth) + dither[j&7])>>shift);\
+            }\
+            dst += dstStride;\
+            src += srcStride;\
         }\
-        for (; j < length; j++)\
-            dst[j] = dbswap((bswap(src[j]) + dither[j&7])*scale>>shift);\
-        dst += dstStride;\
-        src += srcStride;\
     }
 
 static int planarCopyWrapper(SwsContext *c, const uint8_t *src[],
@@ -1561,9 +1687,17 @@  static int planarCopyWrapper(SwsContext *c, const uint8_t *src[],
 
                 if (dst_depth == 8) {
                     if(isBE(c->srcFormat) == HAVE_BIGENDIAN){
+#if ARCH_X86_64 || (ARCH_X86_32 && defined(__SSE2__))
+                        DITHER_COPY_X64_1(dstPtr, dstStride[plane], srcPtr2, srcStride[plane]/2, , )
+#else
                         DITHER_COPY(dstPtr, dstStride[plane], srcPtr2, srcStride[plane]/2, , )
+#endif
                     } else {
+#if ARCH_X86_64 || (ARCH_X86_32 && defined(__SSE2__))
+                        DITHER_COPY_X64_1(dstPtr, dstStride[plane], srcPtr2, srcStride[plane]/2, av_bswap16, MM_BSWAP16)
+#else
                         DITHER_COPY(dstPtr, dstStride[plane], srcPtr2, srcStride[plane]/2, av_bswap16, )
+#endif
                     }
                 } else if (src_depth == 8) {
                     for (i = 0; i < height; i++) {
@@ -1642,15 +1776,31 @@  static int planarCopyWrapper(SwsContext *c, const uint8_t *src[],
                 } else {
                     if(isBE(c->srcFormat) == HAVE_BIGENDIAN){
                         if(isBE(c->dstFormat) == HAVE_BIGENDIAN){
+#if ARCH_X86_64 || (ARCH_X86_32 && defined(__SSE2__))
+                            DITHER_COPY_X64_2(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, , , , )
+#else
                             DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, , )
+#endif
                         } else {
+#if ARCH_X86_64 || (ARCH_X86_32 && defined(__SSE2__))
+                            DITHER_COPY_X64_2(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, , av_bswap16, , MM_BSWAP16)
+#else
                             DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, , av_bswap16)
+#endif
                         }
                     }else{
                         if(isBE(c->dstFormat) == HAVE_BIGENDIAN){
+#if ARCH_X86_64 || (ARCH_X86_32 && defined(__SSE2__))
+                            DITHER_COPY_X64_2(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, av_bswap16, , MM_BSWAP16, )
+#else
                             DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, av_bswap16, )
+#endif
                         } else {
+#if ARCH_X86_64 || (ARCH_X86_32 && defined(__SSE2__))
+                            DITHER_COPY_X64_2(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, av_bswap16, av_bswap16, MM_BSWAP16, MM_BSWAP16)
+#else
                             DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, av_bswap16, av_bswap16)
+#endif
                         }
                     }
                 }