@@ -194,7 +194,7 @@ static void yuv2yuvX_ ##opt(const int16_t *filter, int filterSize, \
return; \
}
-#define YUV2YUVX_FUNC(opt, step) \
+#define YUV2YUVX_FUNC(opt, step, tail) \
void ff_yuv2yuvX_ ##opt(const int16_t *filter, int filterSize, int srcOffset, \
uint8_t *dest, int dstW, \
const uint8_t *dither, int offset); \
@@ -202,17 +202,14 @@ static void yuv2yuvX_ ##opt(const int16_t *filter, int filterSize, \
const int16_t **src, uint8_t *dest, int dstW, \
const uint8_t *dither, int offset) \
{ \
- int remainder = (dstW % step); \
- int pixelsProcessed = dstW - remainder; \
if(((uintptr_t)dest) & 15){ \
yuv2yuvX_mmxext(filter, filterSize, src, dest, dstW, dither, offset); \
return; \
} \
- if(pixelsProcessed > 0) \
- ff_yuv2yuvX_ ##opt(filter, filterSize - 1, 0, dest - offset, pixelsProcessed + offset, dither, offset); \
- if(remainder > 0){ \
- ff_yuv2yuvX_mmxext(filter, filterSize - 1, pixelsProcessed, dest - offset, pixelsProcessed + remainder + offset, dither, offset); \
- } \
+ if (dstW >= step) \
+ ff_yuv2yuvX_ ##opt(filter, filterSize - 1, 0, dest - offset, dstW + offset, dither, offset); \
+ else \
+ yuv2yuvX_ ##tail(filter, filterSize, src, dest, dstW, dither, offset); \
return; \
}
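The wrapper no longer splits the row into a step-aligned body plus an mmxext remainder pass. Whenever at least one full step of pixels is available, the SIMD kernel is called for the entire width and finishes the ragged tail itself (see the .asm hunks below); only rows narrower than one step fall through to the next-narrower wrapper named by the new tail macro argument. A minimal C sketch of the new control flow, with stand-in names rather than the real FFmpeg prototypes:

    #include <stdio.h>

    enum { STEP = 64 };  /* pixels per unrolled SIMD iteration, e.g. the avx2 kernel */

    /* Stand-ins for ff_yuv2yuvX_<opt> and yuv2yuvX_<tail>, heavily simplified. */
    static void simd_kernel(int dstW)      { printf("kernel, tail included: %d px\n", dstW); }
    static void narrower_wrapper(int dstW) { printf("narrower wrapper: %d px\n", dstW); }

    static void dispatch(int dstW)
    {
        if (dstW >= STEP)
            simd_kernel(dstW);        /* one call covers both body and tail */
        else
            narrower_wrapper(dstW);   /* not even one full iteration of pixels */
    }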
@@ -220,13 +217,13 @@ static void yuv2yuvX_ ##opt(const int16_t *filter, int filterSize, \
YUV2YUVX_FUNC_MMX(mmxext, 16)
#endif
#if HAVE_SSE3_EXTERNAL
-YUV2YUVX_FUNC(sse3, 32)
+YUV2YUVX_FUNC(sse3, 32, mmxext)
#endif
#if HAVE_AVX2_EXTERNAL
-YUV2YUVX_FUNC(avx2, 64)
+YUV2YUVX_FUNC(avx2, 64, sse3)
#endif
#if ARCH_X86_64 && HAVE_AVX512_EXTERNAL
-YUV2YUVX_FUNC(avx512, 128)
+YUV2YUVX_FUNC(avx512, 128, avx2)
#endif
#define SCALE_FUNC(filter_n, from_bpc, to_bpc, opt) \
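The tail arguments form an explicit fall-back chain: avx512 drops to avx2, avx2 to sse3, and sse3 to mmxext, whose own wrapper (YUV2YUVX_FUNC_MMX above) is untouched here. A 40-pixel row entering the avx2 wrapper, for example, is below its 64-pixel step, so it is handed to the sse3 wrapper; 40 >= 32, so the sse3 kernel takes the whole row and finishes the last 8 pixels in its own tail pass.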
@@ -54,6 +54,8 @@ cglobal yuv2yuvX, 7, 7, 8, filter, filterSize, src, dest, dstW, dither, offset
%else
movq xm3, [ditherq]
%endif ; avx2
+ mov ditherq, dstWq
+ sub dstWq, mmsize * unroll
%if cpuflag(avx512)
mova m15, [permutation]
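Once the dither bytes have been loaded into xm3, ditherq is free, so it is reused to preserve the caller's dstW while dstWq itself is biased down by one unrolled iteration (mmsize * unroll; output samples are one byte each, so the pixel count doubles as a byte offset). The main loop below therefore stops at least one full iteration short of the row end, and the epilogue added at the bottom of the macro finishes the job. With the avx2 kernel (mmsize * unroll = 64) and dstW = 200, for instance, the loop bound becomes 136, full chunks run at offsets 0, 64 and 128, and the remaining 8 pixels are covered by the single overlapped pass described below.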
@@ -92,13 +94,17 @@ cglobal yuv2yuvX, 7, 7, 8, filter, filterSize, src, dest, dstW, dither, offset
%else
mova m0, [filterSizeq + 8]
%endif
- pmulhw m2, m0, [srcq + offsetq * 2]
- pmulhw m5, m0, [srcq + offsetq * 2 + mmsize]
+ movu m2, [srcq + offsetq * 2]
+ movu m5, [srcq + offsetq * 2 + mmsize]
+ pmulhw m2, m0, m2
+ pmulhw m5, m0, m5
paddw m3, m3, m2
paddw m4, m4, m5
%if cpuflag(sse3)
- pmulhw m2, m0, [srcq + offsetq * 2 + 2 * mmsize]
- pmulhw m5, m0, [srcq + offsetq * 2 + 3 * mmsize]
+ movu m2, [srcq + offsetq * 2 + 2 * mmsize]
+ movu m5, [srcq + offsetq * 2 + 3 * mmsize]
+ pmulhw m2, m0, m2
+ pmulhw m5, m0, m5
paddw m6, m6, m2
paddw m1, m1, m5
%endif
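In its legacy SSE encoding, pmulhw with a memory operand faults unless the address is 16-byte aligned, and the overlapped tail pass re-enters the loop at an offset that is generally not a multiple of mmsize, so the source reads can no longer be folded into the multiply. Splitting each into an explicit movu plus a register-register pmulhw makes the loads alignment-safe. The same transformation expressed with SSE2 intrinsics, purely as an illustration:

    #include <emmintrin.h>
    #include <stdint.h>

    /* pmulhw with an implicit memory operand must be aligned;
     * make the load explicit and unaligned instead. */
    static __m128i mulhi_from_unaligned(__m128i coeff, const int16_t *src)
    {
        __m128i v = _mm_loadu_si128((const __m128i *)src); /* movu */
        return _mm_mulhi_epi16(coeff, v);                  /* pmulhw */
    }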
@@ -131,8 +137,14 @@ cglobal yuv2yuvX, 7, 7, 8, filter, filterSize, src, dest, dstW, dither, offset
add offsetq, mmsize * unroll
mov filterSizeq, filterq
cmp offsetq, dstWq
- jb .outerloop
- RET
+ jb .outerloop
+
+ mov dstWq, offsetq
+ mov offsetq, ditherq
+ sub offsetq, mmsize * unroll
+ cmp dstWq, ditherq
+ jb .outerloop
+ REP_RET
%endmacro
INIT_MMX mmxext
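The epilogue runs when the biased main loop falls through, at which point offsetq holds the first unprocessed pixel. That position is parked in dstWq, offsetq is rewound to the saved width minus one iteration, and if anything is left the code jumps back into .outerloop for exactly one more pass: it lands exactly on the final chunk when dstW is a multiple of mmsize * unroll, and otherwise recomputes a few already-written pixels, which is harmless because each output pixel depends only on its own column of inputs. On the second fall-through the compare against ditherq fails and REP_RET returns. A scalar model of the scheme, assuming the initial offset is 0 and dstW >= STEP (which the C wrapper now guarantees):

    /* Sketch only: STEP stands for mmsize * unroll, process_chunk() for one
     * unrolled iteration of .outerloop writing dest[at .. at + STEP - 1]. */
    enum { STEP = 64 };

    static void process_chunk(unsigned char *dest, unsigned at)
    {
        (void)dest; (void)at;  /* one unrolled iteration of the SIMD body */
    }

    static void kernel(unsigned char *dest, unsigned dstW)
    {
        unsigned end = dstW - STEP;            /* sub dstWq, mmsize * unroll */
        unsigned at  = 0;

        do {                                   /* .outerloop */
            process_chunk(dest, at);
            at += STEP;
        } while (at < end);                    /* cmp offsetq, dstWq; jb     */

        if (at < dstW)                         /* cmp dstWq, ditherq; jb     */
            process_chunk(dest, dstW - STEP);  /* one overlapped final pass  */
    }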