
[FFmpeg-devel] checkasm/hevc_mc : add hevc_mc for checkasm

Message ID 20180409021239.4235-1-fanyingming@bytedance.com
State New

Commit Message

Yingming Fan April 9, 2018, 2:12 a.m. UTC
From: Yingming Fan <yingmingfan@gmail.com>

---
 Hi there,
 I plan to submit our arm32 NEON code for qpel and epel.
 But before that, I am submitting the hevc_mc checkasm code.
 This hevc_mc checkasm code checks every qpel and epel function, including 8, 10 and 12 bit.
 Tested with 'checkasm --test=hevc_mc' on Linux x86_64, macOS x86_64 and Linux arm64.
 The FATE tests also pass.

 tests/checkasm/Makefile   |   2 +-
 tests/checkasm/checkasm.c |   1 +
 tests/checkasm/checkasm.h |   1 +
 tests/checkasm/hevc_mc.c  | 547 ++++++++++++++++++++++++++++++++++++++++++++++
 tests/fate/checkasm.mak   |   1 +
 5 files changed, 551 insertions(+), 1 deletion(-)
 create mode 100644 tests/checkasm/hevc_mc.c

Comments

Yingming Fan April 12, 2018, 12:04 p.m. UTC | #1
Hello,

Any review?

Yingming Fan

> On Apr 9, 2018, at 10:12 AM, Yingming Fan <yingmingfan@gmail.com> wrote:
> 
> From: Yingming Fan <yingmingfan@gmail.com>
> 
> ---
> Hi, there.
> I plane to submit our arm32 neon codes for qpel and epel.
> While before this i will submit hevc_mc checkasm codes.
> This hevc_mc checkasm codes check every qpel and epel function, including 8 10 and 12 bit.
> Passed test by using 'checkasm --test=hevc_mc' under Linux x86_64 MacOS x86_64 and Linux arm64 platform.
> Also passed FATE test. 
> 
> tests/checkasm/Makefile   |   2 +-
> tests/checkasm/checkasm.c |   1 +
> tests/checkasm/checkasm.h |   1 +
> tests/checkasm/hevc_mc.c  | 547 ++++++++++++++++++++++++++++++++++++++++++++++
> tests/fate/checkasm.mak   |   1 +
> 5 files changed, 551 insertions(+), 1 deletion(-)
> create mode 100644 tests/checkasm/hevc_mc.c
> 
> diff --git a/tests/checkasm/Makefile b/tests/checkasm/Makefile
> index 0233d2f989..e6c94cd676 100644
> --- a/tests/checkasm/Makefile
> +++ b/tests/checkasm/Makefile
> @@ -23,7 +23,7 @@ AVCODECOBJS-$(CONFIG_EXR_DECODER)       += exrdsp.o
> AVCODECOBJS-$(CONFIG_HUFFYUV_DECODER)   += huffyuvdsp.o
> AVCODECOBJS-$(CONFIG_JPEG2000_DECODER)  += jpeg2000dsp.o
> AVCODECOBJS-$(CONFIG_PIXBLOCKDSP)       += pixblockdsp.o
> -AVCODECOBJS-$(CONFIG_HEVC_DECODER)      += hevc_add_res.o hevc_idct.o hevc_sao.o
> +AVCODECOBJS-$(CONFIG_HEVC_DECODER)      += hevc_add_res.o hevc_idct.o hevc_sao.o hevc_mc.o
> AVCODECOBJS-$(CONFIG_UTVIDEO_DECODER)   += utvideodsp.o
> AVCODECOBJS-$(CONFIG_V210_ENCODER)      += v210enc.o
> AVCODECOBJS-$(CONFIG_VP9_DECODER)       += vp9dsp.o
> diff --git a/tests/checkasm/checkasm.c b/tests/checkasm/checkasm.c
> index 20ce56932f..b95efc674d 100644
> --- a/tests/checkasm/checkasm.c
> +++ b/tests/checkasm/checkasm.c
> @@ -117,6 +117,7 @@ static const struct {
>         { "hevc_add_res", checkasm_check_hevc_add_res },
>         { "hevc_idct", checkasm_check_hevc_idct },
>         { "hevc_sao", checkasm_check_hevc_sao },
> +        { "hevc_mc", checkasm_check_hevc_mc },
>     #endif
>     #if CONFIG_HUFFYUV_DECODER
>         { "huffyuvdsp", checkasm_check_huffyuvdsp },
> diff --git a/tests/checkasm/checkasm.h b/tests/checkasm/checkasm.h
> index dcab74de06..5a4a612da7 100644
> --- a/tests/checkasm/checkasm.h
> +++ b/tests/checkasm/checkasm.h
> @@ -58,6 +58,7 @@ void checkasm_check_h264qpel(void);
> void checkasm_check_hevc_add_res(void);
> void checkasm_check_hevc_idct(void);
> void checkasm_check_hevc_sao(void);
> +void checkasm_check_hevc_mc(void);
> void checkasm_check_huffyuvdsp(void);
> void checkasm_check_jpeg2000dsp(void);
> void checkasm_check_llviddsp(void);
> diff --git a/tests/checkasm/hevc_mc.c b/tests/checkasm/hevc_mc.c
> new file mode 100644
> index 0000000000..018f322c11
> --- /dev/null
> +++ b/tests/checkasm/hevc_mc.c
> @@ -0,0 +1,547 @@
> +/*
> + * Copyright (c) 2018 Yingming Fan <yingmingfan@gmail.com>
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License along
> + * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
> + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
> + */
> +
> +#include <string.h>
> +
> +#include "libavutil/intreadwrite.h"
> +
> +#include "libavcodec/avcodec.h"
> +
> +#include "libavcodec/hevcdsp.h"
> +
> +#include "checkasm.h"
> +
> +static const uint32_t pixel_mask[3] = { 0xffffffff, 0x03ff03ff, 0x0fff0fff };
> +static const uint32_t idx_width_map[8][2] = {{1, 4}, {3, 8}, {4, 12}, {5, 16}, {6, 24}, {7, 32}, {8, 48}, {9, 64}};
> +#define SIZEOF_PIXEL ((bit_depth + 7) / 8)
> +#define PIXEL_STRIDE (AV_INPUT_BUFFER_PADDING_SIZE + MAX_PB_SIZE + AV_INPUT_BUFFER_PADDING_SIZE)
> +#define BUF_SIZE ((MAX_PB_SIZE+4+4) * PIXEL_STRIDE * 2)
> +
> +#define randomize_buffers(buf0, buf1, size)                 \
> +    do {                                                    \
> +        uint32_t mask = pixel_mask[(bit_depth - 8) >> 1];   \
> +        int k;                                              \
> +        for (k = 0; k < size; k += 4) {                     \
> +            uint32_t r = rnd() & mask;                      \
> +            AV_WN32A(buf0 + k, r);                          \
> +            AV_WN32A(buf1 + k, r);                          \
> +        }                                                   \
> +    } while (0)
> +
> +#define randomize_buffers2(buf0, buf1, size)                \
> +    do {                                                    \
> +        uint32_t mask = 0x1fff1fff;                         \
> +        int k;                                              \
> +        for (k = 0; k < size; k += 4) {                     \
> +            uint32_t r = rnd() & mask;                      \
> +            AV_WN32A(buf0 + k, r);                          \
> +            AV_WN32A(buf1 + k, r);                          \
> +        }                                                   \
> +    } while (0)
> +
> +static int get_random(min, max, mod)
> +{
> +    int value = rnd()%mod;
> +    int sign = rnd()&0x1 == 0 ? 1 : -1;
> +    return av_clip(sign*value, min, max);
> +}
> +
> +static void check_put_hevc_qpel(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 4; y++) {
> +            for (x = 0; x < 4; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
> +                                  int height, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_qpel[idx][!!y][!!x], "put_hevc_qpel_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref((int16_t *)dst0, src0 + offset, stride, height, x, y, width);
> +                    call_new((int16_t *)dst1, src1 + offset, stride, height, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new((int16_t *)dst1, src1 + offset, stride, height, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_epel(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 8; y++) {
> +            for (x = 0; x < 8; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
> +                                  int height, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_epel[idx][!!y][!!x], "put_hevc_epel_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref((int16_t *)dst0, src0 + offset, stride, height, x, y, width);
> +                    call_new((int16_t *)dst1, src1 + offset, stride, height, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new((int16_t *)dst1, src1 + offset, stride, height, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_qpel_uni(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 4; y++) {
> +            for (x = 0; x < 4; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
> +                                  ptrdiff_t srcstride, int height, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_qpel_uni[idx][!!y][!!x], "put_hevc_qpel_uni_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, height, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, height, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, height, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_epel_uni(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 8; y++) {
> +            for (x = 0; x < 8; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
> +                                  ptrdiff_t srcstride, int height, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_epel_uni[idx][!!y][!!x], "put_hevc_epel_uni_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, height, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, height, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, height, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_qpel_bi(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 4; y++) {
> +            for (x = 0; x < 4; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *_src,
> +                                  ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my,
> +                                  int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                randomize_buffers2(src00, src11, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_qpel_bi[idx][!!y][!!x], "put_hevc_qpel_bi_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, (int16_t *)src00, height, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_epel_bi(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 8; y++) {
> +            for (x = 0; x < 8; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *_src,
> +                                  ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my,
> +                                  int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                randomize_buffers2(src00, src11, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_epel_bi[idx][!!y][!!x], "put_hevc_epel_bi_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, (int16_t *)src00, height, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_qpel_uni_w(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 4; y++) {
> +            for (x = 0; x < 4; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                int denom = rnd()%8;
> +                int wx = (1<<denom) + get_random(-127, 128, 129);
> +                int ox = get_random(-127, 128, 129);
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
> +                                  int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                randomize_buffers(src00, src11, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_qpel_uni_w[idx][!!y][!!x], "put_hevc_qpel_uni_w_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, height, denom, wx, ox, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, height, denom, wx, ox, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, height, denom, wx, ox, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_epel_uni_w(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 8; y++) {
> +            for (x = 0; x < 8; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                int denom = rnd()%8;
> +                int wx = (1<<denom) + get_random(-127, 128, 129);
> +                int ox = get_random(-127, 128, 129);
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
> +                                  int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                randomize_buffers(src00, src11, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_epel_uni_w[idx][!!y][!!x], "put_hevc_epel_uni_w_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, height, denom, wx, ox, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, height, denom, wx, ox, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, height, denom, wx, ox, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_qpel_bi_w(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 4; y++) {
> +            for (x = 0; x < 4; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                int denom = rnd()%8;
> +                int wx = (1<<denom) + get_random(-127, 128, 129);
> +                int ox = get_random(-127, 128, 129);
> +                int wx1 = (1<<denom) + get_random(-127, 128, 129);
> +                int ox1 = get_random(-127, 128, 129);
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t _srcstride,
> +                                  int16_t *src2, int height, int denom, int wx0, int wx1,
> +                                  int ox0, int ox1, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                randomize_buffers2(src00, src11, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_qpel_bi_w[idx][!!y][!!x], "put_hevc_qpel_bi_w_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, (int16_t *)src00, height, denom, wx, wx1, ox, ox1, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, denom, wx, wx1, ox, ox1, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, denom, wx, wx1, ox, ox1, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_epel_bi_w(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 8; y++) {
> +            for (x = 0; x < 8; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                int denom = rnd()%8;
> +                int wx = (1<<denom) + get_random(-127, 128, 129);
> +                int ox = get_random(-127, 128, 129);
> +                int wx1 = (1<<denom) + get_random(-127, 128, 129);
> +                int ox1 = get_random(-127, 128, 129);
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t _srcstride,
> +                                  int16_t *src2, int height, int denom, int wx0, int wx1,
> +                                  int ox0, int ox1, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                randomize_buffers2(src00, src11, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_epel_bi_w[idx][!!y][!!x], "put_hevc_epel_bi_w_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, (int16_t *)src00, height, denom, wx, wx1, ox, ox1, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, denom, wx, wx1, ox, ox1, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, denom, wx, wx1, ox, ox1, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +void checkasm_check_hevc_mc(void)
> +{
> +    int bit_depth;
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_qpel(h, bit_depth);
> +    }
> +    report("put_hevc_qpel");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_qpel_uni(h, bit_depth);
> +    }
> +    report("put_hevc_qpel_uni");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_qpel_bi(h, bit_depth);
> +    }
> +    report("put_hevc_qpel_bi");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_qpel_uni_w(h, bit_depth);
> +    }
> +    report("put_hevc_qpel_uni_w");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_qpel_bi_w(h, bit_depth);
> +    }
> +    report("put_hevc_qpel_bi_w");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_epel(h, bit_depth);
> +    }
> +    report("put_hevc_epel");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_epel_uni(h, bit_depth);
> +    }
> +    report("put_hevc_epel_uni");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_epel_bi(h, bit_depth);
> +    }
> +    report("put_hevc_epel_bi");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_epel_uni_w(h, bit_depth);
> +    }
> +    report("put_hevc_epel_uni_w");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_epel_bi_w(h, bit_depth);
> +    }
> +    report("put_hevc_epel_bi_w");
> +}
> diff --git a/tests/fate/checkasm.mak b/tests/fate/checkasm.mak
> index a722b4a917..aeb47091a1 100644
> --- a/tests/fate/checkasm.mak
> +++ b/tests/fate/checkasm.mak
> @@ -15,6 +15,7 @@ FATE_CHECKASM = fate-checkasm-aacpsdsp                                  \
>                 fate-checkasm-hevc_add_res                              \
>                 fate-checkasm-hevc_idct                                 \
>                 fate-checkasm-hevc_sao                                  \
> +                fate-checkasm-hevc_mc                                   \
>                 fate-checkasm-jpeg2000dsp                               \
>                 fate-checkasm-llviddsp                                  \
>                 fate-checkasm-llviddspenc                               \
> -- 
> 2.15.1 (Apple Git-101)
>
Shengbin Meng April 17, 2018, 7:29 a.m. UTC | #2
> On Apr 9, 2018, at 10:12, Yingming Fan <yingmingfan@gmail.com> wrote:
> 
> From: Yingming Fan <yingmingfan@gmail.com>
> 
> ---
> Hi, there.
> I plane to submit our arm32 neon codes for qpel and epel.
> While before this i will submit hevc_mc checkasm codes.
> This hevc_mc checkasm codes check every qpel and epel function, including 8 10 and 12 bit.
> Passed test by using 'checkasm --test=hevc_mc' under Linux x86_64 MacOS x86_64 and Linux arm64 platform.
> Also passed FATE test. 
> 
> tests/checkasm/Makefile   |   2 +-
> tests/checkasm/checkasm.c |   1 +
> tests/checkasm/checkasm.h |   1 +
> tests/checkasm/hevc_mc.c  | 547 ++++++++++++++++++++++++++++++++++++++++++++++
> tests/fate/checkasm.mak   |   1 +
> 5 files changed, 551 insertions(+), 1 deletion(-)
> create mode 100644 tests/checkasm/hevc_mc.c
> 
> diff --git a/tests/checkasm/Makefile b/tests/checkasm/Makefile
> index 0233d2f989..e6c94cd676 100644
> --- a/tests/checkasm/Makefile
> +++ b/tests/checkasm/Makefile
> @@ -23,7 +23,7 @@ AVCODECOBJS-$(CONFIG_EXR_DECODER)       += exrdsp.o
> AVCODECOBJS-$(CONFIG_HUFFYUV_DECODER)   += huffyuvdsp.o
> AVCODECOBJS-$(CONFIG_JPEG2000_DECODER)  += jpeg2000dsp.o
> AVCODECOBJS-$(CONFIG_PIXBLOCKDSP)       += pixblockdsp.o
> -AVCODECOBJS-$(CONFIG_HEVC_DECODER)      += hevc_add_res.o hevc_idct.o hevc_sao.o
> +AVCODECOBJS-$(CONFIG_HEVC_DECODER)      += hevc_add_res.o hevc_idct.o hevc_sao.o hevc_mc.o
> AVCODECOBJS-$(CONFIG_UTVIDEO_DECODER)   += utvideodsp.o
> AVCODECOBJS-$(CONFIG_V210_ENCODER)      += v210enc.o
> AVCODECOBJS-$(CONFIG_VP9_DECODER)       += vp9dsp.o
> diff --git a/tests/checkasm/checkasm.c b/tests/checkasm/checkasm.c
> index 20ce56932f..b95efc674d 100644
> --- a/tests/checkasm/checkasm.c
> +++ b/tests/checkasm/checkasm.c
> @@ -117,6 +117,7 @@ static const struct {
>         { "hevc_add_res", checkasm_check_hevc_add_res },
>         { "hevc_idct", checkasm_check_hevc_idct },
>         { "hevc_sao", checkasm_check_hevc_sao },
> +        { "hevc_mc", checkasm_check_hevc_mc },
>     #endif
>     #if CONFIG_HUFFYUV_DECODER
>         { "huffyuvdsp", checkasm_check_huffyuvdsp },
> diff --git a/tests/checkasm/checkasm.h b/tests/checkasm/checkasm.h
> index dcab74de06..5a4a612da7 100644
> --- a/tests/checkasm/checkasm.h
> +++ b/tests/checkasm/checkasm.h
> @@ -58,6 +58,7 @@ void checkasm_check_h264qpel(void);
> void checkasm_check_hevc_add_res(void);
> void checkasm_check_hevc_idct(void);
> void checkasm_check_hevc_sao(void);
> +void checkasm_check_hevc_mc(void);
> void checkasm_check_huffyuvdsp(void);
> void checkasm_check_jpeg2000dsp(void);
> void checkasm_check_llviddsp(void);
> diff --git a/tests/checkasm/hevc_mc.c b/tests/checkasm/hevc_mc.c
> new file mode 100644
> index 0000000000..018f322c11
> --- /dev/null
> +++ b/tests/checkasm/hevc_mc.c
> @@ -0,0 +1,547 @@
> +/*
> + * Copyright (c) 2018 Yingming Fan <yingmingfan@gmail.com>
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License along
> + * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
> + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
> + */
> +
> +#include <string.h>
> +
> +#include "libavutil/intreadwrite.h"
> +
> +#include "libavcodec/avcodec.h"
> +
> +#include "libavcodec/hevcdsp.h"
> +
> +#include "checkasm.h"
> +
> +static const uint32_t pixel_mask[3] = { 0xffffffff, 0x03ff03ff, 0x0fff0fff };
> +static const uint32_t idx_width_map[8][2] = {{1, 4}, {3, 8}, {4, 12}, {5, 16}, {6, 24}, {7, 32}, {8, 48}, {9, 64}};
Why not include block widths 2 and 6? I notice that there is already some optimization code for those.
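For example, extending the table like this (untested sketch; I am assuming the function-table indices for widths 2 and 6 are 0 and 2, following the pattern of the existing entries):

    static const uint32_t idx_width_map[10][2] = {{0, 2}, {1, 4}, {2, 6}, {3, 8}, {4, 12},
                                                  {5, 16}, {6, 24}, {7, 32}, {8, 48}, {9, 64}};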

> +#define SIZEOF_PIXEL ((bit_depth + 7) / 8)
> +#define PIXEL_STRIDE (AV_INPUT_BUFFER_PADDING_SIZE + MAX_PB_SIZE + AV_INPUT_BUFFER_PADDING_SIZE)
> +#define BUF_SIZE ((MAX_PB_SIZE+4+4) * PIXEL_STRIDE * 2)
> +
> +#define randomize_buffers(buf0, buf1, size)                 \
> +    do {                                                    \
> +        uint32_t mask = pixel_mask[(bit_depth - 8) >> 1];   \
> +        int k;                                              \
> +        for (k = 0; k < size; k += 4) {                     \
> +            uint32_t r = rnd() & mask;                      \
> +            AV_WN32A(buf0 + k, r);                          \
> +            AV_WN32A(buf1 + k, r);                          \
> +        }                                                   \
> +    } while (0)
> +
> +#define randomize_buffers2(buf0, buf1, size)                \
> +    do {                                                    \
> +        uint32_t mask = 0x1fff1fff;                         \
> +        int k;                                              \
> +        for (k = 0; k < size; k += 4) {                     \
> +            uint32_t r = rnd() & mask;                      \
> +            AV_WN32A(buf0 + k, r);                          \
> +            AV_WN32A(buf1 + k, r);                          \
> +        }                                                   \
> +    } while (0)
> +
> +static int get_random(min, max, mod)
Function parameters should have types.
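For example, something along these lines (untested sketch; note that the sign expression also needs parentheses, because == binds more tightly than & in C):

    static int get_random(int min, int max, int mod)
    {
        int value = rnd() % mod;
        int sign  = (rnd() & 0x1) == 0 ? 1 : -1; /* without the parentheses this always evaluates to -1 */
        return av_clip(sign * value, min, max);
    }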

> +{
> +    int value = rnd()%mod;
> +    int sign = rnd()&0x1 == 0 ? 1 : -1;
> +    return av_clip(sign*value, min, max);
> +}
> +
> +static void check_put_hevc_qpel(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 4; y++) {
> +            for (x = 0; x < 4; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
So we are only testing the case of height == width? It would be better to cover non-square blocks as well.
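One simple way to do that (untested sketch; the list of heights here is arbitrary) would be to pick the height independently of the width inside the loop:

    static const int heights[] = { 4, 8, 16, 32, 64 };
    int height = heights[rnd() % FF_ARRAY_ELEMS(heights)];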

> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
> +                                  int height, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_qpel[idx][!!y][!!x], "put_hevc_qpel_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref((int16_t *)dst0, src0 + offset, stride, height, x, y, width);
> +                    call_new((int16_t *)dst1, src1 + offset, stride, height, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new((int16_t *)dst1, src1 + offset, stride, height, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +

All these check_put_hevc_* functions look very similar. Can you reduce the duplicated code in some way, for example by using macros?
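As one small illustration of the macro idea (untested sketch with a made-up name, shown here only for the repeated per-bit-depth driver loops in checkasm_check_hevc_mc; the check_put_hevc_* bodies could be folded in a similar way):

    #define CHECK_ALL_DEPTHS(checkfn, name)        \
        do {                                       \
            int bd;                                \
            for (bd = 8; bd <= 12; bd += 2) {      \
                HEVCDSPContext h;                  \
                ff_hevc_dsp_init(&h, bd);          \
                checkfn(h, bd);                    \
            }                                      \
            report(name);                          \
        } while (0)

    /* usage: CHECK_ALL_DEPTHS(check_put_hevc_qpel, "put_hevc_qpel"); */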
> +static void check_put_hevc_epel(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 8; y++) {
> +            for (x = 0; x < 8; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
> +                                  int height, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_epel[idx][!!y][!!x], "put_hevc_epel_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref((int16_t *)dst0, src0 + offset, stride, height, x, y, width);
> +                    call_new((int16_t *)dst1, src1 + offset, stride, height, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new((int16_t *)dst1, src1 + offset, stride, height, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_qpel_uni(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 4; y++) {
> +            for (x = 0; x < 4; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
> +                                  ptrdiff_t srcstride, int height, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_qpel_uni[idx][!!y][!!x], "put_hevc_qpel_uni_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, height, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, height, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, height, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_epel_uni(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 8; y++) {
> +            for (x = 0; x < 8; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
> +                                  ptrdiff_t srcstride, int height, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_epel_uni[idx][!!y][!!x], "put_hevc_epel_uni_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, height, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, height, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, height, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_qpel_bi(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 4; y++) {
> +            for (x = 0; x < 4; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *_src,
> +                                  ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my,
> +                                  int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                randomize_buffers2(src00, src11, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_qpel_bi[idx][!!y][!!x], "put_hevc_qpel_bi_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, (int16_t *)src00, height, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_epel_bi(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 8; y++) {
> +            for (x = 0; x < 8; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *_src,
> +                                  ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my,
> +                                  int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                randomize_buffers2(src00, src11, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_epel_bi[idx][!!y][!!x], "put_hevc_epel_bi_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, (int16_t *)src00, height, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_qpel_uni_w(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 4; y++) {
> +            for (x = 0; x < 4; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                int denom = rnd()%8;
> +                int wx = (1<<denom) + get_random(-127, 128, 129);
> +                int ox = get_random(-127, 128, 129);
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
> +                                  int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                randomize_buffers(src00, src11, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_qpel_uni_w[idx][!!y][!!x], "put_hevc_qpel_uni_w_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, height, denom, wx, ox, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, height, denom, wx, ox, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, height, denom, wx, ox, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_epel_uni_w(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 8; y++) {
> +            for (x = 0; x < 8; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                int denom = rnd()%8;
> +                int wx = (1<<denom) + get_random(-127, 128, 129);
> +                int ox = get_random(-127, 128, 129);
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
> +                                  int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                randomize_buffers(src00, src11, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_epel_uni_w[idx][!!y][!!x], "put_hevc_epel_uni_w_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, height, denom, wx, ox, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, height, denom, wx, ox, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, height, denom, wx, ox, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_qpel_bi_w(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 4; y++) {
> +            for (x = 0; x < 4; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                int denom = rnd()%8;
> +                int wx = (1<<denom) + get_random(-127, 128, 129);
> +                int ox = get_random(-127, 128, 129);
> +                int wx1 = (1<<denom) + get_random(-127, 128, 129);
> +                int ox1 = get_random(-127, 128, 129);
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t _srcstride,
> +                                  int16_t *src2, int height, int denom, int wx0, int wx1,
> +                                  int ox0, int ox1, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                randomize_buffers2(src00, src11, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_qpel_bi_w[idx][!!y][!!x], "put_hevc_qpel_bi_w_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, (int16_t *)src00, height, denom, wx, wx1, ox, ox1, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, denom, wx, wx1, ox, ox1, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, denom, wx, wx1, ox, ox1, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +static void check_put_hevc_epel_bi_w(HEVCDSPContext h, int bit_depth)
> +{
> +    int i, x, y;
> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
> +    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
> +        for (y = 0; y < 8; y++) {
> +            for (x = 0; x < 8; x++) {
> +                int idx = idx_width_map[i][0];
> +                int width = idx_width_map[i][1];
> +                int height= width;
> +                int denom = rnd()%8;
> +                int wx = (1<<denom) + get_random(-127, 128, 129);
> +                int ox = get_random(-127, 128, 129);
> +                int wx1 = (1<<denom) + get_random(-127, 128, 129);
> +                int ox1 = get_random(-127, 128, 129);
> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
> +                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t _srcstride,
> +                                  int16_t *src2, int height, int denom, int wx0, int wx1,
> +                                  int ox0, int ox1, intptr_t mx, intptr_t my, int width);
> +
> +                randomize_buffers(src0, src1, BUF_SIZE);
> +                randomize_buffers2(src00, src11, BUF_SIZE);
> +                memset(dst0, 0, BUF_SIZE);
> +                memset(dst1, 0, BUF_SIZE);
> +
> +                if (check_func(h.put_hevc_epel_bi_w[idx][!!y][!!x], "put_hevc_epel_bi_w_%dx%d_%d_%d_%d", width, height,
> +                    x, y, bit_depth)) {
> +                    call_ref(dst0, stride, src0 + offset, stride, (int16_t *)src00, height, denom, wx, wx1, ox, ox1, x, y, width);
> +                    call_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, denom, wx, wx1, ox, ox1, x, y, width);
> +                    if (memcmp(dst0, dst1, BUF_SIZE))
> +                        fail();
> +                    bench_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, denom, wx, wx1, ox, ox1, x, y, width);
> +                }
> +            }
> +        }
> +    }
> +}
> +
> +void checkasm_check_hevc_mc(void)
> +{
> +    int bit_depth;
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_qpel(h, bit_depth);
> +    }
> +    report("put_hevc_qpel");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_qpel_uni(h, bit_depth);
> +    }
> +    report("put_hevc_qpel_uni");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_qpel_bi(h, bit_depth);
> +    }
> +    report("put_hevc_qpel_bi");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_qpel_uni_w(h, bit_depth);
> +    }
> +    report("put_hevc_qpel_uni_w");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_qpel_bi_w(h, bit_depth);
> +    }
> +    report("put_hevc_qpel_bi_w");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_epel(h, bit_depth);
> +    }
> +    report("put_hevc_epel");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_epel_uni(h, bit_depth);
> +    }
> +    report("put_hevc_epel_uni");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_epel_bi(h, bit_depth);
> +    }
> +    report("put_hevc_epel_bi");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_epel_uni_w(h, bit_depth);
> +    }
> +    report("put_hevc_epel_uni_w");
> +
> +    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
> +        HEVCDSPContext h;
> +
> +        ff_hevc_dsp_init(&h, bit_depth);
> +        check_put_hevc_epel_bi_w(h, bit_depth);
> +    }
> +    report("put_hevc_epel_bi_w");
> +}
> diff --git a/tests/fate/checkasm.mak b/tests/fate/checkasm.mak
> index a722b4a917..aeb47091a1 100644
> --- a/tests/fate/checkasm.mak
> +++ b/tests/fate/checkasm.mak
> @@ -15,6 +15,7 @@ FATE_CHECKASM = fate-checkasm-aacpsdsp                                  \
>                 fate-checkasm-hevc_add_res                              \
>                 fate-checkasm-hevc_idct                                 \
>                 fate-checkasm-hevc_sao                                  \
> +                fate-checkasm-hevc_mc                                   \
>                 fate-checkasm-jpeg2000dsp                               \
>                 fate-checkasm-llviddsp                                  \
>                 fate-checkasm-llviddspenc                               \
> -- 
> 2.15.1 (Apple Git-101)
> 
> _______________________________________________
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
Yingming Fan April 27, 2018, 11:30 a.m. UTC | #3
Thank you for your review.

> On Apr 17, 2018, at 15:29, Shengbin Meng <shengbinmeng@gmail.com> wrote:
> 
> 
> 
>> On Apr 9, 2018, at 10:12, Yingming Fan <yingmingfan@gmail.com> wrote:
>> 
>> From: Yingming Fan <yingmingfan@gmail.com>
>> 
>> ---
>> Hi, there.
>> I plane to submit our arm32 neon codes for qpel and epel.
>> While before this i will submit hevc_mc checkasm codes.
>> This hevc_mc checkasm codes check every qpel and epel function, including 8 10 and 12 bit.
>> Passed test by using 'checkasm --test=hevc_mc' under Linux x86_64 MacOS x86_64 and Linux arm64 platform.
>> Also passed FATE test. 
>> 
>> tests/checkasm/Makefile   |   2 +-
>> tests/checkasm/checkasm.c |   1 +
>> tests/checkasm/checkasm.h |   1 +
>> tests/checkasm/hevc_mc.c  | 547 ++++++++++++++++++++++++++++++++++++++++++++++
>> tests/fate/checkasm.mak   |   1 +
>> 5 files changed, 551 insertions(+), 1 deletion(-)
>> create mode 100644 tests/checkasm/hevc_mc.c
>> 
>> diff --git a/tests/checkasm/Makefile b/tests/checkasm/Makefile
>> index 0233d2f989..e6c94cd676 100644
>> --- a/tests/checkasm/Makefile
>> +++ b/tests/checkasm/Makefile
>> @@ -23,7 +23,7 @@ AVCODECOBJS-$(CONFIG_EXR_DECODER)       += exrdsp.o
>> AVCODECOBJS-$(CONFIG_HUFFYUV_DECODER)   += huffyuvdsp.o
>> AVCODECOBJS-$(CONFIG_JPEG2000_DECODER)  += jpeg2000dsp.o
>> AVCODECOBJS-$(CONFIG_PIXBLOCKDSP)       += pixblockdsp.o
>> -AVCODECOBJS-$(CONFIG_HEVC_DECODER)      += hevc_add_res.o hevc_idct.o hevc_sao.o
>> +AVCODECOBJS-$(CONFIG_HEVC_DECODER)      += hevc_add_res.o hevc_idct.o hevc_sao.o hevc_mc.o
>> AVCODECOBJS-$(CONFIG_UTVIDEO_DECODER)   += utvideodsp.o
>> AVCODECOBJS-$(CONFIG_V210_ENCODER)      += v210enc.o
>> AVCODECOBJS-$(CONFIG_VP9_DECODER)       += vp9dsp.o
>> diff --git a/tests/checkasm/checkasm.c b/tests/checkasm/checkasm.c
>> index 20ce56932f..b95efc674d 100644
>> --- a/tests/checkasm/checkasm.c
>> +++ b/tests/checkasm/checkasm.c
>> @@ -117,6 +117,7 @@ static const struct {
>>        { "hevc_add_res", checkasm_check_hevc_add_res },
>>        { "hevc_idct", checkasm_check_hevc_idct },
>>        { "hevc_sao", checkasm_check_hevc_sao },
>> +        { "hevc_mc", checkasm_check_hevc_mc },
>>    #endif
>>    #if CONFIG_HUFFYUV_DECODER
>>        { "huffyuvdsp", checkasm_check_huffyuvdsp },
>> diff --git a/tests/checkasm/checkasm.h b/tests/checkasm/checkasm.h
>> index dcab74de06..5a4a612da7 100644
>> --- a/tests/checkasm/checkasm.h
>> +++ b/tests/checkasm/checkasm.h
>> @@ -58,6 +58,7 @@ void checkasm_check_h264qpel(void);
>> void checkasm_check_hevc_add_res(void);
>> void checkasm_check_hevc_idct(void);
>> void checkasm_check_hevc_sao(void);
>> +void checkasm_check_hevc_mc(void);
>> void checkasm_check_huffyuvdsp(void);
>> void checkasm_check_jpeg2000dsp(void);
>> void checkasm_check_llviddsp(void);
>> diff --git a/tests/checkasm/hevc_mc.c b/tests/checkasm/hevc_mc.c
>> new file mode 100644
>> index 0000000000..018f322c11
>> --- /dev/null
>> +++ b/tests/checkasm/hevc_mc.c
>> @@ -0,0 +1,547 @@
>> +/*
>> + * Copyright (c) 2018 Yingming Fan <yingmingfan@gmail.com>
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License as published by
>> + * the Free Software Foundation; either version 2 of the License, or
>> + * (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>> + * GNU General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU General Public License along
>> + * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
>> + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
>> + */
>> +
>> +#include <string.h>
>> +
>> +#include "libavutil/intreadwrite.h"
>> +
>> +#include "libavcodec/avcodec.h"
>> +
>> +#include "libavcodec/hevcdsp.h"
>> +
>> +#include "checkasm.h"
>> +
>> +static const uint32_t pixel_mask[3] = { 0xffffffff, 0x03ff03ff, 0x0fff0fff };
>> +static const uint32_t idx_width_map[8][2] = {{1, 4}, {3, 8}, {4, 12}, {5, 16}, {6, 24}, {7, 32}, {8, 48}, {9, 64}};
> Why not include block widths 2 and 6? I notice that there is already some optimized code for them.

I found that the x86 asm code has no width 2 and 6 functions, so I dropped 2 and 6 here.
After your reminder I see that the NEON code does cover widths 2 and 6.
I will fix this in patch v2.
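
For reference, assuming the index mapping follows ff_hevc_pel_weight (i.e. idx 0 for width 2 and idx 2 for width 6), the table in v2 would become:

static const uint32_t idx_width_map[10][2] = {{0, 2},  {1, 4},  {2, 6},  {3, 8},  {4, 12},
                                              {5, 16}, {6, 24}, {7, 32}, {8, 48}, {9, 64}};

FF_ARRAY_ELEMS(idx_width_map) in the loops then picks up the two new entries automatically.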

> 
>> +#define SIZEOF_PIXEL ((bit_depth + 7) / 8)
>> +#define PIXEL_STRIDE (AV_INPUT_BUFFER_PADDING_SIZE + MAX_PB_SIZE + AV_INPUT_BUFFER_PADDING_SIZE)
>> +#define BUF_SIZE ((MAX_PB_SIZE+4+4) * PIXEL_STRIDE * 2)
>> +
>> +#define randomize_buffers(buf0, buf1, size)                 \
>> +    do {                                                    \
>> +        uint32_t mask = pixel_mask[(bit_depth - 8) >> 1];   \
>> +        int k;                                              \
>> +        for (k = 0; k < size; k += 4) {                     \
>> +            uint32_t r = rnd() & mask;                      \
>> +            AV_WN32A(buf0 + k, r);                          \
>> +            AV_WN32A(buf1 + k, r);                          \
>> +        }                                                   \
>> +    } while (0)
>> +
>> +#define randomize_buffers2(buf0, buf1, size)                \
>> +    do {                                                    \
>> +        uint32_t mask = 0x1fff1fff;                         \
>> +        int k;                                              \
>> +        for (k = 0; k < size; k += 4) {                     \
>> +            uint32_t r = rnd() & mask;                      \
>> +            AV_WN32A(buf0 + k, r);                          \
>> +            AV_WN32A(buf1 + k, r);                          \
>> +        }                                                   \
>> +    } while (0)
>> +
>> +static int get_random(min, max, mod)
> Function parameters should have types.

Agreed, I will add the types in v2.
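
Something like the following should work (untested sketch). Note it also adds parentheses around the sign test: '==' binds tighter than '&', so the current expression always evaluates to -1.

static int get_random(int min, int max, int mod)
{
    int value = rnd() % mod;                      /* magnitude in [0, mod) */
    int sign  = ((rnd() & 0x1) == 0) ? 1 : -1;    /* random sign */
    return av_clip(sign * value, min, max);
}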

>> +{
>> +    int value = rnd()%mod;
>> +    int sign = rnd()&0x1 == 0 ? 1 : -1;
>> +    return av_clip(sign*value, min, max);
>> +}
>> +
>> +static void check_put_hevc_qpel(HEVCDSPContext h, int bit_depth)
>> +{
>> +    int i, x, y;
>> +    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
>> +    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
>> +    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
>> +    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
>> +
>> +    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
>> +        for (y = 0; y < 4; y++) {
>> +            for (x = 0; x < 4; x++) {
>> +                int idx = idx_width_map[i][0];
>> +                int width = idx_width_map[i][1];
>> +                int height= width;
> So we are only testing the case of height == width? It would be better to cover non-square blocks as well.

I think square sizes are enough.
Square and non-square blocks will give the same result, because the height in the MC asm code is always handled by a loop.

> 
>> +                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
>> +                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
>> +                declare_func_emms(AV_CPU_FLAG_MMX, void, int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
>> +                                  int height, intptr_t mx, intptr_t my, int width);
>> +
>> +                randomize_buffers(src0, src1, BUF_SIZE);
>> +                memset(dst0, 0, BUF_SIZE);
>> +                memset(dst1, 0, BUF_SIZE);
>> +
>> +                if (check_func(h.put_hevc_qpel[idx][!!y][!!x], "put_hevc_qpel_%dx%d_%d_%d_%d", width, height,
>> +                    x, y, bit_depth)) {
>> +                    call_ref((int16_t *)dst0, src0 + offset, stride, height, x, y, width);
>> +                    call_new((int16_t *)dst1, src1 + offset, stride, height, x, y, width);
>> +                    if (memcmp(dst0, dst1, BUF_SIZE))
>> +                        fail();
>> +                    bench_new((int16_t *)dst1, src1 + offset, stride, height, x, y, width);
>> +                }
>> +            }
>> +        }
>> +    }
>> +}
>> +
> 
> All these check_put_hevc_* functions look very similar. Can you reduce the duplicated code in some way, for example with macros?

Agreed, good idea.
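
As a first step, the repeated bit_depth loops in checkasm_check_hevc_mc could be folded with something along these lines (untested sketch, the macro name is just for illustration); the per-variant check functions could be generated by a similar macro:

#define CHECK_ALL_BIT_DEPTHS(name)                                  \
    do {                                                            \
        int bit_depth;                                              \
        for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {      \
            HEVCDSPContext h;                                       \
            ff_hevc_dsp_init(&h, bit_depth);                        \
            check_put_hevc_ ## name(h, bit_depth);                  \
        }                                                           \
        report("put_hevc_" #name);                                  \
    } while (0)

The body of checkasm_check_hevc_mc then reduces to ten lines of the form CHECK_ALL_BIT_DEPTHS(qpel); ... CHECK_ALL_BIT_DEPTHS(epel_bi_w);.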
diff mbox

Patch

diff --git a/tests/checkasm/Makefile b/tests/checkasm/Makefile
index 0233d2f989..e6c94cd676 100644
--- a/tests/checkasm/Makefile
+++ b/tests/checkasm/Makefile
@@ -23,7 +23,7 @@  AVCODECOBJS-$(CONFIG_EXR_DECODER)       += exrdsp.o
 AVCODECOBJS-$(CONFIG_HUFFYUV_DECODER)   += huffyuvdsp.o
 AVCODECOBJS-$(CONFIG_JPEG2000_DECODER)  += jpeg2000dsp.o
 AVCODECOBJS-$(CONFIG_PIXBLOCKDSP)       += pixblockdsp.o
-AVCODECOBJS-$(CONFIG_HEVC_DECODER)      += hevc_add_res.o hevc_idct.o hevc_sao.o
+AVCODECOBJS-$(CONFIG_HEVC_DECODER)      += hevc_add_res.o hevc_idct.o hevc_sao.o hevc_mc.o
 AVCODECOBJS-$(CONFIG_UTVIDEO_DECODER)   += utvideodsp.o
 AVCODECOBJS-$(CONFIG_V210_ENCODER)      += v210enc.o
 AVCODECOBJS-$(CONFIG_VP9_DECODER)       += vp9dsp.o
diff --git a/tests/checkasm/checkasm.c b/tests/checkasm/checkasm.c
index 20ce56932f..b95efc674d 100644
--- a/tests/checkasm/checkasm.c
+++ b/tests/checkasm/checkasm.c
@@ -117,6 +117,7 @@  static const struct {
         { "hevc_add_res", checkasm_check_hevc_add_res },
         { "hevc_idct", checkasm_check_hevc_idct },
         { "hevc_sao", checkasm_check_hevc_sao },
+        { "hevc_mc", checkasm_check_hevc_mc },
     #endif
     #if CONFIG_HUFFYUV_DECODER
         { "huffyuvdsp", checkasm_check_huffyuvdsp },
diff --git a/tests/checkasm/checkasm.h b/tests/checkasm/checkasm.h
index dcab74de06..5a4a612da7 100644
--- a/tests/checkasm/checkasm.h
+++ b/tests/checkasm/checkasm.h
@@ -58,6 +58,7 @@  void checkasm_check_h264qpel(void);
 void checkasm_check_hevc_add_res(void);
 void checkasm_check_hevc_idct(void);
 void checkasm_check_hevc_sao(void);
+void checkasm_check_hevc_mc(void);
 void checkasm_check_huffyuvdsp(void);
 void checkasm_check_jpeg2000dsp(void);
 void checkasm_check_llviddsp(void);
diff --git a/tests/checkasm/hevc_mc.c b/tests/checkasm/hevc_mc.c
new file mode 100644
index 0000000000..018f322c11
--- /dev/null
+++ b/tests/checkasm/hevc_mc.c
@@ -0,0 +1,547 @@ 
+/*
+ * Copyright (c) 2018 Yingming Fan <yingmingfan@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <string.h>
+
+#include "libavutil/intreadwrite.h"
+
+#include "libavcodec/avcodec.h"
+
+#include "libavcodec/hevcdsp.h"
+
+#include "checkasm.h"
+
+static const uint32_t pixel_mask[3] = { 0xffffffff, 0x03ff03ff, 0x0fff0fff };
+static const uint32_t idx_width_map[8][2] = {{1, 4}, {3, 8}, {4, 12}, {5, 16}, {6, 24}, {7, 32}, {8, 48}, {9, 64}};
+#define SIZEOF_PIXEL ((bit_depth + 7) / 8)
+#define PIXEL_STRIDE (AV_INPUT_BUFFER_PADDING_SIZE + MAX_PB_SIZE + AV_INPUT_BUFFER_PADDING_SIZE)
+#define BUF_SIZE ((MAX_PB_SIZE+4+4) * PIXEL_STRIDE * 2)
+
+#define randomize_buffers(buf0, buf1, size)                 \
+    do {                                                    \
+        uint32_t mask = pixel_mask[(bit_depth - 8) >> 1];   \
+        int k;                                              \
+        for (k = 0; k < size; k += 4) {                     \
+            uint32_t r = rnd() & mask;                      \
+            AV_WN32A(buf0 + k, r);                          \
+            AV_WN32A(buf1 + k, r);                          \
+        }                                                   \
+    } while (0)
+
+#define randomize_buffers2(buf0, buf1, size)                \
+    do {                                                    \
+        uint32_t mask = 0x1fff1fff;                         \
+        int k;                                              \
+        for (k = 0; k < size; k += 4) {                     \
+            uint32_t r = rnd() & mask;                      \
+            AV_WN32A(buf0 + k, r);                          \
+            AV_WN32A(buf1 + k, r);                          \
+        }                                                   \
+    } while (0)
+
+static int get_random(min, max, mod)
+{
+    int value = rnd()%mod;
+    int sign = rnd()&0x1 == 0 ? 1 : -1;
+    return av_clip(sign*value, min, max);
+}
+
+static void check_put_hevc_qpel(HEVCDSPContext h, int bit_depth)
+{
+    int i, x, y;
+    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
+        for (y = 0; y < 4; y++) {
+            for (x = 0; x < 4; x++) {
+                int idx = idx_width_map[i][0];
+                int width = idx_width_map[i][1];
+                int height= width;
+                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
+                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
+                declare_func_emms(AV_CPU_FLAG_MMX, void, int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
+                                  int height, intptr_t mx, intptr_t my, int width);
+
+                randomize_buffers(src0, src1, BUF_SIZE);
+                memset(dst0, 0, BUF_SIZE);
+                memset(dst1, 0, BUF_SIZE);
+
+                if (check_func(h.put_hevc_qpel[idx][!!y][!!x], "put_hevc_qpel_%dx%d_%d_%d_%d", width, height,
+                    x, y, bit_depth)) {
+                    call_ref((int16_t *)dst0, src0 + offset, stride, height, x, y, width);
+                    call_new((int16_t *)dst1, src1 + offset, stride, height, x, y, width);
+                    if (memcmp(dst0, dst1, BUF_SIZE))
+                        fail();
+                    bench_new((int16_t *)dst1, src1 + offset, stride, height, x, y, width);
+                }
+            }
+        }
+    }
+}
+
+static void check_put_hevc_epel(HEVCDSPContext h, int bit_depth)
+{
+    int i, x, y;
+    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
+        for (y = 0; y < 8; y++) {
+            for (x = 0; x < 8; x++) {
+                int idx = idx_width_map[i][0];
+                int width = idx_width_map[i][1];
+                int height= width;
+                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
+                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
+                declare_func_emms(AV_CPU_FLAG_MMX, void, int16_t *dst, uint8_t *src, ptrdiff_t srcstride,
+                                  int height, intptr_t mx, intptr_t my, int width);
+
+                randomize_buffers(src0, src1, BUF_SIZE);
+                memset(dst0, 0, BUF_SIZE);
+                memset(dst1, 0, BUF_SIZE);
+
+                if (check_func(h.put_hevc_epel[idx][!!y][!!x], "put_hevc_epel_%dx%d_%d_%d_%d", width, height,
+                    x, y, bit_depth)) {
+                    call_ref((int16_t *)dst0, src0 + offset, stride, height, x, y, width);
+                    call_new((int16_t *)dst1, src1 + offset, stride, height, x, y, width);
+                    if (memcmp(dst0, dst1, BUF_SIZE))
+                        fail();
+                    bench_new((int16_t *)dst1, src1 + offset, stride, height, x, y, width);
+                }
+            }
+        }
+    }
+}
+
+static void check_put_hevc_qpel_uni(HEVCDSPContext h, int bit_depth)
+{
+    int i, x, y;
+    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
+        for (y = 0; y < 4; y++) {
+            for (x = 0; x < 4; x++) {
+                int idx = idx_width_map[i][0];
+                int width = idx_width_map[i][1];
+                int height= width;
+                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
+                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
+                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
+                                  ptrdiff_t srcstride, int height, intptr_t mx, intptr_t my, int width);
+
+                randomize_buffers(src0, src1, BUF_SIZE);
+                memset(dst0, 0, BUF_SIZE);
+                memset(dst1, 0, BUF_SIZE);
+
+                if (check_func(h.put_hevc_qpel_uni[idx][!!y][!!x], "put_hevc_qpel_uni_%dx%d_%d_%d_%d", width, height,
+                    x, y, bit_depth)) {
+                    call_ref(dst0, stride, src0 + offset, stride, height, x, y, width);
+                    call_new(dst1, stride, src1 + offset, stride, height, x, y, width);
+                    if (memcmp(dst0, dst1, BUF_SIZE))
+                        fail();
+                    bench_new(dst1, stride, src1 + offset, stride, height, x, y, width);
+                }
+            }
+        }
+    }
+}
+
+static void check_put_hevc_epel_uni(HEVCDSPContext h, int bit_depth)
+{
+    int i, x, y;
+    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
+        for (y = 0; y < 8; y++) {
+            for (x = 0; x < 8; x++) {
+                int idx = idx_width_map[i][0];
+                int width = idx_width_map[i][1];
+                int height= width;
+                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
+                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
+                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *src,
+                                  ptrdiff_t srcstride, int height, intptr_t mx, intptr_t my, int width);
+
+                randomize_buffers(src0, src1, BUF_SIZE);
+                memset(dst0, 0, BUF_SIZE);
+                memset(dst1, 0, BUF_SIZE);
+
+                if (check_func(h.put_hevc_epel_uni[idx][!!y][!!x], "put_hevc_epel_uni_%dx%d_%d_%d_%d", width, height,
+                    x, y, bit_depth)) {
+                    call_ref(dst0, stride, src0 + offset, stride, height, x, y, width);
+                    call_new(dst1, stride, src1 + offset, stride, height, x, y, width);
+                    if (memcmp(dst0, dst1, BUF_SIZE))
+                        fail();
+                    bench_new(dst1, stride, src1 + offset, stride, height, x, y, width);
+                }
+            }
+        }
+    }
+}
+
+static void check_put_hevc_qpel_bi(HEVCDSPContext h, int bit_depth)
+{
+    int i, x, y;
+    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
+        for (y = 0; y < 4; y++) {
+            for (x = 0; x < 4; x++) {
+                int idx = idx_width_map[i][0];
+                int width = idx_width_map[i][1];
+                int height= width;
+                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
+                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
+                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *_src,
+                                  ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my,
+                                  int width);
+
+                randomize_buffers(src0, src1, BUF_SIZE);
+                randomize_buffers2(src00, src11, BUF_SIZE);
+                memset(dst0, 0, BUF_SIZE);
+                memset(dst1, 0, BUF_SIZE);
+
+                if (check_func(h.put_hevc_qpel_bi[idx][!!y][!!x], "put_hevc_qpel_bi_%dx%d_%d_%d_%d", width, height,
+                    x, y, bit_depth)) {
+                    call_ref(dst0, stride, src0 + offset, stride, (int16_t *)src00, height, x, y, width);
+                    call_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, x, y, width);
+                    if (memcmp(dst0, dst1, BUF_SIZE))
+                        fail();
+                    bench_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, x, y, width);
+                }
+            }
+        }
+    }
+}
+
+static void check_put_hevc_epel_bi(HEVCDSPContext h, int bit_depth)
+{
+    int i, x, y;
+    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
+        for (y = 0; y < 8; y++) {
+            for (x = 0; x < 8; x++) {
+                int idx = idx_width_map[i][0];
+                int width = idx_width_map[i][1];
+                int height= width;
+                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
+                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
+                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *_src,
+                                  ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my,
+                                  int width);
+
+                randomize_buffers(src0, src1, BUF_SIZE);
+                randomize_buffers2(src00, src11, BUF_SIZE);
+                memset(dst0, 0, BUF_SIZE);
+                memset(dst1, 0, BUF_SIZE);
+
+                if (check_func(h.put_hevc_epel_bi[idx][!!y][!!x], "put_hevc_epel_bi_%dx%d_%d_%d_%d", width, height,
+                    x, y, bit_depth)) {
+                    call_ref(dst0, stride, src0 + offset, stride, (int16_t *)src00, height, x, y, width);
+                    call_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, x, y, width);
+                    if (memcmp(dst0, dst1, BUF_SIZE))
+                        fail();
+                    bench_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, x, y, width);
+                }
+            }
+        }
+    }
+}
+
+static void check_put_hevc_qpel_uni_w(HEVCDSPContext h, int bit_depth)
+{
+    int i, x, y;
+    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
+        for (y = 0; y < 4; y++) {
+            for (x = 0; x < 4; x++) {
+                int idx = idx_width_map[i][0];
+                int width = idx_width_map[i][1];
+                int height= width;
+                int denom = rnd()%8;
+                int wx = (1<<denom) + get_random(-127, 128, 129);
+                int ox = get_random(-127, 128, 129);
+                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
+                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
+                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                  int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width);
+
+                randomize_buffers(src0, src1, BUF_SIZE);
+                randomize_buffers(src00, src11, BUF_SIZE);
+                memset(dst0, 0, BUF_SIZE);
+                memset(dst1, 0, BUF_SIZE);
+
+                if (check_func(h.put_hevc_qpel_uni_w[idx][!!y][!!x], "put_hevc_qpel_uni_w_%dx%d_%d_%d_%d", width, height,
+                    x, y, bit_depth)) {
+                    call_ref(dst0, stride, src0 + offset, stride, height, denom, wx, ox, x, y, width);
+                    call_new(dst1, stride, src1 + offset, stride, height, denom, wx, ox, x, y, width);
+                    if (memcmp(dst0, dst1, BUF_SIZE))
+                        fail();
+                    bench_new(dst1, stride, src1 + offset, stride, height, denom, wx, ox, x, y, width);
+                }
+            }
+        }
+    }
+}
+
+static void check_put_hevc_epel_uni_w(HEVCDSPContext h, int bit_depth)
+{
+    int i, x, y;
+    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
+        for (y = 0; y < 8; y++) {
+            for (x = 0; x < 8; x++) {
+                int idx = idx_width_map[i][0];
+                int width = idx_width_map[i][1];
+                int height= width;
+                int denom = rnd()%8;
+                int wx = (1<<denom) + get_random(-127, 128, 129);
+                int ox = get_random(-127, 128, 129);
+                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
+                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
+                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                  int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width);
+
+                randomize_buffers(src0, src1, BUF_SIZE);
+                randomize_buffers(src00, src11, BUF_SIZE);
+                memset(dst0, 0, BUF_SIZE);
+                memset(dst1, 0, BUF_SIZE);
+
+                if (check_func(h.put_hevc_epel_uni_w[idx][!!y][!!x], "put_hevc_epel_uni_w_%dx%d_%d_%d_%d", width, height,
+                    x, y, bit_depth)) {
+                    call_ref(dst0, stride, src0 + offset, stride, height, denom, wx, ox, x, y, width);
+                    call_new(dst1, stride, src1 + offset, stride, height, denom, wx, ox, x, y, width);
+                    if (memcmp(dst0, dst1, BUF_SIZE))
+                        fail();
+                    bench_new(dst1, stride, src1 + offset, stride, height, denom, wx, ox, x, y, width);
+                }
+            }
+        }
+    }
+}
+
+static void check_put_hevc_qpel_bi_w(HEVCDSPContext h, int bit_depth)
+{
+    int i, x, y;
+    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
+        for (y = 0; y < 4; y++) {
+            for (x = 0; x < 4; x++) {
+                int idx = idx_width_map[i][0];
+                int width = idx_width_map[i][1];
+                int height= width;
+                int denom = rnd()%8;
+                int wx = (1<<denom) + get_random(-127, 128, 129);
+                int ox = get_random(-127, 128, 129);
+                int wx1 = (1<<denom) + get_random(-127, 128, 129);
+                int ox1 = get_random(-127, 128, 129);
+                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
+                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
+                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                  int16_t *src2, int height, int denom, int wx0, int wx1,
+                                  int ox0, int ox1, intptr_t mx, intptr_t my, int width);
+
+                randomize_buffers(src0, src1, BUF_SIZE);
+                randomize_buffers2(src00, src11, BUF_SIZE);
+                memset(dst0, 0, BUF_SIZE);
+                memset(dst1, 0, BUF_SIZE);
+
+                if (check_func(h.put_hevc_qpel_bi_w[idx][!!y][!!x], "put_hevc_qpel_bi_w_%dx%d_%d_%d_%d", width, height,
+                    x, y, bit_depth)) {
+                    call_ref(dst0, stride, src0 + offset, stride, (int16_t *)src00, height, denom, wx, wx1, ox, ox1, x, y, width);
+                    call_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, denom, wx, wx1, ox, ox1, x, y, width);
+                    if (memcmp(dst0, dst1, BUF_SIZE))
+                        fail();
+                    bench_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, denom, wx, wx1, ox, ox1, x, y, width);
+                }
+            }
+        }
+    }
+}
+
+static void check_put_hevc_epel_bi_w(HEVCDSPContext h, int bit_depth)
+{
+    int i, x, y;
+    LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src0, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src1, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src00, [BUF_SIZE]);
+    LOCAL_ALIGNED_32(uint8_t, src11, [BUF_SIZE]);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(idx_width_map); i++) {
+        for (y = 0; y < 8; y++) {
+            for (x = 0; x < 8; x++) {
+                int idx = idx_width_map[i][0];
+                int width = idx_width_map[i][1];
+                int height= width;
+                int denom = rnd()%8;
+                int wx = (1<<denom) + get_random(-127, 128, 129);
+                int ox = get_random(-127, 128, 129);
+                int wx1 = (1<<denom) + get_random(-127, 128, 129);
+                int ox1 = get_random(-127, 128, 129);
+                ptrdiff_t stride = PIXEL_STRIDE*SIZEOF_PIXEL;
+                int offset = (AV_INPUT_BUFFER_PADDING_SIZE+4*PIXEL_STRIDE)*SIZEOF_PIXEL;
+                declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t _srcstride,
+                                  int16_t *src2, int height, int denom, int wx0, int wx1,
+                                  int ox0, int ox1, intptr_t mx, intptr_t my, int width);
+
+                randomize_buffers(src0, src1, BUF_SIZE);
+                randomize_buffers2(src00, src11, BUF_SIZE);
+                memset(dst0, 0, BUF_SIZE);
+                memset(dst1, 0, BUF_SIZE);
+
+                if (check_func(h.put_hevc_epel_bi_w[idx][!!y][!!x], "put_hevc_epel_bi_w_%dx%d_%d_%d_%d", width, height,
+                    x, y, bit_depth)) {
+                    call_ref(dst0, stride, src0 + offset, stride, (int16_t *)src00, height, denom, wx, wx1, ox, ox1, x, y, width);
+                    call_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, denom, wx, wx1, ox, ox1, x, y, width);
+                    if (memcmp(dst0, dst1, BUF_SIZE))
+                        fail();
+                    bench_new(dst1, stride, src1 + offset, stride, (int16_t *)src11, height, denom, wx, wx1, ox, ox1, x, y, width);
+                }
+            }
+        }
+    }
+}
+
+void checkasm_check_hevc_mc(void)
+{
+    int bit_depth;
+
+    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
+        HEVCDSPContext h;
+
+        ff_hevc_dsp_init(&h, bit_depth);
+        check_put_hevc_qpel(h, bit_depth);
+    }
+    report("put_hevc_qpel");
+
+    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
+        HEVCDSPContext h;
+
+        ff_hevc_dsp_init(&h, bit_depth);
+        check_put_hevc_qpel_uni(h, bit_depth);
+    }
+    report("put_hevc_qpel_uni");
+
+    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
+        HEVCDSPContext h;
+
+        ff_hevc_dsp_init(&h, bit_depth);
+        check_put_hevc_qpel_bi(h, bit_depth);
+    }
+    report("put_hevc_qpel_bi");
+
+    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
+        HEVCDSPContext h;
+
+        ff_hevc_dsp_init(&h, bit_depth);
+        check_put_hevc_qpel_uni_w(h, bit_depth);
+    }
+    report("put_hevc_qpel_uni_w");
+
+    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
+        HEVCDSPContext h;
+
+        ff_hevc_dsp_init(&h, bit_depth);
+        check_put_hevc_qpel_bi_w(h, bit_depth);
+    }
+    report("put_hevc_qpel_bi_w");
+
+    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
+        HEVCDSPContext h;
+
+        ff_hevc_dsp_init(&h, bit_depth);
+        check_put_hevc_epel(h, bit_depth);
+    }
+    report("put_hevc_epel");
+
+    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
+        HEVCDSPContext h;
+
+        ff_hevc_dsp_init(&h, bit_depth);
+        check_put_hevc_epel_uni(h, bit_depth);
+    }
+    report("put_hevc_epel_uni");
+
+    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
+        HEVCDSPContext h;
+
+        ff_hevc_dsp_init(&h, bit_depth);
+        check_put_hevc_epel_bi(h, bit_depth);
+    }
+    report("put_hevc_epel_bi");
+
+    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
+        HEVCDSPContext h;
+
+        ff_hevc_dsp_init(&h, bit_depth);
+        check_put_hevc_epel_uni_w(h, bit_depth);
+    }
+    report("put_hevc_epel_uni_w");
+
+    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
+        HEVCDSPContext h;
+
+        ff_hevc_dsp_init(&h, bit_depth);
+        check_put_hevc_epel_bi_w(h, bit_depth);
+    }
+    report("put_hevc_epel_bi_w");
+}
diff --git a/tests/fate/checkasm.mak b/tests/fate/checkasm.mak
index a722b4a917..aeb47091a1 100644
--- a/tests/fate/checkasm.mak
+++ b/tests/fate/checkasm.mak
@@ -15,6 +15,7 @@  FATE_CHECKASM = fate-checkasm-aacpsdsp                                  \
                 fate-checkasm-hevc_add_res                              \
                 fate-checkasm-hevc_idct                                 \
                 fate-checkasm-hevc_sao                                  \
+                fate-checkasm-hevc_mc                                   \
                 fate-checkasm-jpeg2000dsp                               \
                 fate-checkasm-llviddsp                                  \
                 fate-checkasm-llviddspenc                               \