@@ -22,6 +22,12 @@
#include "libavcodec/mips/hevcdsp_mips.h"
#include "libavcodec/mips/hevc_macros_msa.h"
+static const uint8_t ff_hevc_mask_arr[16 * 2] __attribute__((aligned(0x40))) = {
+ /* 8 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
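+ /* 4 width cases */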
+ 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20
+};
+
#define HEVC_BI_RND_CLIP2(in0, in1, vec0, vec1, rnd_val, out0, out1) \
{ \
ADDS_SH2_SH(vec0, in0, vec1, in1, out0, out1); \
@@ -531,7 +537,7 @@ static void hevc_hz_bi_8t_4w_msa(uint8_t *src0_ptr,
v8i16 dst0, dst1, dst2, dst3;
v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
v8i16 filter_vec, const_vec;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20 };
+ v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[16]);
src0_ptr -= 3;
@@ -557,26 +563,26 @@ static void hevc_hz_bi_8t_4w_msa(uint8_t *src0_ptr,
ILVR_D2_SH(in5, in4, in7, in6, in2, in3);
XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);
- VSHF_B4_SB(src0, src1, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst0 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst0, dst0, dst0, dst0);
- VSHF_B4_SB(src2, src3, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst1 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst1, dst1, dst1, dst1);
- VSHF_B4_SB(src4, src5, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst2 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst2, dst2, dst2, dst2);
- VSHF_B4_SB(src6, src7, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst3 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst3, dst3, dst3, dst3);
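+ /* tap-major order: each DPADD_SB4 applies one filter tap across all four row pairs, keeping the four accumulators independent of each other */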
+ VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0, vec1);
+ VSHF_B2_SB(src4, src5, src6, src7, mask0, mask0, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec0, vec1);
+ VSHF_B2_SB(src4, src5, src6, src7, mask1, mask1, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec0, vec1);
+ VSHF_B2_SB(src4, src5, src6, src7, mask2, mask2, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src1, src2, src3, mask3, mask3, vec0, vec1);
+ VSHF_B2_SB(src4, src5, src6, src7, mask3, mask3, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
+ dst1, dst2, dst3);
HEVC_BI_RND_CLIP4(in0, in1, in2, in3,
dst0, dst1, dst2, dst3, 7, dst0, dst1, dst2, dst3);
@@ -604,7 +610,7 @@ static void hevc_hz_bi_8t_8w_msa(uint8_t *src0_ptr,
v8i16 dst0, dst1, dst2, dst3;
v8i16 in0, in1, in2, in3;
v8i16 filter_vec, const_vec;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
src0_ptr -= 3;
@@ -625,26 +631,26 @@ static void hevc_hz_bi_8t_8w_msa(uint8_t *src0_ptr,
src1_ptr += (4 * src2_stride);
XORI_B4_128_SB(src0, src1, src2, src3);
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst0 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst0, dst0, dst0, dst0);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst1 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst1, dst1, dst1, dst1);
- VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst2 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst2, dst2, dst2, dst2);
- VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst3 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst3, dst3, dst3, dst3);
+ VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0, vec1);
+ VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec0, vec1);
+ VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec0, vec1);
+ VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src1, src1, mask3, mask3, vec0, vec1);
+ VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
+ dst1, dst2, dst3);
HEVC_BI_RND_CLIP4(in0, in1, in2, in3,
dst0, dst1, dst2, dst3, 7, dst0, dst1, dst2, dst3);
@@ -664,10 +670,83 @@ static void hevc_hz_bi_8t_12w_msa(uint8_t *src0_ptr,
const int8_t *filter,
int32_t height)
{
- hevc_hz_bi_8t_8w_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
- dst, dst_stride, filter, height);
- hevc_hz_bi_8t_4w_msa(src0_ptr + 8, src_stride, src1_ptr + 8, src2_stride,
- dst + 8, dst_stride, filter, height);
+ uint32_t loop_cnt;
+ int32_t tmp0, tmp1;
+ int64_t tmp2, tmp3;
+ v16i8 src0, src1, src2, src3;
+ v16i8 vec0, vec1, vec2;
+ v8i16 filt0, filt1, filt2, filt3;
+ v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
+ v8i16 dst0, dst1, dst2;
+ v8i16 in0, in1, in2, in3;
+ v8i16 filter_vec, const_vec;
+
+ src0_ptr -= 3;
+ const_vec = __msa_ldi_h(128);
+ const_vec <<= 6;
+
+ filter_vec = LD_SH(filter);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
+
+ mask0 = LD_SB(ff_hevc_mask_arr);
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+ mask3 = mask0 + 6;
+ mask4 = LD_SB(&ff_hevc_mask_arr[16]);
+ mask5 = mask4 + 2;
+ mask6 = mask4 + 4;
+ mask7 = mask4 + 6;
+
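+ /* fixed trip count: this 12 width path assumes height == 16 (two rows per iteration) */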
+ for (loop_cnt = 8; loop_cnt--;) {
+ LD_SB2(src0_ptr, 8, src0, src1);
+ src0_ptr += src_stride;
+ LD_SB2(src0_ptr, 8, src2, src3);
+ src0_ptr += src_stride;
+ LD_SH2(src1_ptr, 8, in0, in1);
+ src1_ptr += src2_stride;
+ LD_SH2(src1_ptr, 8, in2, in3);
+ src1_ptr += src2_stride;
+ XORI_B4_128_SB(src0, src1, src2, src3);
+
+ dst0 = const_vec;
+ dst1 = const_vec;
+ dst2 = const_vec;
+
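+ /* dst0/dst2: columns 0..7 of the two rows; dst1 gathers columns 8..11 of both rows via the two-source masks (mask4..mask7) */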
+ VSHF_B3_SB(src0, src0, src1, src3, src2, src2, mask0, mask4, mask0,
+ vec0, vec1, vec2);
+ DPADD_SB2_SH(vec0, vec1, filt0, filt0, dst0, dst1);
+ dst2 = __msa_dpadd_s_h(dst2, vec2, (v16i8) filt0);
+ VSHF_B3_SB(src0, src0, src1, src3, src2, src2, mask1, mask5, mask1,
+ vec0, vec1, vec2);
+ DPADD_SB2_SH(vec0, vec1, filt1, filt1, dst0, dst1);
+ dst2 = __msa_dpadd_s_h(dst2, vec2, (v16i8) filt1);
+ VSHF_B3_SB(src0, src0, src1, src3, src2, src2, mask2, mask6, mask2,
+ vec0, vec1, vec2);
+ DPADD_SB2_SH(vec0, vec1, filt2, filt2, dst0, dst1);
+ dst2 = __msa_dpadd_s_h(dst2, vec2, (v16i8) filt2);
+ VSHF_B3_SB(src0, src0, src1, src3, src2, src2, mask3, mask7, mask3,
+ vec0, vec1, vec2);
+ DPADD_SB2_SH(vec0, vec1, filt3, filt3, dst0, dst1);
+ dst2 = __msa_dpadd_s_h(dst2, vec2, (v16i8) filt3);
+
+ in1 = (v8i16) __msa_pckev_d((v2i64) in3, (v2i64) in1);
+ HEVC_BI_RND_CLIP2(in0, in1, dst0, dst1, 7, dst0, dst1);
+ dst2 = __msa_adds_s_h(in2, dst2);
+ dst2 = __msa_srari_h(dst2, 7);
+ dst2 = CLIP_SH_0_255(dst2);
+ PCKEV_B2_SH(dst1, dst0, dst2, dst2, dst0, dst1);
+
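+ /* store each 12 pixel row as a doubleword (columns 0..7) plus a word (columns 8..11) */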
+ tmp2 = __msa_copy_s_d((v2i64) dst0, 0);
+ tmp0 = __msa_copy_s_w((v4i32) dst0, 2);
+ tmp3 = __msa_copy_s_d((v2i64) dst1, 0);
+ tmp1 = __msa_copy_s_w((v4i32) dst0, 3);
+ SD(tmp2, dst);
+ SW(tmp0, dst + 8);
+ dst += dst_stride;
+ SD(tmp3, dst);
+ SW(tmp1, dst + 8);
+ dst += dst_stride;
+ }
}
static void hevc_hz_bi_8t_16w_msa(uint8_t *src0_ptr,
@@ -687,7 +766,7 @@ static void hevc_hz_bi_8t_16w_msa(uint8_t *src0_ptr,
v8i16 dst0, dst1, dst2, dst3;
v8i16 in0, in1, in2, in3;
v8i16 filter_vec, const_vec;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
src0_ptr -= 3;
const_vec = __msa_ldi_h(128);
@@ -711,26 +790,26 @@ static void hevc_hz_bi_8t_16w_msa(uint8_t *src0_ptr,
src1_ptr += src2_stride;
XORI_B4_128_SB(src0, src1, src2, src3);
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst0 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst0, dst0, dst0, dst0);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst1 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst1, dst1, dst1, dst1);
- VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst2 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst2, dst2, dst2, dst2);
- VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst3 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst3, dst3, dst3, dst3);
+ VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0, vec1);
+ VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec0, vec1);
+ VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec0, vec1);
+ VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src1, src1, mask3, mask3, vec0, vec1);
+ VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
+ dst1, dst2, dst3);
HEVC_BI_RND_CLIP4(in0, in1, in2, in3,
dst0, dst1, dst2, dst3, 7, dst0, dst1, dst2, dst3);
@@ -759,7 +838,7 @@ static void hevc_hz_bi_8t_24w_msa(uint8_t *src0_ptr,
v8i16 dst0, dst1, dst2;
v8i16 in0, in1, in2;
v8i16 filter_vec, const_vec;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
src0_ptr = src0_ptr - 3;
const_vec = __msa_ldi_h(128);
@@ -784,21 +863,21 @@ static void hevc_hz_bi_8t_24w_msa(uint8_t *src0_ptr,
src1_ptr += src2_stride;
XORI_B2_128_SB(src0, src1);
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst0 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst0, dst0, dst0, dst0);
- VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
- vec0, vec1, vec2, vec3);
dst1 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst1, dst1, dst1, dst1);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst2 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst2, dst2, dst2, dst2);
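+ /* three accumulators x eight taps folded into six DPADD_SB4 calls; the destination order rotates through dst0, dst1, dst2 */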
+ VSHF_B2_SB(src0, src0, src0, src1, mask0, mask4, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src0, src0, mask0, mask1, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt1, dst0,
+ dst1, dst2, dst0);
+ VSHF_B2_SB(src0, src1, src1, src1, mask5, mask1, vec0, vec1);
+ VSHF_B2_SB(src0, src0, src0, src1, mask2, mask6, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt2, filt2, dst1,
+ dst2, dst0, dst1);
+ VSHF_B2_SB(src1, src1, src0, src0, mask2, mask3, vec0, vec1);
+ VSHF_B2_SB(src0, src1, src1, src1, mask7, mask3, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt3, filt3, filt3, dst2,
+ dst0, dst1, dst2);
HEVC_BI_RND_CLIP2(in0, in1, dst0, dst1, 7, dst0, dst1);
dst2 = __msa_adds_s_h(dst2, in2);
@@ -830,7 +909,7 @@ static void hevc_hz_bi_8t_32w_msa(uint8_t *src0_ptr,
v8i16 dst0, dst1, dst2, dst3;
v8i16 in0, in1, in2, in3;
v8i16 filter_vec, const_vec;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
src0_ptr -= 3;
const_vec = __msa_ldi_h(128);
@@ -855,26 +934,26 @@ static void hevc_hz_bi_8t_32w_msa(uint8_t *src0_ptr,
src1_ptr += src2_stride;
XORI_B3_128_SB(src0, src1, src2);
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst0 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst0, dst0, dst0, dst0);
- VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
- vec0, vec1, vec2, vec3);
dst1 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst1, dst1, dst1, dst1);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst2 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst2, dst2, dst2, dst2);
- VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst3 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst3, dst3, dst3, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask0, mask4, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src2, src2, mask0, mask0, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask1, mask5, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src2, src2, mask1, mask1, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask2, mask6, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src2, src2, mask2, mask2, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask3, mask7, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src2, src2, mask3, mask3, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
+ dst1, dst2, dst3);
HEVC_BI_RND_CLIP4(in0, in1, in2, in3,
dst0, dst1, dst2, dst3, 7, dst0, dst1, dst2, dst3);
@@ -903,7 +982,7 @@ static void hevc_hz_bi_8t_48w_msa(uint8_t *src0_ptr,
v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
v8i16 in0, in1, in2, in3, in4, in5;
v8i16 filter_vec, const_vec;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
src0_ptr -= 3;
@@ -921,62 +1000,53 @@ static void hevc_hz_bi_8t_48w_msa(uint8_t *src0_ptr,
mask6 = mask0 + 12;
mask7 = mask0 + 14;
- for (loop_cnt = height; loop_cnt--;) {
- LD_SB2(src0_ptr, 16, src0, src1);
- XORI_B2_128_SB(src0, src1);
- LD_SH2(src1_ptr, 8, in0, in1);
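+ /* fixed trip count: this 48 width path assumes height == 64 */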
+ for (loop_cnt = 64; loop_cnt--;) {
+ LD_SB3(src0_ptr, 16, src0, src1, src2);
+ src3 = LD_SB(src0_ptr + 40);
+ src0_ptr += src_stride;
+ LD_SH4(src1_ptr, 8, in0, in1, in2, in3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst0 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst0, dst0, dst0, dst0);
- VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
- vec0, vec1, vec2, vec3);
dst1 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst1, dst1, dst1, dst1);
-
- HEVC_BI_RND_CLIP2(in0, in1, dst0, dst1, 7, dst0, dst1);
-
- tmp0 = __msa_pckev_b((v16i8) dst1, (v16i8) dst0);
- ST_SB(tmp0, dst);
-
- LD_SB2(src0_ptr + 32, 8, src2, src3);
- XORI_B2_128_SB(src2, src3);
- src0_ptr += src_stride;
-
- LD_SH2(src1_ptr + 16, 8, in2, in3);
-
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst2 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst2, dst2, dst2, dst2);
- VSHF_B4_SB(src1, src2, mask4, mask5, mask6, mask7,
- vec0, vec1, vec2, vec3);
dst3 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst3, dst3, dst3, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask0, mask4, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src1, src2, mask0, mask4, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask1, mask5, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src1, src2, mask1, mask5, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask2, mask6, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src1, src2, mask2, mask6, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask3, mask7, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src1, src2, mask3, mask7, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
+ dst1, dst2, dst3);
+ HEVC_BI_RND_CLIP2(in0, in1, dst0, dst1, 7, dst0, dst1);
HEVC_BI_RND_CLIP2(in2, in3, dst2, dst3, 7, dst2, dst3);
-
- tmp1 = __msa_pckev_b((v16i8) dst3, (v16i8) dst2);
+ PCKEV_B2_SB(dst1, dst0, dst3, dst2, tmp0, tmp1);
+ ST_SB(tmp0, dst);
ST_SB(tmp1, dst + 16);
LD_SH2(src1_ptr + 32, 8, in4, in5);
src1_ptr += src2_stride;
- VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst4 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst4, dst4, dst4, dst4);
- VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
dst5 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst5, dst5, dst5, dst5);
+ VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec0, vec1);
+ VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt1, filt1, dst4,
+ dst5, dst4, dst5);
+ VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec0, vec1);
+ VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt3, filt3, dst4,
+ dst5, dst4, dst5);
HEVC_BI_RND_CLIP2(in4, in5, dst4, dst5, 7, dst4, dst5);
@@ -995,14 +1065,10 @@ static void hevc_hz_bi_8t_64w_msa(uint8_t *src0_ptr,
const int8_t *filter,
int32_t height)
{
- uint8_t *src0_ptr_tmp;
- uint8_t *dst_tmp;
- int16_t *src1_ptr_tmp;
uint32_t loop_cnt;
- uint32_t cnt;
- v16i8 src0, src1, src2, tmp0, tmp1;
+ v16i8 src0, src1, src2, src3, src4, src5, tmp0, tmp1;
v8i16 filt0, filt1, filt2, filt3;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
v16i8 vec0, vec1, vec2, vec3;
v8i16 dst0, dst1, dst2, dst3;
@@ -1026,48 +1092,74 @@ static void hevc_hz_bi_8t_64w_msa(uint8_t *src0_ptr,
mask7 = mask0 + 14;
for (loop_cnt = height; loop_cnt--;) {
- src0_ptr_tmp = src0_ptr;
- dst_tmp = dst;
- src1_ptr_tmp = src1_ptr;
+ LD_SB2(src0_ptr, 16, src0, src1);
+ src2 = LD_SB(src0_ptr + 24);
+ LD_SB2(src0_ptr + 32, 16, src3, src4);
+ src5 = LD_SB(src0_ptr + 56);
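+ /* whole 64 column row preloaded: src0..src2 feed output columns 0..31, src3..src5 columns 32..63 */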
+ LD_SH4(src1_ptr, 8, in0, in1, in2, in3);
+ XORI_B3_128_SB(src0, src1, src2);
- for (cnt = 2; cnt--;) {
- LD_SB2(src0_ptr_tmp, 16, src0, src1);
- src2 = LD_SB(src0_ptr_tmp + 24);
- src0_ptr_tmp += 32;
- LD_SH4(src1_ptr_tmp, 8, in0, in1, in2, in3);
- src1_ptr_tmp += 32;
- XORI_B3_128_SB(src0, src1, src2);
+ dst0 = const_vec;
+ dst1 = const_vec;
+ dst2 = const_vec;
+ dst3 = const_vec;
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst0 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst0, dst0, dst0, dst0);
- VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
- vec0, vec1, vec2, vec3);
- dst1 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst1, dst1, dst1, dst1);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst2 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst2, dst2, dst2, dst2);
- VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst3 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst3, dst3, dst3, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask0, mask4, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src2, src2, mask0, mask0, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask1, mask5, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src2, src2, mask1, mask1, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask2, mask6, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src2, src2, mask2, mask2, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask3, mask7, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src2, src2, mask3, mask3, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
+ dst1, dst2, dst3);
- HEVC_BI_RND_CLIP4(in0, in1, in2, in3,
- dst0, dst1, dst2, dst3, 7,
- dst0, dst1, dst2, dst3);
+ HEVC_BI_RND_CLIP4(in0, in1, in2, in3,
+ dst0, dst1, dst2, dst3, 7,
+ dst0, dst1, dst2, dst3);
- PCKEV_B2_SB(dst1, dst0, dst3, dst2, tmp0, tmp1);
- ST_SB2(tmp0, tmp1, dst_tmp, 16);
- dst_tmp += 32;
- }
+ PCKEV_B2_SB(dst1, dst0, dst3, dst2, tmp0, tmp1);
+ ST_SB2(tmp0, tmp1, dst, 16);
+
+ src0 = src3;
+ src1 = src4;
+ src2 = src5;
+ LD_SH4(src1_ptr + 32, 8, in0, in1, in2, in3);
+ XORI_B3_128_SB(src0, src1, src2);
+
+ dst0 = const_vec;
+ dst1 = const_vec;
+ dst2 = const_vec;
+ dst3 = const_vec;
+ VSHF_B2_SB(src0, src0, src0, src1, mask0, mask4, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src2, src2, mask0, mask0, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask1, mask5, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src2, src2, mask1, mask1, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask2, mask6, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src2, src2, mask2, mask2, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
+ dst1, dst2, dst3);
+ VSHF_B2_SB(src0, src0, src0, src1, mask3, mask7, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src2, src2, mask3, mask3, vec2, vec3);
+ DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
+ dst1, dst2, dst3);
+ HEVC_BI_RND_CLIP4(in0, in1, in2, in3,
+ dst0, dst1, dst2, dst3, 7,
+ dst0, dst1, dst2, dst3);
+ PCKEV_B2_SB(dst1, dst0, dst3, dst2, tmp0, tmp1);
+ ST_SB2(tmp0, tmp1, dst + 32, 16);
src1_ptr += src2_stride;
src0_ptr += src_stride;
dst += dst_stride;
@@ -1528,30 +1620,30 @@ static void hevc_hv_bi_8t_4w_msa(uint8_t *src0_ptr,
int32_t height)
{
uint32_t loop_cnt;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
- v8i16 in0, in1;
+ uint64_t tp0, tp1;
+ v16u8 out;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ v8i16 in0 = { 0 }, in1 = { 0 };
v8i16 filt0, filt1, filt2, filt3;
- v4i32 filt_h0, filt_h1, filt_h2, filt_h3;
+ v8i16 filt_h0, filt_h1, filt_h2, filt_h3;
v16i8 mask1, mask2, mask3;
v8i16 filter_vec, const_vec;
v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
- v8i16 dst30, dst41, dst52, dst63, dst66, dst87;
- v4i32 dst0_r, dst1_r, in0_r, in0_l;
- v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
- v8i16 dst21_r, dst43_r, dst65_r, dst87_r;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20 };
- v8u16 mask4 = { 0, 4, 1, 5, 2, 6, 3, 7 };
+ v8i16 out0, out1;
+ v8i16 dst30, dst41, dst52, dst63, dst66, dst97, dst108;
+ v8i16 dst10, dst32, dst54, dst76, dst98, dst21, dst43, dst65, dst87, dst109;
+ v4i32 dst0, dst1, dst2, dst3;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
src0_ptr -= ((3 * src_stride) + 3);
filter_vec = LD_SH(filter_x);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W4_SW(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
+ SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
@@ -1572,71 +1664,77 @@ static void hevc_hv_bi_8t_4w_msa(uint8_t *src0_ptr,
VSHF_B4_SB(src3, src6, mask0, mask1, mask2, mask3,
vec12, vec13, vec14, vec15);
- dst30 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst30, dst30, dst30, dst30);
- dst41 = const_vec;
- DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
- dst41, dst41, dst41, dst41);
- dst52 = const_vec;
- DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
- dst52, dst52, dst52, dst52);
- dst63 = const_vec;
- DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2, filt3,
- dst63, dst63, dst63, dst63);
-
- ILVR_H3_SH(dst41, dst30, dst52, dst41, dst63, dst52,
- dst10_r, dst21_r, dst32_r);
- dst43_r = __msa_ilvl_h(dst41, dst30);
- dst54_r = __msa_ilvl_h(dst52, dst41);
- dst65_r = __msa_ilvl_h(dst63, dst52);
+ dst30 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst41 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+ dst52 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
+ filt3);
+ dst63 = HEVC_FILT_8TAP_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2,
+ filt3);
+
+ ILVRL_H2_SH(dst41, dst30, dst10, dst43);
+ ILVRL_H2_SH(dst52, dst41, dst21, dst54);
+ ILVRL_H2_SH(dst63, dst52, dst32, dst65);
+
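+ /* replicate the row 6 horizontal results from dst63 so they can be interleaved with row 7 inside the loop */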
dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);
- for (loop_cnt = height >> 1; loop_cnt--;) {
- LD_SB2(src0_ptr, src_stride, src7, src8);
- src0_ptr += (2 * src_stride);
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- src1_ptr += (2 * src2_stride);
+ for (loop_cnt = height >> 2; loop_cnt--;) {
+ LD_SB4(src0_ptr, src_stride, src7, src8, src9, src10);
+ src0_ptr += (4 * src_stride);
+ XORI_B4_128_SB(src7, src8, src9, src10);
- in0 = (v8i16) __msa_ilvr_d((v2i64) in1, (v2i64) in0);
- XORI_B2_128_SB(src7, src8);
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ INSERT_D2_SH(tp0, tp1, in0);
+ src1_ptr += (2 * src2_stride);
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ INSERT_D2_SH(tp0, tp1, in1);
+ src1_ptr += (2 * src2_stride);
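+ /* rows are paired 7/9 and 8/10 so one shuffle yields horizontal results for two output rows */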
- VSHF_B4_SB(src7, src8, mask0, mask1, mask2, mask3,
+ VSHF_B4_SB(src7, src9, mask0, mask1, mask2, mask3,
vec0, vec1, vec2, vec3);
- dst87 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst87, dst87, dst87, dst87);
- dst76_r = __msa_ilvr_h(dst87, dst66);
- dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r,
- filt_h0, filt_h1, filt_h2, filt_h3);
- dst87_r = __msa_vshf_h((v8i16) mask4, dst87, dst87);
- dst1_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r,
- filt_h0, filt_h1, filt_h2, filt_h3);
-
- dst0_r >>= 6;
- dst1_r >>= 6;
- UNPCK_SH_SW(in0, in0_r, in0_l);
- dst0_r = __msa_adds_s_w(dst0_r, in0_r);
- dst1_r = __msa_adds_s_w(dst1_r, in0_l);
- SRARI_W2_SW(dst0_r, dst1_r, 7);
- dst0_r = CLIP_SW_0_255(dst0_r);
- dst1_r = CLIP_SW_0_255(dst1_r);
-
- HEVC_PCK_SW_SB2(dst1_r, dst0_r, dst0_r);
- ST4x2_UB(dst0_r, dst, dst_stride);
- dst += (2 * dst_stride);
+ VSHF_B4_SB(src8, src10, mask0, mask1, mask2, mask3,
+ vec4, vec5, vec6, vec7);
+ dst97 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst108 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+
+ dst76 = __msa_ilvr_h(dst97, dst66);
+ ILVRL_H2_SH(dst108, dst97, dst87, dst109);
+ dst66 = (v8i16) __msa_splati_d((v2i64) dst97, 1);
+ dst98 = __msa_ilvr_h(dst66, dst108);
+
+ dst0 = HEVC_FILT_8TAP(dst10, dst32, dst54, dst76, filt_h0, filt_h1,
+ filt_h2, filt_h3);
+ dst1 = HEVC_FILT_8TAP(dst21, dst43, dst65, dst87, filt_h0, filt_h1,
+ filt_h2, filt_h3);
+ dst2 = HEVC_FILT_8TAP(dst32, dst54, dst76, dst98, filt_h0, filt_h1,
+ filt_h2, filt_h3);
+ dst3 = HEVC_FILT_8TAP(dst43, dst65, dst87, dst109, filt_h0, filt_h1,
+ filt_h2, filt_h3);
+
+ SRA_4V(dst0, dst1, dst2, dst3, 6);
+ PCKEV_H2_SH(dst1, dst0, dst3, dst2, out0, out1);
+ ADDS_SH2_SH(out0, in0, out1, in1, out0, out1);
+ ADDS_SH2_SH(out0, const_vec, out1, const_vec, out0, out1);
+ SRARI_H2_SH(out0, out1, 7);
+ CLIP_SH2_0_255_MAX_SATU(out0, out1);
+ out = (v16u8) __msa_pckev_b((v16i8) out1, (v16i8) out0);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
- dst10_r = dst32_r;
- dst32_r = dst54_r;
- dst54_r = dst76_r;
- dst21_r = dst43_r;
- dst43_r = dst65_r;
- dst65_r = dst87_r;
- dst66 = (v8i16) __msa_splati_d((v2i64) dst87, 1);
+ dst10 = dst54;
+ dst32 = dst76;
+ dst54 = dst98;
+ dst21 = dst65;
+ dst43 = dst87;
+ dst65 = dst109;
+ dst66 = (v8i16) __msa_splati_d((v2i64) dst108, 1);
}
}
-static void hevc_hv_bi_8t_8multx2mult_msa(uint8_t *src0_ptr,
+static void hevc_hv_bi_8t_8multx1mult_msa(uint8_t *src0_ptr,
int32_t src_stride,
int16_t *src1_ptr,
int32_t src2_stride,
@@ -1651,22 +1749,20 @@ static void hevc_hv_bi_8t_8multx2mult_msa(uint8_t *src0_ptr,
uint8_t *src0_ptr_tmp;
int16_t *src1_ptr_tmp;
uint8_t *dst_tmp;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
- v8i16 in0, in1;
- v4i32 in0_r, in0_l, in1_r, in1_l;
+ v16u8 out;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
+ v8i16 in0, tmp;
v8i16 filt0, filt1, filt2, filt3;
- v4i32 filt_h0, filt_h1, filt_h2, filt_h3;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v8i16 filt_h0, filt_h1, filt_h2, filt_h3;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
v16i8 mask1, mask2, mask3;
v8i16 filter_vec, const_vec;
v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
- v4i32 dst0_r, dst0_l, dst1_r, dst1_l;
+ v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+ v4i32 dst0_r, dst0_l;
v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
v8i16 dst10_l, dst32_l, dst54_l, dst76_l;
- v8i16 dst21_r, dst43_r, dst65_r, dst87_r;
- v8i16 dst21_l, dst43_l, dst65_l, dst87_l;
src0_ptr -= ((3 * src_stride) + 3);
const_vec = __msa_ldi_h(128);
@@ -1676,9 +1772,9 @@ static void hevc_hv_bi_8t_8multx2mult_msa(uint8_t *src0_ptr,
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
- SPLATI_W4_SW(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
+
+ SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
@@ -1703,18 +1799,14 @@ static void hevc_hv_bi_8t_8multx2mult_msa(uint8_t *src0_ptr,
vec8, vec9, vec10, vec11);
VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
vec12, vec13, vec14, vec15);
- dst0 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst0, dst0, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
- dst1, dst1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
- dst2, dst2, dst2, dst2);
- dst3 = const_vec;
- DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2, filt3,
- dst3, dst3, dst3, dst3);
+ dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst1 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+ dst2 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
+ filt3);
+ dst3 = HEVC_FILT_8TAP_SH(vec12, vec13, vec14, vec15, filt0, filt1,
+ filt2, filt3);
VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3,
vec0, vec1, vec2, vec3);
@@ -1722,38 +1814,28 @@ static void hevc_hv_bi_8t_8multx2mult_msa(uint8_t *src0_ptr,
vec4, vec5, vec6, vec7);
VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3,
vec8, vec9, vec10, vec11);
- dst4 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst4, dst4, dst4, dst4);
- dst5 = const_vec;
- DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
- dst5, dst5, dst5, dst5);
- dst6 = const_vec;
- DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
- dst6, dst6, dst6, dst6);
-
- ILVR_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst2, dst1,
- dst10_r, dst32_r, dst54_r, dst21_r);
- ILVR_H2_SH(dst4, dst3, dst6, dst5, dst43_r, dst65_r);
- ILVL_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst2, dst1,
- dst10_l, dst32_l, dst54_l, dst21_l);
- ILVL_H2_SH(dst4, dst3, dst6, dst5, dst43_l, dst65_l);
-
- for (loop_cnt = height >> 1; loop_cnt--;) {
- /* row 7 */
- LD_SB2(src0_ptr_tmp, src_stride, src7, src8);
- XORI_B2_128_SB(src7, src8);
- src0_ptr_tmp += 2 * src_stride;
+ dst4 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst5 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+ dst6 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
+ filt3);
- LD_SH2(src1_ptr_tmp, src2_stride, in0, in1);
- src1_ptr_tmp += (2 * src2_stride);
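+ /* one output row per iteration: only dst7 is computed here, dst0..dst6 carry the previous rows */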
+ for (loop_cnt = height; loop_cnt--;) {
+ src7 = LD_SB(src0_ptr_tmp);
+ src7 = (v16i8) __msa_xori_b((v16u8) src7, 128);
+ src0_ptr_tmp += src_stride;
+
+ in0 = LD_SH(src1_ptr_tmp);
+ src1_ptr_tmp += src2_stride;
VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3,
vec0, vec1, vec2, vec3);
- dst7 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst7, dst7, dst7, dst7);
-
+ dst7 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1,
+ filt2, filt3);
+ ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
+ ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dst5, dst4, dst54_r, dst54_l);
ILVRL_H2_SH(dst7, dst6, dst76_r, dst76_l);
dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r,
filt_h0, filt_h1, filt_h2, filt_h3);
@@ -1762,49 +1844,21 @@ static void hevc_hv_bi_8t_8multx2mult_msa(uint8_t *src0_ptr,
dst0_r >>= 6;
dst0_l >>= 6;
- VSHF_B4_SB(src8, src8, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst8 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst8, dst8, dst8, dst8);
-
- ILVRL_H2_SH(dst8, dst7, dst87_r, dst87_l);
- dst1_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r,
- filt_h0, filt_h1, filt_h2, filt_h3);
- dst1_l = HEVC_FILT_8TAP(dst21_l, dst43_l, dst65_l, dst87_l,
- filt_h0, filt_h1, filt_h2, filt_h3);
- dst1_r >>= 6;
- dst1_l >>= 6;
-
- UNPCK_SH_SW(in0, in0_r, in0_l);
- UNPCK_SH_SW(in1, in1_r, in1_l);
- in0_r = __msa_adds_s_w(in0_r, dst0_r);
- in0_l = __msa_adds_s_w(in0_l, dst0_l);
- in1_r = __msa_adds_s_w(in1_r, dst1_r);
- in1_l = __msa_adds_s_w(in1_l, dst1_l);
- SRARI_W4_SW(in0_r, in0_l, in1_r, in1_l, 7);
- in0_r = CLIP_SW_0_255(in0_r);
- in0_l = CLIP_SW_0_255(in0_l);
- in1_r = CLIP_SW_0_255(in1_r);
- in1_l = CLIP_SW_0_255(in1_l);
-
- HEVC_PCK_SW_SB4(in0_l, in0_r, in1_l, in1_r, dst0_r);
- ST8x2_UB(dst0_r, dst_tmp, dst_stride);
- dst_tmp += (2 * dst_stride);
-
- dst10_r = dst32_r;
- dst32_r = dst54_r;
- dst54_r = dst76_r;
- dst10_l = dst32_l;
- dst32_l = dst54_l;
- dst54_l = dst76_l;
- dst21_r = dst43_r;
- dst43_r = dst65_r;
- dst65_r = dst87_r;
- dst21_l = dst43_l;
- dst43_l = dst65_l;
- dst65_l = dst87_l;
- dst6 = dst8;
+ tmp = __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
+ ADDS_SH2_SH(tmp, in0, tmp, const_vec, tmp, tmp);
+ tmp = __msa_srari_h(tmp, 7);
+ tmp = CLIP_SH_0_255_MAX_SATU(tmp);
+ out = (v16u8) __msa_pckev_b((v16i8) tmp, (v16i8) tmp);
+ ST8x1_UB(out, dst_tmp);
+ dst_tmp += dst_stride;
+
+ dst0 = dst1;
+ dst1 = dst2;
+ dst2 = dst3;
+ dst3 = dst4;
+ dst4 = dst5;
+ dst5 = dst6;
+ dst6 = dst7;
}
src0_ptr += 8;
@@ -1823,7 +1877,7 @@ static void hevc_hv_bi_8t_8w_msa(uint8_t *src0_ptr,
const int8_t *filter_y,
int32_t height)
{
- hevc_hv_bi_8t_8multx2mult_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
+ hevc_hv_bi_8t_8multx1mult_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
dst, dst_stride, filter_x, filter_y,
height, 8);
}
@@ -1838,12 +1892,208 @@ static void hevc_hv_bi_8t_12w_msa(uint8_t *src0_ptr,
const int8_t *filter_y,
int32_t height)
{
- hevc_hv_bi_8t_8multx2mult_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
- dst, dst_stride, filter_x, filter_y,
- height, 8);
+ uint32_t loop_cnt;
+ uint8_t *src0_ptr_tmp, *dst_tmp;
+ int16_t *src1_ptr_tmp;
+ uint64_t tp0, tp1;
+ v16u8 out;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
+ v8i16 in0 = { 0 }, in1 = { 0 }, out0, out1, tmp, filter_vec, const_vec;
+ v8i16 filt0, filt1, filt2, filt3, filt_h0, filt_h1, filt_h2, filt_h3;
+ v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+ v8i16 dst30, dst41, dst52, dst63, dst66, dst97, dst108;
+ v8i16 dst10, dst32, dst54, dst76, dst98, dst21, dst43, dst65, dst87, dst109;
+ v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
+ v8i16 dst10_l, dst32_l, dst54_l, dst76_l;
+ v4i32 dst0_r, dst0_l, tmp0, tmp1, tmp2, tmp3;
- hevc_hv_bi_8t_4w_msa(src0_ptr + 8, src_stride, src1_ptr + 8, src2_stride,
- dst + 8, dst_stride, filter_x, filter_y, height);
+ src0_ptr -= ((3 * src_stride) + 3);
+
+ const_vec = __msa_ldi_h(128);
+ const_vec <<= 6;
+
+ filter_vec = LD_SH(filter_x);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
+
+ filter_vec = LD_SH(filter_y);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
+
+ SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
+
+ mask0 = LD_SB(ff_hevc_mask_arr);
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+ mask3 = mask0 + 6;
+
+ src0_ptr_tmp = src0_ptr;
+ dst_tmp = dst;
+ src1_ptr_tmp = src1_ptr;
+
+ LD_SB7(src0_ptr_tmp, src_stride, src0, src1, src2, src3, src4, src5,
+ src6);
+ src0_ptr_tmp += (7 * src_stride);
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
+
+ /* row 0 row 1 row 2 row 3 */
+ VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3, vec0, vec1, vec2,
+ vec3);
+ VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3, vec4, vec5, vec6,
+ vec7);
+ VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3, vec8, vec9, vec10,
+ vec11);
+ VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3, vec12, vec13, vec14,
+ vec15);
+ dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst1 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+ dst2 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
+ filt3);
+ dst3 = HEVC_FILT_8TAP_SH(vec12, vec13, vec14, vec15, filt0, filt1,
+ filt2, filt3);
+ VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3, vec0, vec1, vec2,
+ vec3);
+ VSHF_B4_SB(src5, src5, mask0, mask1, mask2, mask3, vec4, vec5, vec6,
+ vec7);
+ VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3, vec8, vec9, vec10,
+ vec11);
+ dst4 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst5 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+ dst6 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
+ filt3);
+
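+ /* left 8 columns, one output row per iteration; height is assumed to be 16 */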
+ for (loop_cnt = 16; loop_cnt--;) {
+ src7 = LD_SB(src0_ptr_tmp);
+ src7 = (v16i8) __msa_xori_b((v16u8) src7, 128);
+ src0_ptr_tmp += src_stride;
+
+ in0 = LD_SH(src1_ptr_tmp);
+ src1_ptr_tmp += src2_stride;
+
+ VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3, vec0, vec1, vec2,
+ vec3);
+ dst7 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1,
+ filt2, filt3);
+ ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
+ ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dst5, dst4, dst54_r, dst54_l);
+ ILVRL_H2_SH(dst7, dst6, dst76_r, dst76_l);
+ dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r, filt_h0,
+ filt_h1, filt_h2, filt_h3);
+ dst0_l = HEVC_FILT_8TAP(dst10_l, dst32_l, dst54_l, dst76_l, filt_h0,
+ filt_h1, filt_h2, filt_h3);
+ dst0_r >>= 6;
+ dst0_l >>= 6;
+
+ tmp = __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
+ ADDS_SH2_SH(tmp, in0, tmp, const_vec, tmp, tmp);
+ tmp = __msa_srari_h(tmp, 7);
+ tmp = CLIP_SH_0_255_MAX_SATU(tmp);
+ out = (v16u8) __msa_pckev_b((v16i8) tmp, (v16i8) tmp);
+ ST8x1_UB(out, dst_tmp);
+ dst_tmp += dst_stride;
+
+ dst0 = dst1;
+ dst1 = dst2;
+ dst2 = dst3;
+ dst3 = dst4;
+ dst4 = dst5;
+ dst5 = dst6;
+ dst6 = dst7;
+ }
+
+ src0_ptr += 8;
+ dst += 8;
+ src1_ptr += 8;
+
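+ /* remaining 4 columns: redo the setup with the two-source 4 width masks */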
+ mask4 = LD_SB(ff_hevc_mask_arr + 16);
+ mask5 = mask4 + 2;
+ mask6 = mask4 + 4;
+ mask7 = mask4 + 6;
+
+ LD_SB7(src0_ptr, src_stride, src0, src1, src2, src3, src4, src5, src6);
+ src0_ptr += (7 * src_stride);
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
+
+ /* row 0 row 1 row 2 row 3 */
+ VSHF_B4_SB(src0, src3, mask4, mask5, mask6, mask7, vec0, vec1, vec2, vec3);
+ VSHF_B4_SB(src1, src4, mask4, mask5, mask6, mask7, vec4, vec5, vec6, vec7);
+ VSHF_B4_SB(src2, src5, mask4, mask5, mask6, mask7,
+ vec8, vec9, vec10, vec11);
+ VSHF_B4_SB(src3, src6, mask4, mask5, mask6, mask7,
+ vec12, vec13, vec14, vec15);
+ dst30 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst41 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+ dst52 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
+ filt3);
+ dst63 = HEVC_FILT_8TAP_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2,
+ filt3);
+
+ ILVRL_H2_SH(dst41, dst30, dst10, dst43);
+ ILVRL_H2_SH(dst52, dst41, dst21, dst54);
+ ILVRL_H2_SH(dst63, dst52, dst32, dst65);
+
+ dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);
+
+ for (loop_cnt = 4; loop_cnt--;) {
+ LD_SB4(src0_ptr, src_stride, src7, src8, src9, src10);
+ src0_ptr += (4 * src_stride);
+ XORI_B4_128_SB(src7, src8, src9, src10);
+
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ INSERT_D2_SH(tp0, tp1, in0);
+ src1_ptr += (2 * src2_stride);
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ INSERT_D2_SH(tp0, tp1, in1);
+ src1_ptr += (2 * src2_stride);
+
+ VSHF_B4_SB(src7, src9, mask4, mask5, mask6, mask7, vec0, vec1, vec2,
+ vec3);
+ VSHF_B4_SB(src8, src10, mask4, mask5, mask6, mask7, vec4, vec5, vec6,
+ vec7);
+ dst97 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst108 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+
+ dst76 = __msa_ilvr_h(dst97, dst66);
+ ILVRL_H2_SH(dst108, dst97, dst87, dst109);
+ dst66 = (v8i16) __msa_splati_d((v2i64) dst97, 1);
+ dst98 = __msa_ilvr_h(dst66, dst108);
+
+ tmp0 = HEVC_FILT_8TAP(dst10, dst32, dst54, dst76, filt_h0, filt_h1,
+ filt_h2, filt_h3);
+ tmp1 = HEVC_FILT_8TAP(dst21, dst43, dst65, dst87, filt_h0, filt_h1,
+ filt_h2, filt_h3);
+ tmp2 = HEVC_FILT_8TAP(dst32, dst54, dst76, dst98, filt_h0, filt_h1,
+ filt_h2, filt_h3);
+ tmp3 = HEVC_FILT_8TAP(dst43, dst65, dst87, dst109, filt_h0, filt_h1,
+ filt_h2, filt_h3);
+ SRA_4V(tmp0, tmp1, tmp2, tmp3, 6);
+ PCKEV_H2_SH(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ADDS_SH2_SH(out0, in0, out1, in1, out0, out1);
+ ADDS_SH2_SH(out0, const_vec, out1, const_vec, out0, out1);
+ SRARI_H2_SH(out0, out1, 7);
+ CLIP_SH2_0_255_MAX_SATU(out0, out1);
+ out = (v16u8) __msa_pckev_b((v16i8) out1, (v16i8) out0);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ dst10 = dst54;
+ dst32 = dst76;
+ dst54 = dst98;
+ dst21 = dst65;
+ dst43 = dst87;
+ dst65 = dst109;
+ dst66 = (v8i16) __msa_splati_d((v2i64) dst108, 1);
+ }
}
static void hevc_hv_bi_8t_16w_msa(uint8_t *src0_ptr,
@@ -1856,7 +2106,7 @@ static void hevc_hv_bi_8t_16w_msa(uint8_t *src0_ptr,
const int8_t *filter_y,
int32_t height)
{
- hevc_hv_bi_8t_8multx2mult_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
+ hevc_hv_bi_8t_8multx1mult_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
dst, dst_stride, filter_x, filter_y,
height, 16);
}
@@ -1871,7 +2121,7 @@ static void hevc_hv_bi_8t_24w_msa(uint8_t *src0_ptr,
const int8_t *filter_y,
int32_t height)
{
- hevc_hv_bi_8t_8multx2mult_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
+ hevc_hv_bi_8t_8multx1mult_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
dst, dst_stride, filter_x, filter_y,
height, 24);
}
@@ -1886,7 +2136,7 @@ static void hevc_hv_bi_8t_32w_msa(uint8_t *src0_ptr,
const int8_t *filter_y,
int32_t height)
{
- hevc_hv_bi_8t_8multx2mult_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
+ hevc_hv_bi_8t_8multx1mult_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
dst, dst_stride, filter_x, filter_y,
height, 32);
}
@@ -1901,7 +2151,7 @@ static void hevc_hv_bi_8t_48w_msa(uint8_t *src0_ptr,
const int8_t *filter_y,
int32_t height)
{
- hevc_hv_bi_8t_8multx2mult_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
+ hevc_hv_bi_8t_8multx1mult_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
dst, dst_stride, filter_x, filter_y,
height, 48);
}
@@ -1916,7 +2166,7 @@ static void hevc_hv_bi_8t_64w_msa(uint8_t *src0_ptr,
const int8_t *filter_y,
int32_t height)
{
- hevc_hv_bi_8t_8multx2mult_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
+ hevc_hv_bi_8t_8multx1mult_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
dst, dst_stride, filter_x, filter_y,
height, 64);
}