@@ -80,6 +80,15 @@
out_m; \
} )
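+/* 4-tap filter on byte inputs: dot-product plus dot-product-accumulate
+ * into signed halfwords */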
+#define HEVC_FILT_4TAP_SH(in0, in1, filt0, filt1) \
+( { \
+ v8i16 out_m; \
+ \
+ out_m = __msa_dotp_s_h((v16i8) in0, (v16i8) filt0); \
+ out_m = __msa_dpadd_s_h(out_m, (v16i8) in1, (v16i8) filt1); \
+ out_m; \
+} )
+
#define HEVC_FILT_4TAP(in0, in1, filt0, filt1) \
( { \
v4i32 out_m; \
@@ -1801,40 +1801,42 @@ static void hevc_hv_uniwgt_8t_4w_msa(uint8_t *src,
int32_t rnd_val)
{
uint32_t loop_cnt;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v16u8 out;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
v8i16 filt0, filt1, filt2, filt3;
- v4i32 filt_h0, filt_h1, filt_h2, filt_h3;
+ v8i16 filt_h0, filt_h1, filt_h2, filt_h3;
v16i8 mask1, mask2, mask3;
- v8i16 filter_vec, const_vec;
+ v8i16 filter_vec;
v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
- v8i16 dst30, dst41, dst52, dst63, dst66, dst87;
- v4i32 dst0_r, dst1_r, weight_vec, offset_vec, rnd_vec;
- v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
- v8i16 dst21_r, dst43_r, dst65_r, dst87_r;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20 };
- v8u16 mask4 = { 0, 4, 1, 5, 2, 6, 3, 7 };
+ v8i16 dst30, dst41, dst52, dst63, dst66, dst97, dst108;
+ v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst98_r;
+ v8i16 dst21_r, dst43_r, dst65_r, dst87_r, dst109_r;
+ v4i32 dst0_r, dst1_r, dst2_r, dst3_r;
+ v4i32 weight_vec, offset_vec, rnd_vec, const_128, denom_vec;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
src -= ((3 * src_stride) + 3);
filter_vec = LD_SH(filter_x);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W4_SW(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
+ SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
- const_vec = __msa_ldi_h(128);
- const_vec <<= 6;
-
weight_vec = __msa_fill_w(weight);
offset_vec = __msa_fill_w(offset);
rnd_vec = __msa_fill_w(rnd_val);
+ denom_vec = rnd_vec - 6;
+
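+    /* compensate for the 128 bias of the XORed source in the offset:
+     * (128 * 64 * weight) >> rnd_val == (128 * weight) >> (rnd_val - 6) */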
+ const_128 = __msa_ldi_w(128);
+ const_128 *= weight_vec;
+ offset_vec += __msa_srar_w(const_128, denom_vec);
LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
src += (7 * src_stride);
@@ -1847,64 +1849,68 @@ static void hevc_hv_uniwgt_8t_4w_msa(uint8_t *src,
vec8, vec9, vec10, vec11);
VSHF_B4_SB(src3, src6, mask0, mask1, mask2, mask3,
vec12, vec13, vec14, vec15);
- dst30 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst30, dst30, dst30, dst30);
- dst41 = const_vec;
- DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
- dst41, dst41, dst41, dst41);
- dst52 = const_vec;
- DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
- dst52, dst52, dst52, dst52);
- dst63 = const_vec;
- DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2, filt3,
- dst63, dst63, dst63, dst63);
-
- ILVR_H3_SH(dst41, dst30, dst52, dst41, dst63, dst52,
- dst10_r, dst21_r, dst32_r);
-
- dst43_r = __msa_ilvl_h(dst41, dst30);
- dst54_r = __msa_ilvl_h(dst52, dst41);
- dst65_r = __msa_ilvl_h(dst63, dst52);
+ dst30 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst41 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+ dst52 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
+ filt3);
+ dst63 = HEVC_FILT_8TAP_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2,
+ filt3);
+
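+    /* each vector packs two rows: right halves pair rows (0,1)..(2,3),
+     * left halves pair rows (3,4)..(5,6) for the vertical filter */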
+ ILVRL_H2_SH(dst41, dst30, dst10_r, dst43_r);
+ ILVRL_H2_SH(dst52, dst41, dst21_r, dst54_r);
+ ILVRL_H2_SH(dst63, dst52, dst32_r, dst65_r);
dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);
- for (loop_cnt = height >> 1; loop_cnt--;) {
- LD_SB2(src, src_stride, src7, src8);
- src += (2 * src_stride);
- XORI_B2_128_SB(src7, src8);
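+    /* four output rows per iteration instead of two */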
+ for (loop_cnt = height >> 2; loop_cnt--;) {
+ LD_SB4(src, src_stride, src7, src8, src9, src10);
+ src += (4 * src_stride);
+ XORI_B4_128_SB(src7, src8, src9, src10);
- VSHF_B4_SB(src7, src8, mask0, mask1, mask2, mask3,
+ VSHF_B4_SB(src7, src9, mask0, mask1, mask2, mask3,
vec0, vec1, vec2, vec3);
- dst87 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst87, dst87, dst87, dst87);
- dst76_r = __msa_ilvr_h(dst87, dst66);
- dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r,
- filt_h0, filt_h1, filt_h2, filt_h3);
- dst87_r = __msa_vshf_h((v8i16) mask4, dst87, dst87);
- dst1_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r,
- filt_h0, filt_h1, filt_h2, filt_h3);
-
- dst0_r >>= 6;
- dst1_r >>= 6;
+ VSHF_B4_SB(src8, src10, mask0, mask1, mask2, mask3,
+ vec4, vec5, vec6, vec7);
+ dst97 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst108 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+
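+        /* pair rows (6,7) and (8,9) via the single-row replicas */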
+ dst76_r = __msa_ilvr_h(dst97, dst66);
+ ILVRL_H2_SH(dst108, dst97, dst87_r, dst109_r);
+ dst66 = (v8i16) __msa_splati_d((v2i64) dst97, 1);
+ dst98_r = __msa_ilvr_h(dst66, dst108);
+
+ dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r, filt_h0,
+ filt_h1, filt_h2, filt_h3);
+ dst1_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r, filt_h0,
+ filt_h1, filt_h2, filt_h3);
+ dst2_r = HEVC_FILT_8TAP(dst32_r, dst54_r, dst76_r, dst98_r, filt_h0,
+ filt_h1, filt_h2, filt_h3);
+ dst3_r = HEVC_FILT_8TAP(dst43_r, dst65_r, dst87_r, dst109_r, filt_h0,
+ filt_h1, filt_h2, filt_h3);
+
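+        /* >> 6, weight, round by rnd_val, add offset, clip to [0, 255] */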
+ SRA_4V(dst0_r, dst1_r, dst2_r, dst3_r, 6);
MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
- SRAR_W2_SW(dst0_r, dst1_r, rnd_vec);
+ MUL2(dst2_r, weight_vec, dst3_r, weight_vec, dst2_r, dst3_r);
+ SRAR_W4_SW(dst0_r, dst1_r, dst2_r, dst3_r, rnd_vec);
ADD2(dst0_r, offset_vec, dst1_r, offset_vec, dst0_r, dst1_r);
- dst0_r = CLIP_SW_0_255(dst0_r);
- dst1_r = CLIP_SW_0_255(dst1_r);
-
- HEVC_PCK_SW_SB2(dst1_r, dst0_r, dst0_r);
- ST4x2_UB(dst0_r, dst, dst_stride);
- dst += (2 * dst_stride);
+ ADD2(dst2_r, offset_vec, dst3_r, offset_vec, dst2_r, dst3_r);
+ CLIP_SW4_0_255_MAX_SATU(dst0_r, dst1_r, dst2_r, dst3_r);
+ PCKEV_H2_SW(dst1_r, dst0_r, dst3_r, dst2_r, dst0_r, dst1_r);
+ out = (v16u8) __msa_pckev_b((v16i8) dst1_r, (v16i8) dst0_r);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
- dst10_r = dst32_r;
- dst32_r = dst54_r;
- dst54_r = dst76_r;
- dst21_r = dst43_r;
- dst43_r = dst65_r;
- dst65_r = dst87_r;
- dst66 = (v8i16) __msa_splati_d((v2i64) dst87, 1);
+ dst10_r = dst54_r;
+ dst32_r = dst76_r;
+ dst54_r = dst98_r;
+ dst21_r = dst65_r;
+ dst43_r = dst87_r;
+ dst65_r = dst109_r;
+ dst66 = (v8i16) __msa_splati_d((v2i64) dst108, 1);
}
}
@@ -1927,7 +1933,7 @@ static void hevc_hv_uniwgt_8t_8multx2mult_msa(uint8_t *src,
v8i16 filt0, filt1, filt2, filt3;
v4i32 filt_h0, filt_h1, filt_h2, filt_h3;
v16i8 mask1, mask2, mask3;
- v8i16 filter_vec, const_vec;
+ v8i16 filter_vec;
v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
@@ -1936,23 +1942,25 @@ static void hevc_hv_uniwgt_8t_8multx2mult_msa(uint8_t *src,
v8i16 dst10_l, dst32_l, dst54_l, dst76_l;
v8i16 dst21_r, dst43_r, dst65_r, dst87_r;
v8i16 dst21_l, dst43_l, dst65_l, dst87_l;
- v4i32 weight_vec, offset_vec, rnd_vec;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v4i32 weight_vec, offset_vec, rnd_vec, const_128, denom_vec;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
src -= ((3 * src_stride) + 3);
- const_vec = __msa_ldi_h(128);
- const_vec <<= 6;
weight_vec = __msa_fill_w(weight);
offset_vec = __msa_fill_w(offset);
rnd_vec = __msa_fill_w(rnd_val);
+ denom_vec = rnd_vec - 6;
+
+ const_128 = __msa_ldi_w(128);
+ const_128 *= weight_vec;
+ offset_vec += __msa_srar_w(const_128, denom_vec);
filter_vec = LD_SH(filter_x);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
SPLATI_W4_SW(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
mask1 = mask0 + 2;
@@ -1975,18 +1983,14 @@ static void hevc_hv_uniwgt_8t_8multx2mult_msa(uint8_t *src,
vec8, vec9, vec10, vec11);
VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
vec12, vec13, vec14, vec15);
- dst0 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst0, dst0, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
- dst1, dst1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
- dst2, dst2, dst2, dst2);
- dst3 = const_vec;
- DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2, filt3,
- dst3, dst3, dst3, dst3);
+ dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst1 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+ dst2 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
+ filt3);
+ dst3 = HEVC_FILT_8TAP_SH(vec12, vec13, vec14, vec15, filt0, filt1,
+ filt2, filt3);
VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3,
vec0, vec1, vec2, vec3);
@@ -1994,15 +1998,12 @@ static void hevc_hv_uniwgt_8t_8multx2mult_msa(uint8_t *src,
vec4, vec5, vec6, vec7);
VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3,
vec8, vec9, vec10, vec11);
- dst4 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst4, dst4, dst4, dst4);
- dst5 = const_vec;
- DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
- dst5, dst5, dst5, dst5);
- dst6 = const_vec;
- DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
- dst6, dst6, dst6, dst6);
+ dst4 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst5 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+ dst6 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
+ filt3);
ILVR_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst2, dst1,
dst10_r, dst32_r, dst54_r, dst21_r);
@@ -2018,9 +2019,8 @@ static void hevc_hv_uniwgt_8t_8multx2mult_msa(uint8_t *src,
VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3,
vec0, vec1, vec2, vec3);
- dst7 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst7, dst7, dst7, dst7);
+ dst7 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1,
+ filt2, filt3);
ILVRL_H2_SH(dst7, dst6, dst76_r, dst76_l);
dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r,
@@ -2033,9 +2033,8 @@ static void hevc_hv_uniwgt_8t_8multx2mult_msa(uint8_t *src,
/* row 8 */
VSHF_B4_SB(src8, src8, mask0, mask1, mask2, mask3,
vec0, vec1, vec2, vec3);
- dst8 = const_vec;
- DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
- dst8, dst8, dst8, dst8);
+ dst8 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1,
+ filt2, filt3);
ILVRL_H2_SH(dst8, dst7, dst87_r, dst87_l);
dst1_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r,
@@ -2045,11 +2044,15 @@ static void hevc_hv_uniwgt_8t_8multx2mult_msa(uint8_t *src,
dst1_r >>= 6;
dst1_l >>= 6;
- HEVC_HV_UNIW_RND_CLIP4(dst0_r, dst1_r, dst0_l, dst1_l,
- weight_vec, offset_vec, rnd_vec,
- dst0_r, dst1_r, dst0_l, dst1_l);
+ MUL2(dst0_r, weight_vec, dst0_l, weight_vec, dst0_r, dst0_l);
+ MUL2(dst1_r, weight_vec, dst1_l, weight_vec, dst1_r, dst1_l);
+ SRAR_W4_SW(dst0_r, dst1_r, dst0_l, dst1_l, rnd_vec);
+ ADD2(dst0_r, offset_vec, dst0_l, offset_vec, dst0_r, dst0_l);
+ ADD2(dst1_r, offset_vec, dst1_l, offset_vec, dst1_r, dst1_l);
+ CLIP_SW4_0_255_MAX_SATU(dst0_r, dst1_r, dst0_l, dst1_l);
- HEVC_PCK_SW_SB4(dst0_l, dst0_r, dst1_l, dst1_r, dst0_r);
+ PCKEV_H2_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst0_r, dst1_r);
+ dst0_r = (v4i32) __msa_pckev_b((v16i8) dst1_r, (v16i8) dst0_r);
ST8x2_UB(dst0_r, dst_tmp, dst_stride);
dst_tmp += (2 * dst_stride);
@@ -2100,12 +2103,198 @@ static void hevc_hv_uniwgt_8t_12w_msa(uint8_t *src,
int32_t offset,
int32_t rnd_val)
{
- hevc_hv_uniwgt_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
- filter_x, filter_y, height, weight,
- offset, rnd_val, 8);
- hevc_hv_uniwgt_8t_4w_msa(src + 8, src_stride, dst + 8, dst_stride,
- filter_x, filter_y, height, weight, offset,
- rnd_val);
+ uint32_t loop_cnt;
+ uint8_t *src_tmp, *dst_tmp;
+ v16u8 out;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
+ v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+ v8i16 dst30, dst41, dst52, dst63, dst66, dst97, dst108;
+ v8i16 filt0, filt1, filt2, filt3, filt_h0, filt_h1, filt_h2, filt_h3;
+ v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst10_l, dst32_l, dst54_l;
+ v8i16 dst98_r, dst21_r, dst43_r, dst65_r, dst87_r, dst109_r;
+ v8i16 dst76_l, filter_vec;
+ v4i32 dst0_r, dst0_l, dst1_r, dst2_r, dst3_r;
+ v4i32 weight_vec, offset_vec, rnd_vec, const_128, denom_vec;
+
+ src -= ((3 * src_stride) + 3);
+
+ filter_vec = LD_SH(filter_x);
+ SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
+
+ filter_vec = LD_SH(filter_y);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
+
+ SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
+
+ weight_vec = __msa_fill_w(weight);
+ offset_vec = __msa_fill_w(offset);
+ rnd_vec = __msa_fill_w(rnd_val);
+ denom_vec = rnd_vec - 6;
+
+ const_128 = __msa_ldi_w(128);
+ const_128 *= weight_vec;
+ offset_vec += __msa_srar_w(const_128, denom_vec);
+
+ mask0 = LD_SB(ff_hevc_mask_arr);
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+ mask3 = mask0 + 6;
+
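+    /* 12 = 8 + 4: filter the left 8 columns first, then the right 4 */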
+ src_tmp = src;
+ dst_tmp = dst;
+
+ LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
+ src_tmp += (7 * src_stride);
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
+
+ /* row 0 row 1 row 2 row 3 */
+ VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
+ VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3, vec4, vec5, vec6, vec7);
+ VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3, vec8, vec9, vec10,
+ vec11);
+ VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3, vec12, vec13, vec14,
+ vec15);
+ dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst1 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+ dst2 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
+ filt3);
+ dst3 = HEVC_FILT_8TAP_SH(vec12, vec13, vec14, vec15, filt0, filt1,
+ filt2, filt3);
+ VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
+ VSHF_B4_SB(src5, src5, mask0, mask1, mask2, mask3, vec4, vec5, vec6, vec7);
+ VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3, vec8, vec9, vec10,
+ vec11);
+ dst4 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst5 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+ dst6 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
+ filt3);
+
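+    /* left 8 columns: 16 rows, one per iteration */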
+ for (loop_cnt = 16; loop_cnt--;) {
+ src7 = LD_SB(src_tmp);
+ src7 = (v16i8) __msa_xori_b((v16u8) src7, 128);
+ src_tmp += src_stride;
+
+ VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3, vec0, vec1, vec2,
+ vec3);
+ dst7 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
+ ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dst5, dst4, dst54_r, dst54_l);
+ ILVRL_H2_SH(dst7, dst6, dst76_r, dst76_l);
+
+ dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r,
+ filt_h0, filt_h1, filt_h2, filt_h3);
+ dst0_l = HEVC_FILT_8TAP(dst10_l, dst32_l, dst54_l, dst76_l,
+ filt_h0, filt_h1, filt_h2, filt_h3);
+ dst0_r >>= 6;
+ dst0_l >>= 6;
+
+ MUL2(dst0_r, weight_vec, dst0_l, weight_vec, dst0_r, dst0_l);
+ SRAR_W2_SW(dst0_r, dst0_l, rnd_vec);
+ ADD2(dst0_r, offset_vec, dst0_l, offset_vec, dst0_r, dst0_l);
+ CLIP_SW2_0_255_MAX_SATU(dst0_r, dst0_l);
+ dst0_r = (v4i32) __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
+ out = (v16u8) __msa_pckev_b((v16i8) dst0_r, (v16i8) dst0_r);
+ ST8x1_UB(out, dst_tmp);
+ dst_tmp += dst_stride;
+
+ dst0 = dst1;
+ dst1 = dst2;
+ dst2 = dst3;
+ dst3 = dst4;
+ dst4 = dst5;
+ dst5 = dst6;
+ dst6 = dst7;
+ }
+
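+    /* right 4 columns: switch to the two-rows-per-vector masks */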
+ src += 8;
+ dst += 8;
+
+ mask4 = LD_SB(ff_hevc_mask_arr + 16);
+ mask5 = mask4 + 2;
+ mask6 = mask4 + 4;
+ mask7 = mask4 + 6;
+
+ LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
+ src += (7 * src_stride);
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
+
+ VSHF_B4_SB(src0, src3, mask4, mask5, mask6, mask7, vec0, vec1, vec2, vec3);
+ VSHF_B4_SB(src1, src4, mask4, mask5, mask6, mask7, vec4, vec5, vec6, vec7);
+ VSHF_B4_SB(src2, src5, mask4, mask5, mask6, mask7, vec8, vec9, vec10,
+ vec11);
+ VSHF_B4_SB(src3, src6, mask4, mask5, mask6, mask7, vec12, vec13, vec14,
+ vec15);
+ dst30 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst41 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+ dst52 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
+ filt3);
+ dst63 = HEVC_FILT_8TAP_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2,
+ filt3);
+ ILVRL_H2_SH(dst41, dst30, dst10_r, dst43_r);
+ ILVRL_H2_SH(dst52, dst41, dst21_r, dst54_r);
+ ILVRL_H2_SH(dst63, dst52, dst32_r, dst65_r);
+
+ dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);
+
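+    /* right 4 columns: 16 rows, four per iteration */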
+ for (loop_cnt = 4; loop_cnt--;) {
+ LD_SB4(src, src_stride, src7, src8, src9, src10);
+ src += (4 * src_stride);
+ XORI_B4_128_SB(src7, src8, src9, src10);
+
+ VSHF_B4_SB(src7, src9, mask4, mask5, mask6, mask7, vec0, vec1, vec2,
+ vec3);
+ VSHF_B4_SB(src8, src10, mask4, mask5, mask6, mask7, vec4, vec5, vec6,
+ vec7);
+ dst97 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
+ filt3);
+ dst108 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
+ filt3);
+
+ dst76_r = __msa_ilvr_h(dst97, dst66);
+ ILVRL_H2_SH(dst108, dst97, dst87_r, dst109_r);
+ dst66 = (v8i16) __msa_splati_d((v2i64) dst97, 1);
+ dst98_r = __msa_ilvr_h(dst66, dst108);
+
+ dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r, filt_h0,
+ filt_h1, filt_h2, filt_h3);
+ dst1_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r, filt_h0,
+ filt_h1, filt_h2, filt_h3);
+ dst2_r = HEVC_FILT_8TAP(dst32_r, dst54_r, dst76_r, dst98_r, filt_h0,
+ filt_h1, filt_h2, filt_h3);
+ dst3_r = HEVC_FILT_8TAP(dst43_r, dst65_r, dst87_r, dst109_r, filt_h0,
+ filt_h1, filt_h2, filt_h3);
+
+ SRA_4V(dst0_r, dst1_r, dst2_r, dst3_r, 6);
+ MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
+ MUL2(dst2_r, weight_vec, dst3_r, weight_vec, dst2_r, dst3_r);
+ SRAR_W4_SW(dst0_r, dst1_r, dst2_r, dst3_r, rnd_vec);
+ ADD2(dst0_r, offset_vec, dst1_r, offset_vec, dst0_r, dst1_r);
+ ADD2(dst2_r, offset_vec, dst3_r, offset_vec, dst2_r, dst3_r);
+ CLIP_SW4_0_255_MAX_SATU(dst0_r, dst1_r, dst2_r, dst3_r);
+ PCKEV_H2_SW(dst1_r, dst0_r, dst3_r, dst2_r, dst0_r, dst1_r);
+ out = (v16u8) __msa_pckev_b((v16i8) dst1_r, (v16i8) dst0_r);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
+ dst += (4 * dst_stride);
+
+ dst10_r = dst54_r;
+ dst32_r = dst76_r;
+ dst54_r = dst98_r;
+ dst21_r = dst65_r;
+ dst43_r = dst87_r;
+ dst65_r = dst109_r;
+ dst66 = (v8i16) __msa_splati_d((v2i64) dst108, 1);
+ }
}
static void hevc_hv_uniwgt_8t_16w_msa(uint8_t *src,
@@ -3977,22 +4166,20 @@ static void hevc_hv_uniwgt_4t_4x2_msa(uint8_t *src,
int32_t dst_stride,
const int8_t *filter_x,
const int8_t *filter_y,
- int32_t height,
int32_t weight,
int32_t offset,
int32_t rnd_val)
{
+ v16u8 out;
v16i8 src0, src1, src2, src3, src4;
v8i16 filt0, filt1;
- v4i32 filt_h0, filt_h1;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
v16i8 mask1;
- v8i16 filter_vec, const_vec;
+ v8i16 filt_h0, filt_h1, filter_vec, tmp;
v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
- v8i16 dst0, dst1, dst2, dst3, dst4;
- v8i16 dst10_r, dst32_r, dst21_r, dst43_r;
- v4i32 dst0_r, dst1_r;
- v4i32 weight_vec, offset_vec, rnd_vec;
+ v8i16 dst20, dst31, dst42, dst10, dst32, dst21, dst43;
+ v8i16 offset_vec, const_128, denom_vec;
+ v4i32 dst0, dst1, weight_vec, rnd_vec;
src -= (src_stride + 1);
@@ -4000,63 +4187,41 @@ static void hevc_hv_uniwgt_4t_4x2_msa(uint8_t *src,
SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SW(filter_vec, 0, filt_h0, filt_h1);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
mask1 = mask0 + 2;
- const_vec = __msa_ldi_h(128);
- const_vec <<= 6;
-
weight_vec = __msa_fill_w(weight);
- offset_vec = __msa_fill_w(offset);
rnd_vec = __msa_fill_w(rnd_val);
- LD_SB3(src, src_stride, src0, src1, src2);
- src += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
-
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dst0 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
-
- ILVR_H2_SH(dst1, dst0, dst2, dst1, dst10_r, dst21_r);
- LD_SB2(src, src_stride, src3, src4);
- XORI_B2_128_SB(src3, src4);
-
- /* row 3 */
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
-
- dst32_r = __msa_ilvr_h(dst3, dst2);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_r >>= 6;
-
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
-
- dst43_r = __msa_ilvr_h(dst4, dst3);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst1_r >>= 6;
-
- MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
- SRAR_W2_SW(dst0_r, dst1_r, rnd_vec);
- ADD2(dst0_r, offset_vec, dst1_r, offset_vec, dst0_r, dst1_r);
- dst0_r = CLIP_SW_0_255(dst0_r);
- dst1_r = CLIP_SW_0_255(dst1_r);
-
- HEVC_PCK_SW_SB2(dst1_r, dst0_r, dst0_r);
- ST4x2_UB(dst0_r, dst, dst_stride);
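+    /* weight on words, offset and bias compensation on packed halfwords */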
+ offset_vec = __msa_fill_h(offset);
+ denom_vec = __msa_fill_h(rnd_val - 6);
+ const_128 = __msa_fill_h((128 * weight));
+ offset_vec += __msa_srar_h(const_128, denom_vec);
+
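+    /* five input rows give the two 4-tap vertical outputs */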
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ VSHF_B2_SB(src0, src2, src0, src2, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src1, src3, src1, src3, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src2, src4, src2, src4, mask0, mask1, vec4, vec5);
+ dst20 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst31 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst42 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ ILVRL_H2_SH(dst31, dst20, dst10, dst32);
+ ILVRL_H2_SH(dst42, dst31, dst21, dst43);
+ dst0 = HEVC_FILT_4TAP(dst10, dst32, filt_h0, filt_h1);
+ dst1 = HEVC_FILT_4TAP(dst21, dst43, filt_h0, filt_h1);
+ dst0 >>= 6;
+ dst1 >>= 6;
+ MUL2(dst0, weight_vec, dst1, weight_vec, dst0, dst1);
+ SRAR_W2_SW(dst0, dst1, rnd_vec);
+ tmp = __msa_pckev_h((v8i16) dst1, (v8i16) dst0);
+ tmp += offset_vec;
+ tmp = CLIP_SH_0_255_MAX_SATU(tmp);
+ out = (v16u8) __msa_pckev_b((v16i8) tmp, (v16i8) tmp);
+ ST4x2_UB(out, dst, dst_stride);
}
static void hevc_hv_uniwgt_4t_4x4_msa(uint8_t *src,
@@ -4065,22 +4230,20 @@ static void hevc_hv_uniwgt_4t_4x4_msa(uint8_t *src,
int32_t dst_stride,
const int8_t *filter_x,
const int8_t *filter_y,
- int32_t height,
int32_t weight,
int32_t offset,
int32_t rnd_val)
{
+ v16u8 out;
v16i8 src0, src1, src2, src3, src4, src5, src6;
v8i16 filt0, filt1;
- v4i32 filt_h0, filt_h1;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v8i16 filt_h0, filt_h1, filter_vec, tmp0, tmp1;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
v16i8 mask1;
- v8i16 filter_vec, const_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
- v4i32 dst0_r, dst1_r, dst2_r, dst3_r;
- v8i16 dst10_r, dst32_r, dst21_r, dst43_r;
- v4i32 weight_vec, offset_vec, rnd_vec;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v8i16 dst30, dst41, dst52, dst63, dst10, dst32, dst54, dst21, dst43, dst65;
+ v8i16 offset_vec, const_128, denom_vec;
+ v4i32 dst0, dst1, dst2, dst3, weight_vec, rnd_vec;
src -= (src_stride + 1);
@@ -4088,76 +4251,46 @@ static void hevc_hv_uniwgt_4t_4x4_msa(uint8_t *src,
SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SW(filter_vec, 0, filt_h0, filt_h1);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
mask1 = mask0 + 2;
- const_vec = __msa_ldi_h(128);
- const_vec <<= 6;
-
weight_vec = __msa_fill_w(weight);
- offset_vec = __msa_fill_w(offset);
rnd_vec = __msa_fill_w(rnd_val);
- LD_SB3(src, src_stride, src0, src1, src2);
- src += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
-
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dst0 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
-
- ILVR_H2_SH(dst1, dst0, dst2, dst1, dst10_r, dst21_r);
-
- LD_SB4(src, src_stride, src3, src4, src5, src6);
- XORI_B4_128_SB(src3, src4, src5, src6);
-
- /* row 3 */
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
- dst32_r = __msa_ilvr_h(dst3, dst2);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_r >>= 6;
-
- /* row 4 */
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
- dst43_r = __msa_ilvr_h(dst4, dst3);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst1_r >>= 6;
-
- /* row 5 */
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
- dst5 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst5, dst5);
- dst10_r = __msa_ilvr_h(dst5, dst4);
- dst2_r = HEVC_FILT_4TAP(dst32_r, dst10_r, filt_h0, filt_h1);
- dst2_r >>= 6;
+ offset_vec = __msa_fill_h(offset);
+ denom_vec = __msa_fill_h(rnd_val - 6);
+ const_128 = __msa_fill_h((128 * weight));
+ offset_vec += __msa_srar_h(const_128, denom_vec);
- /* row 6 */
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);
- dst21_r = __msa_ilvr_h(dst2, dst5);
- dst3_r = HEVC_FILT_4TAP(dst43_r, dst21_r, filt_h0, filt_h1);
- dst3_r >>= 6;
-
- HEVC_HV_UNIW_RND_CLIP4(dst0_r, dst1_r, dst2_r, dst3_r,
- weight_vec, offset_vec, rnd_vec,
- dst0_r, dst1_r, dst2_r, dst3_r);
- HEVC_PCK_SW_SB4(dst1_r, dst0_r, dst3_r, dst2_r, dst0_r);
- ST4x4_UB(dst0_r, dst0_r, 0, 1, 2, 3, dst, dst_stride);
+ LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
+ VSHF_B2_SB(src0, src3, src0, src3, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src1, src4, src1, src4, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src2, src5, src2, src5, mask0, mask1, vec4, vec5);
+ VSHF_B2_SB(src3, src6, src3, src6, mask0, mask1, vec6, vec7);
+ dst30 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst41 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst52 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dst63 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+ ILVRL_H2_SH(dst41, dst30, dst10, dst43);
+ ILVRL_H2_SH(dst52, dst41, dst21, dst54);
+ ILVRL_H2_SH(dst63, dst52, dst32, dst65);
+ dst0 = HEVC_FILT_4TAP(dst10, dst32, filt_h0, filt_h1);
+ dst1 = HEVC_FILT_4TAP(dst21, dst43, filt_h0, filt_h1);
+ dst2 = HEVC_FILT_4TAP(dst32, dst54, filt_h0, filt_h1);
+ dst3 = HEVC_FILT_4TAP(dst43, dst65, filt_h0, filt_h1);
+ SRA_4V(dst0, dst1, dst2, dst3, 6);
+ MUL2(dst0, weight_vec, dst1, weight_vec, dst0, dst1);
+ MUL2(dst2, weight_vec, dst3, weight_vec, dst2, dst3);
+ SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
+ PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp0, tmp1);
+ ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
+ CLIP_SH2_0_255_MAX_SATU(tmp0, tmp1);
+ out = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
}
static void hevc_hv_uniwgt_4t_4multx8mult_msa(uint8_t *src,
@@ -4172,18 +4305,18 @@ static void hevc_hv_uniwgt_4t_4multx8mult_msa(uint8_t *src,
int32_t rnd_val)
{
uint32_t loop_cnt;
+ v16u8 out0, out1;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
v8i16 filt0, filt1;
- v4i32 filt_h0, filt_h1;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
v16i8 mask1;
- v8i16 filter_vec, const_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8, dst9;
- v4i32 dst0_r, dst1_r, dst2_r, dst3_r, dst4_r, dst5_r, dst6_r, dst7_r;
+ v8i16 filt_h0, filt_h1, filter_vec, tmp0, tmp1, tmp2, tmp3;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v8i16 dst10, dst21, dst22, dst73, dst84, dst95, dst106;
v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
v8i16 dst21_r, dst43_r, dst65_r, dst87_r;
- v4i32 weight_vec, offset_vec, rnd_vec;
+ v8i16 dst98_r, dst109_r, offset_vec, const_128, denom_vec;
+ v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, weight_vec, rnd_vec;
src -= (src_stride + 1);
@@ -4191,34 +4324,30 @@ static void hevc_hv_uniwgt_4t_4multx8mult_msa(uint8_t *src,
SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SW(filter_vec, 0, filt_h0, filt_h1);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
mask1 = mask0 + 2;
- const_vec = __msa_ldi_h(128);
- const_vec <<= 6;
-
weight_vec = __msa_fill_w(weight);
- offset_vec = __msa_fill_w(offset);
rnd_vec = __msa_fill_w(rnd_val);
+ offset_vec = __msa_fill_h(offset);
+ denom_vec = __msa_fill_h(rnd_val - 6);
+ const_128 = __msa_fill_h((128 * weight));
+ offset_vec += __msa_srar_h(const_128, denom_vec);
+
LD_SB3(src, src_stride, src0, src1, src2);
src += (3 * src_stride);
XORI_B3_128_SB(src0, src1, src2);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dst0 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
- ILVR_H2_SH(dst1, dst0, dst2, dst1, dst10_r, dst21_r);
+ VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src1, src2, src1, src2, mask0, mask1, vec2, vec3);
+ dst10 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst21 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ ILVRL_H2_SH(dst21, dst10, dst10_r, dst21_r);
+ dst22 = (v8i16) __msa_splati_d((v2i64) dst21, 1);
for (loop_cnt = height >> 3; loop_cnt--;) {
LD_SB8(src, src_stride,
@@ -4226,77 +4355,50 @@ static void hevc_hv_uniwgt_4t_4multx8mult_msa(uint8_t *src,
src += (8 * src_stride);
XORI_B8_128_SB(src3, src4, src5, src6, src7, src8, src9, src10);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
- dst32_r = __msa_ilvr_h(dst3, dst2);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_r >>= 6;
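+        /* rows n and n + 4 share one vector: eight outputs per iteration */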
+ VSHF_B2_SB(src3, src7, src3, src7, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src4, src8, src4, src8, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src5, src9, src5, src9, mask0, mask1, vec4, vec5);
+ VSHF_B2_SB(src6, src10, src6, src10, mask0, mask1, vec6, vec7);
+ dst73 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst84 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst95 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dst106 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+ dst32_r = __msa_ilvr_h(dst73, dst22);
+ ILVRL_H2_SH(dst84, dst73, dst43_r, dst87_r);
+ ILVRL_H2_SH(dst95, dst84, dst54_r, dst98_r);
+ ILVRL_H2_SH(dst106, dst95, dst65_r, dst109_r);
+ dst22 = (v8i16) __msa_splati_d((v2i64) dst73, 1);
+ dst76_r = __msa_ilvr_h(dst22, dst106);
+ dst0 = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
+ dst1 = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
+ dst2 = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
+ dst3 = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
+ dst4 = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
+ dst5 = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
+ dst6 = HEVC_FILT_4TAP(dst76_r, dst98_r, filt_h0, filt_h1);
+ dst7 = HEVC_FILT_4TAP(dst87_r, dst109_r, filt_h0, filt_h1);
+ SRA_4V(dst0, dst1, dst2, dst3, 6);
+ SRA_4V(dst4, dst5, dst6, dst7, 6);
+ MUL2(dst0, weight_vec, dst1, weight_vec, dst0, dst1);
+ MUL2(dst2, weight_vec, dst3, weight_vec, dst2, dst3);
+ MUL2(dst4, weight_vec, dst5, weight_vec, dst4, dst5);
+ MUL2(dst6, weight_vec, dst7, weight_vec, dst6, dst7);
+ SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
+ SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
+ PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, tmp0, tmp1,
+ tmp2, tmp3);
+ ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
+ ADD2(tmp2, offset_vec, tmp3, offset_vec, tmp2, tmp3);
+ CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
+ PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST4x8_UB(out0, out1, dst, dst_stride);
+ dst += (8 * dst_stride);
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
- dst43_r = __msa_ilvr_h(dst4, dst3);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst1_r >>= 6;
-
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
- dst5 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst5, dst5);
- dst54_r = __msa_ilvr_h(dst5, dst4);
- dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
- dst2_r >>= 6;
-
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
- dst6 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst6, dst6);
- dst65_r = __msa_ilvr_h(dst6, dst5);
- dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
- dst3_r >>= 6;
-
- VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
- dst7 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst7, dst7);
- dst76_r = __msa_ilvr_h(dst7, dst6);
- dst4_r = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
- dst4_r >>= 6;
-
- VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec0, vec1);
- dst8 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst8, dst8);
- dst87_r = __msa_ilvr_h(dst8, dst7);
- dst5_r = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
- dst5_r >>= 6;
-
- VSHF_B2_SB(src9, src9, src9, src9, mask0, mask1, vec0, vec1);
- dst9 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst9, dst9);
- dst10_r = __msa_ilvr_h(dst9, dst8);
- dst6_r = HEVC_FILT_4TAP(dst76_r, dst10_r, filt_h0, filt_h1);
- dst6_r >>= 6;
-
- VSHF_B2_SB(src10, src10, src10, src10, mask0, mask1, vec0, vec1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);
- dst21_r = __msa_ilvr_h(dst2, dst9);
- dst7_r = HEVC_FILT_4TAP(dst87_r, dst21_r, filt_h0, filt_h1);
- dst7_r >>= 6;
-
- HEVC_HV_UNIW_RND_CLIP4(dst0_r, dst1_r, dst2_r, dst3_r,
- weight_vec, offset_vec, rnd_vec,
- dst0_r, dst1_r, dst2_r, dst3_r);
- HEVC_PCK_SW_SB4(dst1_r, dst0_r, dst3_r, dst2_r, dst0_r);
- ST4x4_UB(dst0_r, dst0_r, 0, 1, 2, 3, dst, dst_stride);
- dst += (4 * dst_stride);
-
- HEVC_HV_UNIW_RND_CLIP4(dst4_r, dst5_r, dst6_r, dst7_r,
- weight_vec, offset_vec, rnd_vec,
- dst4_r, dst5_r, dst6_r, dst7_r);
- HEVC_PCK_SW_SB4(dst5_r, dst4_r, dst7_r, dst6_r, dst0_r);
- ST4x4_UB(dst0_r, dst0_r, 0, 1, 2, 3, dst, dst_stride);
- dst += (4 * dst_stride);
- }
-}
+ dst10_r = dst98_r;
+ dst21_r = dst109_r;
+ dst22 = (v8i16) __msa_splati_d((v2i64) dst106, 1);
+ }
+}
static void hevc_hv_uniwgt_4t_4w_msa(uint8_t *src,
int32_t src_stride,
@@ -4311,11 +4413,11 @@ static void hevc_hv_uniwgt_4t_4w_msa(uint8_t *src,
{
if (2 == height) {
hevc_hv_uniwgt_4t_4x2_msa(src, src_stride, dst, dst_stride,
- filter_x, filter_y, height, weight,
+ filter_x, filter_y, weight,
offset, rnd_val);
} else if (4 == height) {
hevc_hv_uniwgt_4t_4x4_msa(src, src_stride, dst, dst_stride,
- filter_x, filter_y, height, weight,
+                                  filter_x, filter_y, weight,
offset, rnd_val);
} else if (0 == (height % 8)) {
hevc_hv_uniwgt_4t_4multx8mult_msa(src, src_stride, dst, dst_stride,
@@ -4335,20 +4437,22 @@ static void hevc_hv_uniwgt_4t_6w_msa(uint8_t *src,
int32_t offset,
int32_t rnd_val)
{
- uint32_t loop_cnt;
- v16i8 src0, src1, src2, src3, src4, src5, src6;
+ v16u8 out0, out1, out2;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
v8i16 filt0, filt1;
- v4i32 filt_h0, filt_h1;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
v16i8 mask1;
- v8i16 filter_vec, const_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
- v4i32 dst0_r, dst0_l, dst1_r, dst1_l;
- v4i32 weight_vec, offset_vec, rnd_vec;
- v4i32 dst2_r, dst2_l, dst3_r, dst3_l;
- v8i16 dst10_r, dst32_r, dst21_r, dst43_r;
- v8i16 dst10_l, dst32_l, dst21_l, dst43_l;
+ v8i16 filt_h0, filt_h1, filter_vec;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6, dsth7, dsth8, dsth9;
+ v8i16 dsth10, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
+ v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst98_r, dst21_r, dst43_r;
+ v8i16 dst65_r, dst87_r, dst109_r, dst10_l, dst32_l, dst54_l, dst76_l;
+ v8i16 dst98_l, dst21_l, dst43_l, dst65_l, dst87_l, dst109_l;
+ v8i16 dst1021_l, dst3243_l, dst5465_l, dst7687_l, dst98109_l;
+ v8i16 offset_vec, const_128, denom_vec;
+ v4i32 dst0_r, dst1_r, dst2_r, dst3_r, dst4_r, dst5_r, dst6_r, dst7_r;
+ v4i32 dst0_l, dst1_l, dst2_l, dst3_l, weight_vec, rnd_vec;
src -= (src_stride + 1);
@@ -4356,20 +4460,20 @@ static void hevc_hv_uniwgt_4t_6w_msa(uint8_t *src,
SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SW(filter_vec, 0, filt_h0, filt_h1);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
mask1 = mask0 + 2;
- const_vec = __msa_ldi_h(128);
- const_vec <<= 6;
-
weight_vec = __msa_fill_w(weight);
- offset_vec = __msa_fill_w(offset);
rnd_vec = __msa_fill_w(rnd_val);
+ offset_vec = __msa_fill_h(offset);
+ denom_vec = __msa_fill_h(rnd_val - 6);
+ const_128 = __msa_fill_h((128 * weight));
+ offset_vec += __msa_srar_h(const_128, denom_vec);
+
LD_SB3(src, src_stride, src0, src1, src2);
src += (3 * src_stride);
XORI_B3_128_SB(src0, src1, src2);
@@ -4377,72 +4481,78 @@ static void hevc_hv_uniwgt_4t_6w_msa(uint8_t *src,
VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dst0 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
-
- ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
- ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);
-
- for (loop_cnt = height >> 2; loop_cnt--;) {
- LD_SB4(src, src_stride, src3, src4, src5, src6);
- src += (4 * src_stride);
- XORI_B4_128_SB(src3, src4, src5, src6);
-
- /* row 3 */
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
- ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
- dst0_r >>= 6;
- dst0_l >>= 6;
-
- /* row 4 */
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
- ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
- dst1_r >>= 6;
- dst1_l >>= 6;
-
- /* row 5 */
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
- dst5 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst5, dst5);
- ILVRL_H2_SH(dst5, dst4, dst10_r, dst10_l);
- dst2_r = HEVC_FILT_4TAP(dst32_r, dst10_r, filt_h0, filt_h1);
- dst2_l = HEVC_FILT_4TAP(dst32_l, dst10_l, filt_h0, filt_h1);
- dst2_r >>= 6;
- dst2_l >>= 6;
-
- /* row 6 */
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);
- ILVRL_H2_SH(dst2, dst5, dst21_r, dst21_l);
- dst3_r = HEVC_FILT_4TAP(dst43_r, dst21_r, filt_h0, filt_h1);
- dst3_l = HEVC_FILT_4TAP(dst43_l, dst21_l, filt_h0, filt_h1);
- dst3_r >>= 6;
- dst3_l >>= 6;
-
- HEVC_HV_UNIW_RND_CLIP4(dst0_r, dst1_r, dst0_l, dst1_l,
- weight_vec, offset_vec, rnd_vec,
- dst0_r, dst1_r, dst0_l, dst1_l);
- HEVC_HV_UNIW_RND_CLIP4(dst2_r, dst3_r, dst2_l, dst3_l,
- weight_vec, offset_vec, rnd_vec,
- dst2_r, dst3_r, dst2_l, dst3_l);
- HEVC_PCK_SW_SB8(dst0_l, dst0_r, dst1_l, dst1_r,
- dst2_l, dst2_r, dst3_l, dst3_r, dst0_r, dst1_r);
- ST6x4_UB(dst0_r, dst1_r, dst, dst_stride);
- dst += (4 * dst_stride);
- }
+ dsth0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ ILVRL_H2_SH(dsth1, dsth0, dst10_r, dst10_l);
+ ILVRL_H2_SH(dsth2, dsth1, dst21_r, dst21_l);
+
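+    /* no row loop: all eight rows are processed in one pass */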
+ LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9, src10);
+ XORI_B8_128_SB(src3, src4, src5, src6, src7, src8, src9, src10);
+ VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
+ VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
+ dsth3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth4 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth5 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dsth6 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+ VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src9, src9, src9, src9, mask0, mask1, vec4, vec5);
+ VSHF_B2_SB(src10, src10, src10, src10, mask0, mask1, vec6, vec7);
+ dsth7 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth8 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth9 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dsth10 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+ ILVRL_H2_SH(dsth3, dsth2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dsth4, dsth3, dst43_r, dst43_l);
+ ILVRL_H2_SH(dsth5, dsth4, dst54_r, dst54_l);
+ ILVRL_H2_SH(dsth6, dsth5, dst65_r, dst65_l);
+ ILVRL_H2_SH(dsth7, dsth6, dst76_r, dst76_l);
+ ILVRL_H2_SH(dsth8, dsth7, dst87_r, dst87_l);
+ ILVRL_H2_SH(dsth9, dsth8, dst98_r, dst98_l);
+ ILVRL_H2_SH(dsth10, dsth9, dst109_r, dst109_l);
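+    /* columns 4-5 sit in the low doublewords of the _l interleaves;
+     * pack two row-pairs per vector before the vertical filter */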
+ PCKEV_D2_SH(dst21_l, dst10_l, dst43_l, dst32_l, dst1021_l, dst3243_l);
+ PCKEV_D2_SH(dst65_l, dst54_l, dst87_l, dst76_l, dst5465_l, dst7687_l);
+ dst98109_l = (v8i16) __msa_pckev_d((v2i64) dst109_l, (v2i64) dst98_l);
+ dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
+ dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
+ dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
+ dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
+ dst4_r = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
+ dst5_r = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
+ dst6_r = HEVC_FILT_4TAP(dst76_r, dst98_r, filt_h0, filt_h1);
+ dst7_r = HEVC_FILT_4TAP(dst87_r, dst109_r, filt_h0, filt_h1);
+ dst0_l = HEVC_FILT_4TAP(dst1021_l, dst3243_l, filt_h0, filt_h1);
+ dst1_l = HEVC_FILT_4TAP(dst3243_l, dst5465_l, filt_h0, filt_h1);
+ dst2_l = HEVC_FILT_4TAP(dst5465_l, dst7687_l, filt_h0, filt_h1);
+ dst3_l = HEVC_FILT_4TAP(dst7687_l, dst98109_l, filt_h0, filt_h1);
+ SRA_4V(dst0_r, dst1_r, dst2_r, dst3_r, 6);
+ SRA_4V(dst4_r, dst5_r, dst6_r, dst7_r, 6);
+ SRA_4V(dst0_l, dst1_l, dst2_l, dst3_l, 6);
+ MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
+ MUL2(dst2_r, weight_vec, dst3_r, weight_vec, dst2_r, dst3_r);
+ MUL2(dst4_r, weight_vec, dst5_r, weight_vec, dst4_r, dst5_r);
+ MUL2(dst6_r, weight_vec, dst7_r, weight_vec, dst6_r, dst7_r);
+ MUL2(dst0_l, weight_vec, dst1_l, weight_vec, dst0_l, dst1_l);
+ MUL2(dst2_l, weight_vec, dst3_l, weight_vec, dst2_l, dst3_l);
+ SRAR_W4_SW(dst0_r, dst1_r, dst2_r, dst3_r, rnd_vec);
+ SRAR_W4_SW(dst4_r, dst5_r, dst6_r, dst7_r, rnd_vec);
+ SRAR_W4_SW(dst0_l, dst1_l, dst2_l, dst3_l, rnd_vec);
+ PCKEV_H2_SH(dst1_r, dst0_r, dst3_r, dst2_r, tmp0, tmp1);
+ PCKEV_H2_SH(dst5_r, dst4_r, dst7_r, dst6_r, tmp2, tmp3);
+ PCKEV_H2_SH(dst1_l, dst0_l, dst3_l, dst2_l, tmp4, tmp5);
+ ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
+ ADD2(tmp2, offset_vec, tmp3, offset_vec, tmp2, tmp3);
+ ADD2(tmp4, offset_vec, tmp5, offset_vec, tmp4, tmp5);
+ CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
+ CLIP_SH2_0_255_MAX_SATU(tmp4, tmp5);
+ PCKEV_B3_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, out0, out1, out2);
+ ST4x8_UB(out0, out1, dst, dst_stride);
+ ST2x4_UB(out2, 0, dst + 4, dst_stride);
+ dst += 4 * dst_stride;
+ ST2x4_UB(out2, 4, dst + 4, dst_stride);
}
static void hevc_hv_uniwgt_4t_8x2_msa(uint8_t *src,
@@ -4451,23 +4561,24 @@ static void hevc_hv_uniwgt_4t_8x2_msa(uint8_t *src,
int32_t dst_stride,
const int8_t *filter_x,
const int8_t *filter_y,
- int32_t height,
int32_t weight,
int32_t offset,
int32_t rnd_val)
{
+ v16u8 out;
v16i8 src0, src1, src2, src3, src4;
v8i16 filt0, filt1;
- v4i32 filt_h0, filt_h1;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v8i16 filt_h0, filt_h1, filter_vec;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
v16i8 mask1;
- v8i16 filter_vec, const_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
v8i16 dst0, dst1, dst2, dst3, dst4;
v4i32 dst0_r, dst0_l, dst1_r, dst1_l;
v8i16 dst10_r, dst32_r, dst21_r, dst43_r;
v8i16 dst10_l, dst32_l, dst21_l, dst43_l;
- v4i32 weight_vec, offset_vec, rnd_vec;
+ v8i16 tmp0, tmp1;
+ v8i16 offset_vec, const_128, denom_vec;
+ v4i32 weight_vec, rnd_vec;
src -= (src_stride + 1);
@@ -4475,65 +4586,144 @@ static void hevc_hv_uniwgt_4t_8x2_msa(uint8_t *src,
SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SW(filter_vec, 0, filt_h0, filt_h1);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
mask1 = mask0 + 2;
- const_vec = __msa_ldi_h(128);
- const_vec <<= 6;
-
weight_vec = __msa_fill_w(weight);
- offset_vec = __msa_fill_w(offset);
rnd_vec = __msa_fill_w(rnd_val);
- LD_SB3(src, src_stride, src0, src1, src2);
- src += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
+ offset_vec = __msa_fill_h(offset);
+ denom_vec = __msa_fill_h(rnd_val - 6);
+ const_128 = __msa_fill_h((128 * weight));
+ offset_vec += __msa_srar_h(const_128, denom_vec);
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dst0 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
-
+ VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
+ VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec8, vec9);
+ dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dst3 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+ dst4 = HEVC_FILT_4TAP_SH(vec8, vec9, filt0, filt1);
ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);
-
- LD_SB2(src, src_stride, src3, src4);
- src += (2 * src_stride);
- XORI_B2_128_SB(src3, src4);
-
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
- dst0_r >>= 6;
- dst0_l >>= 6;
-
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
- ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
- dst1_r >>= 6;
- dst1_l >>= 6;
+ SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
+ MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
+ MUL2(dst0_l, weight_vec, dst1_l, weight_vec, dst0_l, dst1_l);
+ SRAR_W4_SW(dst0_r, dst0_l, dst1_r, dst1_l, rnd_vec);
+ PCKEV_H2_SH(dst0_l, dst0_r, dst1_l, dst1_r, tmp0, tmp1);
+ ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
+ CLIP_SH2_0_255_MAX_SATU(tmp0, tmp1);
+ out = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
+ ST8x2_UB(out, dst, dst_stride);
+}
- HEVC_HV_UNIW_RND_CLIP4(dst0_r, dst1_r, dst0_l, dst1_l,
- weight_vec, offset_vec, rnd_vec,
- dst0_r, dst1_r, dst0_l, dst1_l);
- HEVC_PCK_SW_SB4(dst0_l, dst0_r, dst1_l, dst1_r, dst0_r);
- ST8x2_UB(dst0_r, dst, dst_stride);
- dst += (2 * dst_stride);
+static void hevc_hv_uniwgt_4t_8multx4_msa(uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst,
+ int32_t dst_stride,
+ const int8_t *filter_x,
+ const int8_t *filter_y,
+ int32_t width8mult,
+ int32_t weight,
+ int32_t offset,
+ int32_t rnd_val)
+{
+ uint32_t cnt;
+ v16u8 out0, out1;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, mask0, mask1;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v8i16 filt0, filt1, filt_h0, filt_h1, filter_vec;
+ v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, tmp0, tmp1, tmp2, tmp3;
+ v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
+ v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
+ v8i16 offset_vec, const_128, denom_vec;
+ v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
+ v4i32 weight_vec, rnd_vec;
+
+ src -= (src_stride + 1);
+
+ filter_vec = LD_SH(filter_x);
+ SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
+
+ filter_vec = LD_SH(filter_y);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
+
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
+
+ mask0 = LD_SB(ff_hevc_mask_arr);
+ mask1 = mask0 + 2;
+
+ weight_vec = __msa_fill_w(weight);
+ rnd_vec = __msa_fill_w(rnd_val);
+
+ offset_vec = __msa_fill_h(offset);
+ denom_vec = __msa_fill_h(rnd_val - 6);
+ const_128 = __msa_fill_h((128 * weight));
+ offset_vec += __msa_srar_h(const_128, denom_vec);
+
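+    /* one 8-column stripe per iteration, four rows deep */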
+ for (cnt = width8mult; cnt--;) {
+ LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
+ src += 8;
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
+ VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
+ dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
+ ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);
+ VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
+ VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
+ dst3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst4 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst5 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dst6 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+ ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
+ ILVRL_H2_SH(dst5, dst4, dst54_r, dst54_l);
+ ILVRL_H2_SH(dst6, dst5, dst65_r, dst65_l);
+ dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
+ dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
+ dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
+ dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
+ dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
+ dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
+ dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
+ dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);
+ SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
+ SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
+ MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
+ MUL2(dst2_r, weight_vec, dst3_r, weight_vec, dst2_r, dst3_r);
+ MUL2(dst0_l, weight_vec, dst1_l, weight_vec, dst0_l, dst1_l);
+ MUL2(dst2_l, weight_vec, dst3_l, weight_vec, dst2_l, dst3_l);
+ SRAR_W4_SW(dst0_r, dst0_l, dst1_r, dst1_l, rnd_vec);
+ SRAR_W4_SW(dst2_r, dst2_l, dst3_r, dst3_l, rnd_vec);
+ PCKEV_H4_SH(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l,
+ dst3_r, tmp0, tmp1, tmp2, tmp3);
+ ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
+ ADD2(tmp2, offset_vec, tmp3, offset_vec, tmp2, tmp3);
+ CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
+ PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += 8;
+ }
}
static void hevc_hv_uniwgt_4t_8x6_msa(uint8_t *src,
@@ -4542,26 +4732,27 @@ static void hevc_hv_uniwgt_4t_8x6_msa(uint8_t *src,
int32_t dst_stride,
const int8_t *filter_x,
const int8_t *filter_y,
- int32_t height,
int32_t weight,
int32_t offset,
int32_t rnd_val)
{
+ v16u8 out0, out1, out2;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
v8i16 filt0, filt1;
- v4i32 filt_h0, filt_h1;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v8i16 filt_h0, filt_h1, filter_vec;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
v16i8 mask1;
- v8i16 filter_vec, const_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
+ v16i8 vec10, vec11, vec12, vec13, vec14, vec15, vec16, vec17;
v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
- v4i32 dst4_r, dst4_l, dst5_r, dst5_l;
+ v4i32 dst4_r, dst4_l, dst5_r, dst5_l, weight_vec, rnd_vec;
v8i16 dst10_r, dst32_r, dst10_l, dst32_l;
v8i16 dst21_r, dst43_r, dst21_l, dst43_l;
v8i16 dst54_r, dst54_l, dst65_r, dst65_l;
v8i16 dst76_r, dst76_l, dst87_r, dst87_l;
- v4i32 weight_vec, offset_vec, rnd_vec;
+ v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
+ v8i16 offset_vec, const_128, denom_vec;
src -= (src_stride + 1);
@@ -4569,126 +4760,87 @@ static void hevc_hv_uniwgt_4t_8x6_msa(uint8_t *src,
SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SW(filter_vec, 0, filt_h0, filt_h1);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
mask1 = mask0 + 2;
- const_vec = __msa_ldi_h(128);
- const_vec <<= 6;
-
weight_vec = __msa_fill_w(weight);
- offset_vec = __msa_fill_w(offset);
rnd_vec = __msa_fill_w(rnd_val);
- LD_SB3(src, src_stride, src0, src1, src2);
- src += (3 * src_stride);
-
- XORI_B3_128_SB(src0, src1, src2);
-
+ offset_vec = __msa_fill_h(offset);
+ denom_vec = __msa_fill_h(rnd_val - 6);
+ const_128 = __msa_fill_h((128 * weight));
+ offset_vec += __msa_srar_h(const_128, denom_vec);
+
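+    /* nine input rows give the six 4-tap vertical outputs */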
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
+ src += (5 * src_stride);
+ LD_SB4(src, src_stride, src5, src6, src7, src8);
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ XORI_B4_128_SB(src5, src6, src7, src8);
VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dst0 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
-
+ VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
+ VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec8, vec9);
+ VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec10, vec11);
+ VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec12, vec13);
+ VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec14, vec15);
+ VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec16, vec17);
+ dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dst3 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+ dst4 = HEVC_FILT_4TAP_SH(vec8, vec9, filt0, filt1);
+ dst5 = HEVC_FILT_4TAP_SH(vec10, vec11, filt0, filt1);
+ dst6 = HEVC_FILT_4TAP_SH(vec12, vec13, filt0, filt1);
+ dst7 = HEVC_FILT_4TAP_SH(vec14, vec15, filt0, filt1);
+ dst8 = HEVC_FILT_4TAP_SH(vec16, vec17, filt0, filt1);
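+ /* interleave adjacent row results to form the vertical filter operands */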
ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);
-
- LD_SB2(src, src_stride, src3, src4);
- src += (2 * src_stride);
- XORI_B2_128_SB(src3, src4);
-
- /* row 3 */
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
+ ILVRL_H2_SH(dst5, dst4, dst54_r, dst54_l);
+ ILVRL_H2_SH(dst6, dst5, dst65_r, dst65_l);
+ ILVRL_H2_SH(dst7, dst6, dst76_r, dst76_l);
+ ILVRL_H2_SH(dst8, dst7, dst87_r, dst87_l);
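+ /* vertical 4-tap filtering: six output rows from nine filtered input rows */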
dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
- dst0_r >>= 6;
- dst0_l >>= 6;
-
- /* row 4 */
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
- ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
- dst1_r >>= 6;
- dst1_l >>= 6;
-
- LD_SB2(src, src_stride, src5, src6);
- src += (2 * src_stride);
- XORI_B2_128_SB(src5, src6);
-
- /* row 5 */
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
- dst5 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst5, dst5);
- ILVRL_H2_SH(dst5, dst4, dst54_r, dst54_l);
dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
- dst2_r >>= 6;
- dst2_l >>= 6;
-
- /* row 6 */
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
- dst6 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst6, dst6);
- ILVRL_H2_SH(dst6, dst5, dst65_r, dst65_l);
dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);
- dst3_r >>= 6;
- dst3_l >>= 6;
-
- LD_SB2(src, src_stride, src7, src8);
- src += (2 * src_stride);
- XORI_B2_128_SB(src7, src8);
-
- /* row 7 */
- VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
- dst7 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst7, dst7);
- ILVRL_H2_SH(dst7, dst6, dst76_r, dst76_l);
dst4_r = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
dst4_l = HEVC_FILT_4TAP(dst54_l, dst76_l, filt_h0, filt_h1);
-
- dst4_r >>= 6;
- dst4_l >>= 6;
-
- /* row 8 */
- VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec0, vec1);
- dst8 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst8, dst8);
- ILVRL_H2_SH(dst8, dst7, dst87_r, dst87_l);
dst5_r = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
dst5_l = HEVC_FILT_4TAP(dst65_l, dst87_l, filt_h0, filt_h1);
- dst5_r >>= 6;
- dst5_l >>= 6;
-
- HEVC_HV_UNIW_RND_CLIP4(dst0_r, dst1_r, dst0_l, dst1_l,
- weight_vec, offset_vec, rnd_vec,
- dst0_r, dst1_r, dst0_l, dst1_l);
- HEVC_HV_UNIW_RND_CLIP4(dst2_r, dst3_r, dst2_l, dst3_l,
- weight_vec, offset_vec, rnd_vec,
- dst2_r, dst3_r, dst2_l, dst3_l);
- HEVC_HV_UNIW_RND_CLIP4(dst4_r, dst5_r, dst4_l, dst5_l,
- weight_vec, offset_vec, rnd_vec,
- dst4_r, dst5_r, dst4_l, dst5_l);
- HEVC_PCK_SW_SB12(dst0_l, dst0_r, dst1_l, dst1_r,
- dst2_l, dst2_r, dst3_l, dst3_r,
- dst4_l, dst4_r, dst5_l, dst5_r, dst0_r, dst1_r, dst2_r);
- ST8x4_UB(dst0_r, dst1_r, dst, dst_stride);
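+ /* scalar sketch of the epilogue below, per output pixel (hv is the
+  * horizontal+vertical filter result computed on the sign-flipped input):
+  *     t   = (hv >> 6) * weight;
+  *     t   = (t + (1 << (rnd_val - 1))) >> rnd_val;
+  *     out = av_clip_uint8(t + offset_vec);
+  * where offset_vec already holds offset + ((128 * weight) >> (rnd_val - 6)) */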
+ SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
+ SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
+ SRA_4V(dst4_r, dst4_l, dst5_r, dst5_l, 6);
+ MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
+ MUL2(dst2_r, weight_vec, dst3_r, weight_vec, dst2_r, dst3_r);
+ MUL2(dst4_r, weight_vec, dst5_r, weight_vec, dst4_r, dst5_r);
+ MUL2(dst0_l, weight_vec, dst1_l, weight_vec, dst0_l, dst1_l);
+ MUL2(dst2_l, weight_vec, dst3_l, weight_vec, dst2_l, dst3_l);
+ MUL2(dst4_l, weight_vec, dst5_l, weight_vec, dst4_l, dst5_l);
+ SRAR_W4_SW(dst0_r, dst0_l, dst1_r, dst1_l, rnd_vec);
+ SRAR_W4_SW(dst2_r, dst2_l, dst3_r, dst3_l, rnd_vec);
+ SRAR_W4_SW(dst4_r, dst4_l, dst5_r, dst5_l, rnd_vec);
+ PCKEV_H4_SH(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l, dst3_r,
+ tmp0, tmp1, tmp2, tmp3);
+ PCKEV_H2_SH(dst4_l, dst4_r, dst5_l, dst5_r, tmp4, tmp5);
+ ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
+ ADD2(tmp2, offset_vec, tmp3, offset_vec, tmp2, tmp3);
+ ADD2(tmp4, offset_vec, tmp5, offset_vec, tmp4, tmp5);
+ CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
+ CLIP_SH2_0_255_MAX_SATU(tmp4, tmp5);
+ PCKEV_B3_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, out0, out1, out2);
+ ST8x4_UB(out0, out1, dst, dst_stride);
dst += (4 * dst_stride);
- ST8x2_UB(dst2_r, dst, dst_stride);
+ ST8x2_UB(out2, dst, dst_stride);
}

static void hevc_hv_uniwgt_4t_8multx4mult_msa(uint8_t *src,
@@ -4701,24 +4853,25 @@ static void hevc_hv_uniwgt_4t_8multx4mult_msa(uint8_t *src,
int32_t weight,
int32_t offset,
int32_t rnd_val,
- int32_t width)
+ int32_t width8mult)
{
uint32_t loop_cnt, cnt;
uint8_t *src_tmp;
uint8_t *dst_tmp;
+ v16u8 out0, out1;
v16i8 src0, src1, src2, src3, src4, src5, src6;
v8i16 filt0, filt1;
- v4i32 filt_h0, filt_h1;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v8i16 filt_h0, filt_h1, filter_vec;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
v16i8 mask1;
- v8i16 filter_vec, const_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, tmp0, tmp1, tmp2, tmp3;
+ v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
+ v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
v4i32 dst0_r, dst0_l, dst1_r, dst1_l;
- v4i32 weight_vec, offset_vec, rnd_vec;
+ v8i16 offset_vec, const_128, denom_vec;
v4i32 dst2_r, dst2_l, dst3_r, dst3_l;
- v8i16 dst10_r, dst32_r, dst21_r, dst43_r;
- v8i16 dst10_l, dst32_l, dst21_l, dst43_l;
+ v4i32 weight_vec, rnd_vec;
src -= (src_stride + 1);
@@ -4726,21 +4879,21 @@ static void hevc_hv_uniwgt_4t_8multx4mult_msa(uint8_t *src,
SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SW(filter_vec, 0, filt_h0, filt_h1);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
mask1 = mask0 + 2;
- const_vec = __msa_ldi_h(128);
- const_vec <<= 6;
-
weight_vec = __msa_fill_w(weight);
- offset_vec = __msa_fill_w(offset);
rnd_vec = __msa_fill_w(rnd_val);
- for (cnt = width >> 3; cnt--;) {
+ offset_vec = __msa_fill_h(offset);
+ denom_vec = __msa_fill_h(rnd_val - 6);
+ const_128 = __msa_fill_h((128 * weight));
+ offset_vec += __msa_srar_h(const_128, denom_vec);
+
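+ /* width8mult counts 8-pixel column blocks, i.e. width / 8 */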
+ for (cnt = width8mult; cnt--;) {
src_tmp = src;
dst_tmp = dst;
@@ -4751,12 +4904,9 @@ static void hevc_hv_uniwgt_4t_8multx4mult_msa(uint8_t *src,
VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dst0 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
+ dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);
@@ -4767,51 +4917,47 @@ static void hevc_hv_uniwgt_4t_8multx4mult_msa(uint8_t *src,
XORI_B4_128_SB(src3, src4, src5, src6);
VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
+ VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
+ VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
+ dst3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst4 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst5 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dst6 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
+ ILVRL_H2_SH(dst5, dst4, dst54_r, dst54_l);
+ ILVRL_H2_SH(dst6, dst5, dst65_r, dst65_l);
dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
- dst0_r >>= 6;
- dst0_l >>= 6;
-
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
- ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
- dst1_r >>= 6;
- dst1_l >>= 6;
-
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
- dst5 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst5, dst5);
- ILVRL_H2_SH(dst5, dst4, dst10_r, dst10_l);
- dst2_r = HEVC_FILT_4TAP(dst32_r, dst10_r, filt_h0, filt_h1);
- dst2_l = HEVC_FILT_4TAP(dst32_l, dst10_l, filt_h0, filt_h1);
- dst2_r >>= 6;
- dst2_l >>= 6;
-
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);
- ILVRL_H2_SH(dst2, dst5, dst21_r, dst21_l);
- dst3_r = HEVC_FILT_4TAP(dst43_r, dst21_r, filt_h0, filt_h1);
- dst3_l = HEVC_FILT_4TAP(dst43_l, dst21_l, filt_h0, filt_h1);
- dst3_r >>= 6;
- dst3_l >>= 6;
-
- HEVC_HV_UNIW_RND_CLIP4(dst0_r, dst1_r, dst0_l, dst1_l,
- weight_vec, offset_vec, rnd_vec,
- dst0_r, dst1_r, dst0_l, dst1_l);
- HEVC_HV_UNIW_RND_CLIP4(dst2_r, dst3_r, dst2_l, dst3_l,
- weight_vec, offset_vec, rnd_vec,
- dst2_r, dst3_r, dst2_l, dst3_l);
- HEVC_PCK_SW_SB8(dst0_l, dst0_r, dst1_l, dst1_r,
- dst2_l, dst2_r, dst3_l, dst3_r, dst0_r, dst1_r);
- ST8x4_UB(dst0_r, dst1_r, dst_tmp, dst_stride);
+ dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
+ dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
+ dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
+ dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);
+ SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
+ SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
+ MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
+ MUL2(dst2_r, weight_vec, dst3_r, weight_vec, dst2_r, dst3_r);
+ MUL2(dst0_l, weight_vec, dst1_l, weight_vec, dst0_l, dst1_l);
+ MUL2(dst2_l, weight_vec, dst3_l, weight_vec, dst2_l, dst3_l);
+ SRAR_W4_SW(dst0_r, dst0_l, dst1_r, dst1_l, rnd_vec);
+ SRAR_W4_SW(dst2_r, dst2_l, dst3_r, dst3_l, rnd_vec);
+ PCKEV_H4_SH(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l,
+ dst3_r, tmp0, tmp1, tmp2, tmp3);
+ ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
+ ADD2(tmp2, offset_vec, tmp3, offset_vec, tmp2, tmp3);
+ CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
+ PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst_tmp, dst_stride);
dst_tmp += (4 * dst_stride);
+
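+ /* carry the last three filtered rows over to the next iteration */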
+ dst10_r = dst54_r;
+ dst10_l = dst54_l;
+ dst21_r = dst65_r;
+ dst21_l = dst65_l;
+ dst2 = dst6;
}
src += 8;
@@ -4833,16 +4979,20 @@ static void hevc_hv_uniwgt_4t_8w_msa(uint8_t *src,
if (2 == height) {
hevc_hv_uniwgt_4t_8x2_msa(src, src_stride, dst, dst_stride,
- filter_x, filter_y, height, weight,
+ filter_x, filter_y, weight,
offset, rnd_val);
+ } else if (4 == height) {
+ hevc_hv_uniwgt_4t_8multx4_msa(src, src_stride, dst, dst_stride,
+ filter_x, filter_y, 1, weight,
+ offset, rnd_val);
} else if (6 == height) {
hevc_hv_uniwgt_4t_8x6_msa(src, src_stride, dst, dst_stride,
- filter_x, filter_y, height, weight,
+ filter_x, filter_y, weight,
offset, rnd_val);
} else if (0 == (height % 4)) {
hevc_hv_uniwgt_4t_8multx4mult_msa(src, src_stride, dst, dst_stride,
filter_x, filter_y, height, weight,
- offset, rnd_val, 8);
+ offset, rnd_val, 1);
}
}
@@ -4857,12 +5007,170 @@ static void hevc_hv_uniwgt_4t_12w_msa(uint8_t *src,
int32_t offset,
int32_t rnd_val)
{
- hevc_hv_uniwgt_4t_8multx4mult_msa(src, src_stride, dst, dst_stride,
- filter_x, filter_y, height, weight,
- offset, rnd_val, 8);
- hevc_hv_uniwgt_4t_4w_msa(src + 8, src_stride, dst + 8, dst_stride,
- filter_x, filter_y, height, weight,
- offset, rnd_val);
+ uint32_t loop_cnt;
+ uint8_t *src_tmp, *dst_tmp;
+ v16u8 out0, out1;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v16i8 mask0, mask1, mask2, mask3;
+ v8i16 filt0, filt1, filt_h0, filt_h1, filter_vec, tmp0, tmp1, tmp2, tmp3;
+ v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6;
+ v8i16 dst10, dst21, dst22, dst73, dst84, dst95, dst106;
+ v8i16 dst76_r, dst98_r, dst87_r, dst109_r;
+ v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
+ v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
+ v8i16 offset_vec, const_128, denom_vec;
+ v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
+ v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, weight_vec, rnd_vec;
+
+ src -= (src_stride + 1);
+
+ filter_vec = LD_SH(filter_x);
+ SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
+
+ filter_vec = LD_SH(filter_y);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
+
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
+
+ mask0 = LD_SB(ff_hevc_mask_arr);
+ mask1 = mask0 + 2;
+
+ weight_vec = __msa_fill_w(weight);
+ rnd_vec = __msa_fill_w(rnd_val);
+
+ offset_vec = __msa_fill_h(offset);
+ denom_vec = __msa_fill_h(rnd_val - 6);
+ const_128 = __msa_fill_h((128 * weight));
+ offset_vec += __msa_srar_h(const_128, denom_vec);
+
+ src_tmp = src;
+ dst_tmp = dst;
+
+ LD_SB3(src_tmp, src_stride, src0, src1, src2);
+ src_tmp += (3 * src_stride);
+ XORI_B3_128_SB(src0, src1, src2);
+ VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
+ dsth0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ ILVRL_H2_SH(dsth1, dsth0, dst10_r, dst10_l);
+ ILVRL_H2_SH(dsth2, dsth1, dst21_r, dst21_l);
+
+ for (loop_cnt = 4; loop_cnt--;) {
+ LD_SB4(src_tmp, src_stride, src3, src4, src5, src6);
+ src_tmp += (4 * src_stride);
+ XORI_B4_128_SB(src3, src4, src5, src6);
+ VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
+ VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
+ dsth3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth4 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth5 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dsth6 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+ ILVRL_H2_SH(dsth3, dsth2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dsth4, dsth3, dst43_r, dst43_l);
+ ILVRL_H2_SH(dsth5, dsth4, dst54_r, dst54_l);
+ ILVRL_H2_SH(dsth6, dsth5, dst65_r, dst65_l);
+ dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
+ dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
+ dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
+ dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
+ dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
+ dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
+ dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
+ dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);
+ SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
+ SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
+ MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
+ MUL2(dst2_r, weight_vec, dst3_r, weight_vec, dst2_r, dst3_r);
+ MUL2(dst0_l, weight_vec, dst1_l, weight_vec, dst0_l, dst1_l);
+ MUL2(dst2_l, weight_vec, dst3_l, weight_vec, dst2_l, dst3_l);
+ SRAR_W4_SW(dst0_r, dst0_l, dst1_r, dst1_l, rnd_vec);
+ SRAR_W4_SW(dst2_r, dst2_l, dst3_r, dst3_l, rnd_vec);
+ PCKEV_H4_SH(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l,
+ dst3_r, tmp0, tmp1, tmp2, tmp3);
+ ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
+ ADD2(tmp2, offset_vec, tmp3, offset_vec, tmp2, tmp3);
+ CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
+ PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst_tmp, dst_stride);
+ dst_tmp += (4 * dst_stride);
+
+ dst10_r = dst54_r;
+ dst10_l = dst54_l;
+ dst21_r = dst65_r;
+ dst21_l = dst65_l;
+ dsth2 = dsth6;
+ }
+
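+ /* second pass: the remaining 4-pixel-wide strip of the 12-pixel block */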
+ src += 8;
+ dst += 8;
+
+ mask2 = LD_SB(ff_hevc_mask_arr + 16);
+ mask3 = mask2 + 2;
+
+ LD_SB3(src, src_stride, src0, src1, src2);
+ src += (3 * src_stride);
+ XORI_B3_128_SB(src0, src1, src2);
+ VSHF_B2_SB(src0, src1, src0, src1, mask2, mask3, vec0, vec1);
+ VSHF_B2_SB(src1, src2, src1, src2, mask2, mask3, vec2, vec3);
+ dst10 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst21 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ ILVRL_H2_SH(dst21, dst10, dst10_r, dst21_r);
+ dst22 = (v8i16) __msa_splati_d((v2i64) dst21, 1);
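+ /* the 4-wide path packs two rows per vector; dst22 replicates row 2 for the next interleave */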
+
+ for (loop_cnt = 2; loop_cnt--;) {
+ LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9,
+ src10);
+ src += (8 * src_stride);
+ XORI_B8_128_SB(src3, src4, src5, src6, src7, src8, src9, src10);
+ VSHF_B2_SB(src3, src7, src3, src7, mask2, mask3, vec0, vec1);
+ VSHF_B2_SB(src4, src8, src4, src8, mask2, mask3, vec2, vec3);
+ VSHF_B2_SB(src5, src9, src5, src9, mask2, mask3, vec4, vec5);
+ VSHF_B2_SB(src6, src10, src6, src10, mask2, mask3, vec6, vec7);
+ dst73 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst84 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst95 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dst106 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+ dst32_r = __msa_ilvr_h(dst73, dst22);
+ ILVRL_H2_SH(dst84, dst73, dst43_r, dst87_r);
+ ILVRL_H2_SH(dst95, dst84, dst54_r, dst98_r);
+ ILVRL_H2_SH(dst106, dst95, dst65_r, dst109_r);
+ dst22 = (v8i16) __msa_splati_d((v2i64) dst73, 1);
+ dst76_r = __msa_ilvr_h(dst22, dst106);
+ dst0 = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
+ dst1 = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
+ dst2 = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
+ dst3 = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
+ dst4 = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
+ dst5 = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
+ dst6 = HEVC_FILT_4TAP(dst76_r, dst98_r, filt_h0, filt_h1);
+ dst7 = HEVC_FILT_4TAP(dst87_r, dst109_r, filt_h0, filt_h1);
+ SRA_4V(dst0, dst1, dst2, dst3, 6);
+ SRA_4V(dst4, dst5, dst6, dst7, 6);
+ MUL2(dst0, weight_vec, dst1, weight_vec, dst0, dst1);
+ MUL2(dst2, weight_vec, dst3, weight_vec, dst2, dst3);
+ MUL2(dst4, weight_vec, dst5, weight_vec, dst4, dst5);
+ MUL2(dst6, weight_vec, dst7, weight_vec, dst6, dst7);
+ SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
+ SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
+ PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, tmp0, tmp1,
+ tmp2, tmp3);
+ ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
+ ADD2(tmp2, offset_vec, tmp3, offset_vec, tmp2, tmp3);
+ CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
+ PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST4x8_UB(out0, out1, dst, dst_stride);
+ dst += (8 * dst_stride);
+
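+ /* carry rows 8..10 over to the next iteration */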
+ dst10_r = dst98_r;
+ dst21_r = dst109_r;
+ dst22 = (v8i16) __msa_splati_d((v2i64) dst106, 1);
+ }
}

static void hevc_hv_uniwgt_4t_16w_msa(uint8_t *src,
@@ -4876,9 +5184,15 @@ static void hevc_hv_uniwgt_4t_16w_msa(uint8_t *src,
int32_t offset,
int32_t rnd_val)
{
- hevc_hv_uniwgt_4t_8multx4mult_msa(src, src_stride, dst, dst_stride,
- filter_x, filter_y, height, weight,
- offset, rnd_val, 16);
+ if (4 == height) {
+ hevc_hv_uniwgt_4t_8multx4_msa(src, src_stride, dst, dst_stride,
+ filter_x, filter_y, 2, weight, offset,
+ rnd_val);
+ } else {
+ hevc_hv_uniwgt_4t_8multx4mult_msa(src, src_stride, dst, dst_stride,
+ filter_x, filter_y, height, weight,
+ offset, rnd_val, 2);
+ }
}

static void hevc_hv_uniwgt_4t_24w_msa(uint8_t *src,
@@ -4894,7 +5208,7 @@ static void hevc_hv_uniwgt_4t_24w_msa(uint8_t *src,
{
hevc_hv_uniwgt_4t_8multx4mult_msa(src, src_stride, dst, dst_stride,
filter_x, filter_y, height, weight,
- offset, rnd_val, 24);
+ offset, rnd_val, 3);
}

static void hevc_hv_uniwgt_4t_32w_msa(uint8_t *src,
@@ -4910,7 +5224,7 @@ static void hevc_hv_uniwgt_4t_32w_msa(uint8_t *src,
{
hevc_hv_uniwgt_4t_8multx4mult_msa(src, src_stride, dst, dst_stride,
filter_x, filter_y, height, weight,
- offset, rnd_val, 32);
+ offset, rnd_val, 4);
}

#define UNIWGT_MC_COPY(WIDTH) \