From patchwork Mon Sep 4 12:32:09 2017
X-Patchwork-Submitter: kaustubh.raste@imgtec.com
X-Patchwork-Id: 4976
From: kaustubh.raste@imgtec.com
To: ffmpeg-devel@ffmpeg.org
Date: Mon, 4 Sep 2017 18:02:09 +0530
Message-ID: <1504528329-9678-1-git-send-email-kaustubh.raste@imgtec.com>
X-Mailer: git-send-email 1.7.9.5
Subject: [FFmpeg-devel] [PATCH] avcodec/mips: Improve vp9 idct msa functions
Reply-To: FFmpeg development discussions and patches
Cc: Kaustubh Raste

From: Kaustubh Raste

Removed memset calls. The coefficient buffer is now cleared inside the MSA
idct/iadst functions themselves, by storing zero vectors (or zeroing input[0]
in the DC-only paths) right after the coefficients are loaded, instead of
separate memset()/block[0] = 0 calls in the ff_*_add_msa wrappers.

Signed-off-by: Kaustubh Raste
---
 libavcodec/mips/vp9_idct_msa.c |  118 ++++++++++++++++++++++++----------------
 1 file changed, 70 insertions(+), 48 deletions(-)

diff --git a/libavcodec/mips/vp9_idct_msa.c b/libavcodec/mips/vp9_idct_msa.c
index 25ea16c..bd762f2 100644
--- a/libavcodec/mips/vp9_idct_msa.c
+++ b/libavcodec/mips/vp9_idct_msa.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Shivraj Patil (Shivraj.Patil@imgtec.com)
+ * Copyright (c) 2015 - 2017 Shivraj Patil (Shivraj.Patil@imgtec.com)
  *
  * This file is part of FFmpeg.
  *
@@ -352,6 +352,7 @@ static void vp9_idct4x4_1_add_msa(int16_t *input, uint8_t *dst,
     out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
     out = ROUND_POWER_OF_TWO(out, 4);
     vec = __msa_fill_h(out);
+    input[0] = 0;
 
     ADDBLK_ST4x4_UB(vec, vec, vec, vec, dst, dst_stride);
 }
@@ -360,9 +361,11 @@ static void vp9_idct4x4_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                           int32_t dst_stride)
 {
     v8i16 in0, in1, in2, in3;
+    v8i16 zero = { 0 };
 
     /* load vector elements of 4x4 block */
     LD4x4_SH(input, in0, in1, in2, in3);
+    ST_SH2(zero, zero, input, 8);
     /* rows */
     VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
     /* columns */
@@ -377,9 +380,11 @@ static void vp9_iadst4x4_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                            int32_t dst_stride)
 {
     v8i16 in0, in1, in2, in3;
+    v8i16 zero = { 0 };
 
     /* load vector elements of 4x4 block */
     LD4x4_SH(input, in0, in1, in2, in3);
+    ST_SH2(zero, zero, input, 8);
     /* rows */
     VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
     /* columns */
@@ -394,9 +399,11 @@ static void vp9_iadst_idct_4x4_add_msa(int16_t *input, uint8_t *dst,
                                        int32_t dst_stride, int32_t eob)
 {
     v8i16 in0, in1, in2, in3;
+    v8i16 zero = { 0 };
 
     /* load vector elements of 4x4 block */
     LD4x4_SH(input, in0, in1, in2, in3);
+    ST_SH2(zero, zero, input, 8);
     /* cols */
     VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
     /* columns */
@@ -411,9 +418,11 @@ static void vp9_idct_iadst_4x4_add_msa(int16_t *input, uint8_t *dst,
                                        int32_t dst_stride, int32_t eob)
 {
     v8i16 in0, in1, in2, in3;
+    v8i16 zero = { 0 };
 
     /* load vector elements of 4x4 block */
     LD4x4_SH(input, in0, in1, in2, in3);
+    ST_SH2(zero, zero, input, 8);
     /* cols */
     VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
     /* columns */
@@ -585,6 +594,7 @@ static void vp9_idct8x8_1_add_msa(int16_t *input, uint8_t *dst,
     out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
     val = ROUND_POWER_OF_TWO(out, 5);
     vec = __msa_fill_h(val);
+    input[0] = 0;
 
     VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
     dst += (4 * dst_stride);
@@ -601,9 +611,9 @@ static void vp9_idct8x8_12_colcol_addblk_msa(int16_t *input, uint8_t *dst,
 
     /* load vector elements of 8x8 block */
     LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
+    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);
     ILVR_D2_SH(in1, in0, in3, in2, in0, in1);
     ILVR_D2_SH(in5, in4, in7, in6, in2, in3);
-    //TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
 
     /* stage1 */
     ILVL_H2_SH(in3, in0, in2, in1, s0, s1);
@@ -659,9 +669,11 @@ static void vp9_idct8x8_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                           int32_t dst_stride)
 {
     v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
+    v8i16 zero = { 0 };
 
     /* load vector elements of 8x8 block */
     LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
+    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);
     /* 1D idct8x8 */
     VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                    in0, in1, in2, in3, in4, in5, in6, in7);
@@ -689,10 +701,11 @@ static void vp9_iadst8x8_colcol_addblk_msa(int16_t *input, uint8_t *dst,
     v8i16 out0, out1, out2, out3, out4, out5, out6, out7;
     v8i16 cnst0, cnst1, cnst2, cnst3, cnst4;
     v8i16 temp0, temp1, temp2, temp3, s0, s1;
-    v16i8 zero = { 0 };
+    v8i16 zero = { 0 };
 
     /* load vector elements of 8x8 block */
     LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
+    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);
 
     /* 1D adst8x8 */
     VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
@@ -736,13 +749,13 @@ static void vp9_iadst8x8_colcol_addblk_msa(int16_t *input, uint8_t *dst,
     dst0 = LD_UB(dst + 0 * dst_stride);
     dst7 = LD_UB(dst + 7 * dst_stride);
 
-    res0 = (v8i16) __msa_ilvr_b(zero, (v16i8) dst0);
+    res0 = (v8i16) __msa_ilvr_b((v16i8) zero, (v16i8) dst0);
     res0 += out0;
     res0 = CLIP_SH_0_255(res0);
     res0 = (v8i16) __msa_pckev_b((v16i8) res0, (v16i8) res0);
     ST8x1_UB(res0, dst);
 
-    res7 = (v8i16) __msa_ilvr_b(zero, (v16i8) dst7);
+    res7 = (v8i16) __msa_ilvr_b((v16i8) zero, (v16i8) dst7);
     res7 += out7;
     res7 = CLIP_SH_0_255(res7);
     res7 = (v8i16) __msa_pckev_b((v16i8) res7, (v16i8) res7);
@@ -809,9 +822,11 @@ static void vp9_iadst_idct_8x8_add_msa(int16_t *input, uint8_t *dst,
                                        int32_t dst_stride, int32_t eob)
 {
     v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
+    v8i16 zero = { 0 };
 
     /* load vector elements of 8x8 block */
     LD_SH8(input, 8, in1, in6, in3, in4, in5, in2, in7, in0);
+    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);
     /* 1D idct8x8 */
     VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                     in0, in1, in2, in3, in4, in5, in6, in7);
@@ -834,9 +849,11 @@ static void vp9_idct_iadst_8x8_add_msa(int16_t *input, uint8_t *dst,
                                        int32_t dst_stride, int32_t eob)
 {
     v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
+    v8i16 zero = { 0 };
 
     /* load vector elements of 8x8 block */
     LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
+    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);
 
     /* 1D idct8x8 */
     VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
@@ -937,12 +954,16 @@ static void vp9_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
     v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
     v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15;
     v8i16 tmp5, tmp6, tmp7;
+    v8i16 zero = { 0 };
+
+    /* load up 8x16 */
+    LD_SH16(input, 16,
+            reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7,
+            reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);
 
-    /* load up 8x8 */
-    LD_SH8(input, 16, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
+    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
     input += 8 * 16;
-    /* load bottom 8x8 */
-    LD_SH8(input, 16, reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);
+    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
 
     VP9_DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
     VP9_DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
@@ -1036,12 +1057,16 @@ static void vp9_idct16_1d_columns_msa(int16_t *input, int16_t *output)
     v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
     v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15;
     v8i16 tmp5, tmp6, tmp7;
+    v8i16 zero = { 0 };
 
-    /* load up 8x8 */
-    LD_SH8(input, 16, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
-    input += 8 * 16;
-    /* load bottom 8x8 */
-    LD_SH8(input, 16, reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);
+    /* load up 8x16 */
+    LD_SH16(input, 16,
+            reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7,
+            reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);
+
+    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
+    input += 16 * 8;
+    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
 
     VP9_DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
     VP9_DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
@@ -1141,11 +1166,11 @@ static void vp9_idct16x16_1_add_msa(int16_t *input, uint8_t *dst,
     out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
     out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
     out = ROUND_POWER_OF_TWO(out, 6);
+    input[0] = 0;
 
     vec = __msa_fill_h(out);
 
-    for (i = 4; i--;)
-    {
+    for (i = 4; i--;) {
         LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
         UNPCK_UB_SH(dst0, res0, res4);
         UNPCK_UB_SH(dst1, res1, res5);
@@ -1229,12 +1254,17 @@ static void vp9_iadst16_1d_columns_msa(int16_t *input, int16_t *output)
 {
     v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
     v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
+    v8i16 zero = { 0 };
 
     /* load input data */
     LD_SH16(input, 16, l0, l1, l2, l3, l4, l5, l6, l7,
             l8, l9, l10, l11, l12, l13, l14, l15);
+    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
+    input += 16 * 8;
+    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
+
     /* ADST in horizontal */
     VP9_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7,
                      l8, l9, l10, l11, l12, l13, l14, l15,
@@ -1591,9 +1621,11 @@ static void vp9_idct8x32_column_even_process_store(int16_t *tmp_buf,
     v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
     v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
     v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;
+    v8i16 zero = { 0 };
 
     /* Even stage 1 */
     LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
+    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, tmp_buf, (4 * 32));
     tmp_buf += (2 * 32);
 
     VP9_DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
@@ -1613,6 +1645,7 @@
     /* Even stage 2 */
     /* Load 8 */
     LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
+    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, tmp_buf, (4 * 32));
 
     VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
     VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
@@ -1671,6 +1704,7 @@ static void vp9_idct8x32_column_odd_process_store(int16_t *tmp_buf,
 {
     v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
     v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
+    v8i16 zero = { 0 };
 
     /* Odd stage 1 */
     reg0 = LD_SH(tmp_buf + 32);
@@ -1682,6 +1716,15 @@ static void vp9_idct8x32_column_odd_process_store(int16_t *tmp_buf,
     reg6 = LD_SH(tmp_buf + 25 * 32);
     reg7 = LD_SH(tmp_buf + 31 * 32);
 
+    ST_SH(zero, tmp_buf + 32);
+    ST_SH(zero, tmp_buf + 7 * 32);
+    ST_SH(zero, tmp_buf + 9 * 32);
+    ST_SH(zero, tmp_buf + 15 * 32);
+    ST_SH(zero, tmp_buf + 17 * 32);
+    ST_SH(zero, tmp_buf + 23 * 32);
+    ST_SH(zero, tmp_buf + 25 * 32);
+    ST_SH(zero, tmp_buf + 31 * 32);
+
     VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
     VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
     VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
@@ -1723,6 +1766,15 @@
     reg6 = LD_SH(tmp_buf + 27 * 32);
     reg7 = LD_SH(tmp_buf + 29 * 32);
 
+    ST_SH(zero, tmp_buf + 3 * 32);
+    ST_SH(zero, tmp_buf + 5 * 32);
+    ST_SH(zero, tmp_buf + 11 * 32);
+    ST_SH(zero, tmp_buf + 13 * 32);
+    ST_SH(zero, tmp_buf + 19 * 32);
+    ST_SH(zero, tmp_buf + 21 * 32);
+    ST_SH(zero, tmp_buf + 27 * 32);
+    ST_SH(zero, tmp_buf + 29 * 32);
+
     VP9_DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
     VP9_DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
     VP9_DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
@@ -1901,11 +1953,11 @@ static void vp9_idct32x32_1_add_msa(int16_t *input, uint8_t *dst,
     out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
     out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
     out = ROUND_POWER_OF_TWO(out, 6);
+    input[0] = 0;
 
     vec = __msa_fill_h(out);
 
-    for (i = 16; i--;)
-    {
+    for (i = 16; i--;) {
         LD_UB2(dst, 16, dst0, dst1);
         LD_UB2(dst + dst_stride, 16, dst2, dst3);
 
@@ -2004,11 +2056,9 @@ void ff_idct_idct_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
 {
     if (eob > 1) {
         vp9_idct4x4_colcol_addblk_msa(block, dst, stride);
-        memset(block, 0, 4 * 4 * sizeof(*block));
     }
     else {
         vp9_idct4x4_1_add_msa(block, dst, stride);
-        block[0] = 0;
     }
 }
 
@@ -2017,60 +2067,41 @@ void ff_idct_idct_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
 {
     if (eob == 1) {
         vp9_idct8x8_1_add_msa(block, dst, stride);
-        block[0] = 0;
     }
     else if (eob <= 12) {
         vp9_idct8x8_12_colcol_addblk_msa(block, dst, stride);
-        memset(block, 0, 4 * 8 * sizeof(*block));
     }
     else {
         vp9_idct8x8_colcol_addblk_msa(block, dst, stride);
-        memset(block, 0, 8 * 8 * sizeof(*block));
     }
 }
 
 void ff_idct_idct_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
                                 int16_t *block, int eob)
 {
-    int i;
-
     if (eob == 1) {
         /* DC only DCT coefficient. */
         vp9_idct16x16_1_add_msa(block, dst, stride);
-        block[0] = 0;
     }
     else if (eob <= 10) {
         vp9_idct16x16_10_colcol_addblk_msa(block, dst, stride);
-        for (i = 0; i < 4; ++i) {
-            memset(block, 0, 4 * sizeof(*block));
-            block += 16;
-        }
     }
     else {
         vp9_idct16x16_colcol_addblk_msa(block, dst, stride);
-        memset(block, 0, 16 * 16 * sizeof(*block));
     }
 }
 
 void ff_idct_idct_32x32_add_msa(uint8_t *dst, ptrdiff_t stride,
                                 int16_t *block, int eob)
 {
-    int i;
-
     if (eob == 1) {
         vp9_idct32x32_1_add_msa(block, dst, stride);
-        block[0] = 0;
     }
     else if (eob <= 34) {
         vp9_idct32x32_34_colcol_addblk_msa(block, dst, stride);
-        for (i = 0; i < 8; ++i) {
-            memset(block, 0, 8 * sizeof(*block));
-            block += 32;
-        }
     }
     else {
         vp9_idct32x32_colcol_addblk_msa(block, dst, stride);
-        memset(block, 0, 32 * 32 * sizeof(*block));
     }
 }
 
@@ -2078,61 +2109,52 @@ void ff_iadst_iadst_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
                                 int16_t *block, int eob)
 {
     vp9_iadst4x4_colcol_addblk_msa(block, dst, stride);
-    memset(block, 0, 4 * 4 * sizeof(*block));
 }
 
 void ff_iadst_iadst_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
                                 int16_t *block, int eob)
 {
     vp9_iadst8x8_colcol_addblk_msa(block, dst, stride);
-    memset(block, 0, 8 * 8 * sizeof(*block));
 }
 
 void ff_iadst_iadst_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
                                   int16_t *block, int eob)
 {
     vp9_iadst16x16_colcol_addblk_msa(block, dst, stride);
-    memset(block, 0, 16 * 16 * sizeof(*block));
 }
 
 void ff_idct_iadst_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
                                int16_t *block, int eob)
 {
     vp9_idct_iadst_4x4_add_msa(block, dst, stride, eob);
-    memset(block, 0, 4 * 4 * sizeof(*block));
 }
 
 void ff_idct_iadst_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
                                int16_t *block, int eob)
 {
     vp9_idct_iadst_8x8_add_msa(block, dst, stride, eob);
-    memset(block, 0, 8 * 8 * sizeof(*block));
 }
 
 void ff_idct_iadst_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
                                  int16_t *block, int eob)
 {
     vp9_idct_iadst_16x16_add_msa(block, dst, stride, eob);
-    memset(block, 0, 16 * 16 * sizeof(*block));
 }
 
 void ff_iadst_idct_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
                                int16_t *block, int eob)
 {
     vp9_iadst_idct_4x4_add_msa(block, dst, stride, eob);
-    memset(block, 0, 4 * 4 * sizeof(*block));
 }
 
 void ff_iadst_idct_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
                                int16_t *block, int eob)
 {
     vp9_iadst_idct_8x8_add_msa(block, dst, stride, eob);
-    memset(block, 0, 8 * 8 * sizeof(*block));
 }
 
 void ff_iadst_idct_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
                                  int16_t *block, int eob)
 {
     vp9_iadst_idct_16x16_add_msa(block, dst, stride, eob);
-    memset(block, 0, 16 * 16 * sizeof(*block));
 }
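
For readers unfamiliar with the MSA macros, the idea the patch applies can be
sketched in plain C as below. This sketch is not part of the patch or of
FFmpeg: vec_t, ld_sh() and st_sh() are hypothetical stand-ins for the v8i16
type and the LD_SH/ST_SH macros, and the transform itself is elided. The 4x4
case needs only two 8-element stores to cover the whole block; the larger
transforms in the patch do the same with ST_SH8 per 8x16 half.

/* Stand-alone sketch (not from the patch): clear the coefficient buffer by
 * storing zeros right after the loads, instead of a later memset() pass. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef struct { int16_t v[8]; } vec_t;           /* stand-in for v8i16 */

static vec_t ld_sh(const int16_t *p)              /* stand-in for LD_SH */
{
    vec_t r;
    memcpy(r.v, p, sizeof(r.v));
    return r;
}

static void st_sh(vec_t x, int16_t *p)            /* stand-in for ST_SH */
{
    memcpy(p, x.v, sizeof(x.v));
}

/* Old scheme: the ff_* wrapper cleared the block afterwards with memset(),
 * an extra pass over memory that was just read. */
static void idct4x4_add_then_memset(int16_t *block)
{
    vec_t in0 = ld_sh(block);
    vec_t in1 = ld_sh(block + 8);
    /* ... transform in0/in1 and add the result to dst ... */
    (void)in0; (void)in1;
    memset(block, 0, 4 * 4 * sizeof(*block));
}

/* New scheme (what the patch does inside the MSA functions): zero vectors
 * are stored back immediately after the loads, so the buffer is already
 * clean once the transform finishes and no separate memset() is needed. */
static void idct4x4_add_clear_on_load(int16_t *block)
{
    const vec_t zero = { { 0 } };
    vec_t in0 = ld_sh(block);
    vec_t in1 = ld_sh(block + 8);
    st_sh(zero, block);           /* mirrors ST_SH2(zero, zero, input, 8) */
    st_sh(zero, block + 8);
    /* ... transform in0/in1 and add the result to dst ... */
    (void)in0; (void)in1;
}

int main(void)
{
    int16_t a[16] = { 7, -3, 1 }, b[16] = { 7, -3, 1 };

    idct4x4_add_then_memset(a);
    idct4x4_add_clear_on_load(b);
    printf("a[0]=%d b[0]=%d (both cleared)\n", a[0], b[0]);
    return 0;
}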