From patchwork Fri Sep 15 11:36:34 2017
X-Patchwork-Submitter: kaustubh.raste@imgtec.com
X-Patchwork-Id: 5149
From: Kaustubh Raste <kaustubh.raste@imgtec.com>
To: ffmpeg-devel@ffmpeg.org
Date: Fri, 15 Sep 2017 17:06:34 +0530
Message-ID: <1505475394-29139-1-git-send-email-kaustubh.raste@imgtec.com>
X-Mailer: git-send-email 1.7.9.5
Subject: [FFmpeg-devel] [PATCH] avcodec/mips: Improve hevc sao band filter msa functions

From: Kaustubh Raste <kaustubh.raste@imgtec.com>

Preload data in band filter 0-8 for better pipeline parallelization.

Signed-off-by: Kaustubh Raste <kaustubh.raste@imgtec.com>
---
 libavcodec/mips/hevc_lpf_sao_msa.c  | 174 ++++++++++++++++++++++-------------
 libavutil/mips/generic_macros_msa.h |   1 +
 2 files changed, 112 insertions(+), 63 deletions(-)

diff --git a/libavcodec/mips/hevc_lpf_sao_msa.c b/libavcodec/mips/hevc_lpf_sao_msa.c
index 79b156f..1d77432 100644
--- a/libavcodec/mips/hevc_lpf_sao_msa.c
+++ b/libavcodec/mips/hevc_lpf_sao_msa.c
@@ -1049,29 +1049,28 @@ static void hevc_sao_band_filter_4width_msa(uint8_t *dst, int32_t dst_stride,
                                             int16_t *sao_offset_val,
                                             int32_t height)
 {
-    int32_t h_cnt;
     v16u8 src0, src1, src2, src3;
     v16i8 src0_r, src1_r;
     v16i8 offset, offset_val, mask;
-    v16i8 offset0 = { 0 };
-    v16i8 offset1 = { 0 };
+    v16i8 dst0, offset0, offset1;
     v16i8 zero = { 0 };
-    v8i16 temp0, temp1, dst0, dst1;
 
     offset_val = LD_SB(sao_offset_val + 1);
     offset_val = (v16i8) __msa_pckev_d((v2i64) offset_val, (v2i64) offset_val);
 
     offset_val = __msa_pckev_b(offset_val, offset_val);
-    offset1 = (v16i8) __msa_insve_w((v4i32) offset1, 3, (v4i32) offset_val);
-    offset0 = __msa_sld_b(offset1, offset0, 28 - ((sao_left_class) & 31));
+    offset1 = (v16i8) __msa_insve_w((v4i32) zero, 3, (v4i32) offset_val);
+    offset0 = __msa_sld_b(offset1, zero, 28 - ((sao_left_class) & 31));
     offset1 = __msa_sld_b(zero, offset1, 28 - ((sao_left_class) & 31));
 
+    /* load in advance. */
+    LD_UB4(src, src_stride, src0, src1, src2, src3);
+
     if (!((sao_left_class > 12) & (sao_left_class < 29))) {
         SWAP(offset0, offset1);
     }
 
-    for (h_cnt = height >> 2; h_cnt--;) {
-        LD_UB4(src, src_stride, src0, src1, src2, src3);
+    for (height -= 4; height; height -= 4) {
         src += (4 * src_stride);
 
         ILVEV_D2_SB(src0, src1, src2, src3, src0_r, src1_r);
@@ -1080,14 +1079,30 @@ static void hevc_sao_band_filter_4width_msa(uint8_t *dst, int32_t dst_stride,
         mask = __msa_srli_b(src0_r, 3);
         offset = __msa_vshf_b(mask, offset1, offset0);
 
-        UNPCK_SB_SH(offset, temp0, temp1);
-        ILVRL_B2_SH(zero, src0_r, dst0, dst1);
-        ADD2(dst0, temp0, dst1, temp1, dst0, dst1);
-        CLIP_SH2_0_255(dst0, dst1);
-        dst0 = (v8i16) __msa_pckev_b((v16i8) dst1, (v16i8) dst0);
+        src0_r = (v16i8) __msa_xori_b((v16u8) src0_r, 128);
+        dst0 = __msa_adds_s_b(src0_r, offset);
+        dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
+
+        /* load in advance. */
+        LD_UB4(src, src_stride, src0, src1, src2, src3);
+
+        /* store results */
         ST4x4_UB(dst0, dst0, 0, 1, 2, 3, dst, dst_stride);
         dst += (4 * dst_stride);
     }
+
+    ILVEV_D2_SB(src0, src1, src2, src3, src0_r, src1_r);
+
+    src0_r = (v16i8) __msa_pckev_w((v4i32) src1_r, (v4i32) src0_r);
+    mask = __msa_srli_b(src0_r, 3);
+    offset = __msa_vshf_b(mask, offset1, offset0);
+
+    src0_r = (v16i8) __msa_xori_b((v16u8) src0_r, 128);
+    dst0 = __msa_adds_s_b(src0_r, offset);
+    dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
+
+    /* store results */
+    ST4x4_UB(dst0, dst0, 0, 1, 2, 3, dst, dst_stride);
 }
 
 static void hevc_sao_band_filter_8width_msa(uint8_t *dst, int32_t dst_stride,
@@ -1096,51 +1111,69 @@ static void hevc_sao_band_filter_8width_msa(uint8_t *dst, int32_t dst_stride,
                                             int16_t *sao_offset_val,
                                             int32_t height)
 {
-    int32_t h_cnt;
     v16u8 src0, src1, src2, src3;
     v16i8 src0_r, src1_r, mask0, mask1;
-    v16i8 offset, offset_val;
-    v16i8 offset0 = { 0 };
-    v16i8 offset1 = { 0 };
+    v16i8 offset_mask0, offset_mask1, offset_val;
+    v16i8 offset0, offset1, dst0, dst1;
     v16i8 zero = { 0 };
-    v8i16 dst0, dst1, dst2, dst3;
-    v8i16 temp0, temp1, temp2, temp3;
 
     offset_val = LD_SB(sao_offset_val + 1);
     offset_val = (v16i8) __msa_pckev_d((v2i64) offset_val, (v2i64) offset_val);
     offset_val = __msa_pckev_b(offset_val, offset_val);
-    offset1 = (v16i8) __msa_insve_w((v4i32) offset1, 3, (v4i32) offset_val);
-    offset0 = __msa_sld_b(offset1, offset0, 28 - ((sao_left_class) & 31));
+    offset1 = (v16i8) __msa_insve_w((v4i32) zero, 3, (v4i32) offset_val);
+    offset0 = __msa_sld_b(offset1, zero, 28 - ((sao_left_class) & 31));
     offset1 = __msa_sld_b(zero, offset1, 28 - ((sao_left_class) & 31));
 
+    /* load in advance. */
+    LD_UB4(src, src_stride, src0, src1, src2, src3);
+
     if (!((sao_left_class > 12) & (sao_left_class < 29))) {
         SWAP(offset0, offset1);
     }
 
-    for (h_cnt = height >> 2; h_cnt--;) {
-        LD_UB4(src, src_stride, src0, src1, src2, src3);
-        src += (4 * src_stride);
+    for (height -= 4; height; height -= 4) {
+        src += src_stride << 2;
 
         ILVR_D2_SB(src1, src0, src3, src2, src0_r, src1_r);
 
         mask0 = __msa_srli_b(src0_r, 3);
         mask1 = __msa_srli_b(src1_r, 3);
 
-        offset = __msa_vshf_b(mask0, offset1, offset0);
-        UNPCK_SB_SH(offset, temp0, temp1);
+        offset_mask0 = __msa_vshf_b(mask0, offset1, offset0);
+        offset_mask1 = __msa_vshf_b(mask1, offset1, offset0);
 
-        offset = __msa_vshf_b(mask1, offset1, offset0);
-        UNPCK_SB_SH(offset, temp2, temp3);
+        /* load in advance. */
+        LD_UB4(src, src_stride, src0, src1, src2, src3);
 
-        UNPCK_UB_SH(src0_r, dst0, dst1);
-        UNPCK_UB_SH(src1_r, dst2, dst3);
-        ADD4(dst0, temp0, dst1, temp1, dst2, temp2, dst3, temp3,
-             dst0, dst1, dst2, dst3);
-        CLIP_SH4_0_255(dst0, dst1, dst2, dst3);
-        PCKEV_B2_SH(dst1, dst0, dst3, dst2, dst0, dst2);
-        ST8x4_UB(dst0, dst2, dst, dst_stride);
-        dst += (4 * dst_stride);
+        XORI_B2_128_SB(src0_r, src1_r);
+
+        dst0 = __msa_adds_s_b(src0_r, offset_mask0);
+        dst1 = __msa_adds_s_b(src1_r, offset_mask1);
+
+        XORI_B2_128_SB(dst0, dst1);
+
+        /* store results */
+        ST8x4_UB(dst0, dst1, dst, dst_stride);
+        dst += dst_stride << 2;
     }
+
+    ILVR_D2_SB(src1, src0, src3, src2, src0_r, src1_r);
+
+    mask0 = __msa_srli_b(src0_r, 3);
+    mask1 = __msa_srli_b(src1_r, 3);
+
+    offset_mask0 = __msa_vshf_b(mask0, offset1, offset0);
+    offset_mask1 = __msa_vshf_b(mask1, offset1, offset0);
+
+    XORI_B2_128_SB(src0_r, src1_r);
+
+    dst0 = __msa_adds_s_b(src0_r, offset_mask0);
+    dst1 = __msa_adds_s_b(src1_r, offset_mask1);
+
+    XORI_B2_128_SB(dst0, dst1);
+
+    /* store results */
+    ST8x4_UB(dst0, dst1, dst, dst_stride);
 }
 
 static void hevc_sao_band_filter_16multiple_msa(uint8_t *dst,
@@ -1151,32 +1184,30 @@ static void hevc_sao_band_filter_16multiple_msa(uint8_t *dst,
                                                 int16_t *sao_offset_val,
                                                 int32_t width, int32_t height)
 {
-    int32_t h_cnt, w_cnt;
+    int32_t w_cnt;
     v16u8 src0, src1, src2, src3;
-    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
     v16i8 out0, out1, out2, out3;
     v16i8 mask0, mask1, mask2, mask3;
     v16i8 tmp0, tmp1, tmp2, tmp3, offset_val;
-    v16i8 offset0 = { 0 };
-    v16i8 offset1 = { 0 };
+    v16i8 offset0, offset1;
     v16i8 zero = { 0 };
-    v8i16 temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
 
     offset_val = LD_SB(sao_offset_val + 1);
     offset_val = (v16i8) __msa_pckev_d((v2i64) offset_val, (v2i64) offset_val);
     offset_val = __msa_pckev_b(offset_val, offset_val);
-    offset1 = (v16i8) __msa_insve_w((v4i32) offset1, 3, (v4i32) offset_val);
-    offset0 = __msa_sld_b(offset1, offset0, 28 - ((sao_left_class) & 31));
+    offset1 = (v16i8) __msa_insve_w((v4i32) zero, 3, (v4i32) offset_val);
+    offset0 = __msa_sld_b(offset1, zero, 28 - ((sao_left_class) & 31));
     offset1 = __msa_sld_b(zero, offset1, 28 - ((sao_left_class) & 31));
 
     if (!((sao_left_class > 12) & (sao_left_class < 29))) {
         SWAP(offset0, offset1);
     }
 
-    for (h_cnt = height >> 2; h_cnt--;) {
-        for (w_cnt = 0; w_cnt < (width >> 4); w_cnt++) {
-            LD_UB4(src + w_cnt * 16, src_stride, src0, src1, src2, src3);
+    while (height > 0) {
+        /* load in advance */
+        LD_UB4(src, src_stride, src0, src1, src2, src3);
 
+        for (w_cnt = 16; w_cnt < width; w_cnt += 16) {
             mask0 = __msa_srli_b((v16i8) src0, 3);
             mask1 = __msa_srli_b((v16i8) src1, 3);
             mask2 = __msa_srli_b((v16i8) src2, 3);
@@ -1186,27 +1217,44 @@ static void hevc_sao_band_filter_16multiple_msa(uint8_t *dst,
                        tmp0, tmp1);
             VSHF_B2_SB(offset0, offset1, offset0, offset1, mask2, mask3,
                        tmp2, tmp3);
-            UNPCK_SB_SH(tmp0, temp0, temp1);
-            UNPCK_SB_SH(tmp1, temp2, temp3);
-            UNPCK_SB_SH(tmp2, temp4, temp5);
-            UNPCK_SB_SH(tmp3, temp6, temp7);
-            ILVRL_B2_SH(zero, src0, dst0, dst1);
-            ILVRL_B2_SH(zero, src1, dst2, dst3);
-            ILVRL_B2_SH(zero, src2, dst4, dst5);
-            ILVRL_B2_SH(zero, src3, dst6, dst7);
-            ADD4(dst0, temp0, dst1, temp1, dst2, temp2, dst3, temp3,
-                 dst0, dst1, dst2, dst3);
-            ADD4(dst4, temp4, dst5, temp5, dst6, temp6, dst7, temp7,
-                 dst4, dst5, dst6, dst7);
-            CLIP_SH4_0_255(dst0, dst1, dst2, dst3);
-            CLIP_SH4_0_255(dst4, dst5, dst6, dst7);
-            PCKEV_B4_SB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
-                        out0, out1, out2, out3);
-            ST_SB4(out0, out1, out2, out3, dst + w_cnt * 16, dst_stride);
+            XORI_B4_128_UB(src0, src1, src2, src3);
+
+            out0 = __msa_adds_s_b((v16i8) src0, tmp0);
+            out1 = __msa_adds_s_b((v16i8) src1, tmp1);
+            out2 = __msa_adds_s_b((v16i8) src2, tmp2);
+            out3 = __msa_adds_s_b((v16i8) src3, tmp3);
+
+            /* load for next iteration */
+            LD_UB4(src + w_cnt, src_stride, src0, src1, src2, src3);
+
+            XORI_B4_128_SB(out0, out1, out2, out3);
+
+            ST_SB4(out0, out1, out2, out3, dst + w_cnt - 16, dst_stride);
         }
 
+        mask0 = __msa_srli_b((v16i8) src0, 3);
+        mask1 = __msa_srli_b((v16i8) src1, 3);
+        mask2 = __msa_srli_b((v16i8) src2, 3);
+        mask3 = __msa_srli_b((v16i8) src3, 3);
+
+        VSHF_B2_SB(offset0, offset1, offset0, offset1, mask0, mask1, tmp0,
+                   tmp1);
+        VSHF_B2_SB(offset0, offset1, offset0, offset1, mask2, mask3, tmp2,
+                   tmp3);
+        XORI_B4_128_UB(src0, src1, src2, src3);
+
+        out0 = __msa_adds_s_b((v16i8) src0, tmp0);
+        out1 = __msa_adds_s_b((v16i8) src1, tmp1);
+        out2 = __msa_adds_s_b((v16i8) src2, tmp2);
+        out3 = __msa_adds_s_b((v16i8) src3, tmp3);
+
+        XORI_B4_128_SB(out0, out1, out2, out3);
+
+        ST_SB4(out0, out1, out2, out3, dst + w_cnt - 16, dst_stride);
+
         src += src_stride << 2;
         dst += dst_stride << 2;
+        height -= 4;
     }
 }
diff --git a/libavutil/mips/generic_macros_msa.h b/libavutil/mips/generic_macros_msa.h
index ee7d663..3ff94fd 100644
--- a/libavutil/mips/generic_macros_msa.h
+++ b/libavutil/mips/generic_macros_msa.h
@@ -1574,6 +1574,7 @@
     out0 = (RTYPE) __msa_ilvr_h((v8i16) in0, (v8i16) in1);                 \
     out1 = (RTYPE) __msa_ilvl_h((v8i16) in0, (v8i16) in1);                 \
 }
+#define ILVRL_H2_UB(...) ILVRL_H2(v16u8, __VA_ARGS__)
 #define ILVRL_H2_SB(...) ILVRL_H2(v16i8, __VA_ARGS__)
 #define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__)
 #define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__)
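
Two notes on what the rewrite is doing, for reviewers who do not read MSA
intrinsics every day.

1) The arithmetic. The old code widened each unsigned byte to 16 bits, added
the signed SAO offset, clamped to [0, 255] and packed back down (the
UNPCK / ADD / CLIP / PCKEV sequence). The new code instead xors each byte
with 128, which maps the unsigned range 0..255 onto the signed range
-128..127; there a single saturating signed add (__msa_adds_s_b, 16 bytes per
instruction) performs the add and the clamp at once, and a second xor with
128 removes the bias. A minimal scalar model of the identity in plain C
(illustrative only, not code from the patch):

#include <stdint.h>

/* Old approach: widen, add, clamp to [0, 255]. */
static uint8_t sao_add_old(uint8_t px, int8_t offset)
{
    int16_t t = (int16_t) px + offset;
    return (uint8_t) (t < 0 ? 0 : (t > 255 ? 255 : t));
}

/* New approach: clamp(px + off, 0, 255) == sat8(px - 128 + off) + 128,
 * where sat8 saturates to [-128, 127] exactly like __msa_adds_s_b. */
static uint8_t sao_add_new(uint8_t px, int8_t offset)
{
    int16_t s = (int16_t) (int8_t) (px ^ 0x80) + offset;   /* bias    */
    s = s < -128 ? -128 : (s > 127 ? 127 : s);             /* sat add */
    return (uint8_t) ((uint8_t) s ^ 0x80);                 /* unbias  */
}

Both functions return identical results for every (px, offset) pair.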
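2) The loop structure. The "load in advance" comments mark software
pipelining: each trip of the rewritten loops consumes the rows loaded on the
previous trip and issues the loads for the next trip between the compute and
the store, so memory latency overlaps the arithmetic. The trip count drops by
one (for (height -= 4; height; height -= 4)) and a peeled copy of the body
after the loop consumes the last preloaded batch; the 16-multiple variant
pipelines across the width the same way, loading at src + w_cnt while storing
at dst + w_cnt - 16. A sketch of the shape, with hypothetical helpers
standing in for LD_UB4 / ST4x4_UB and the SAO math (assumes height is a
positive multiple of 4, as the SAO filters guarantee):

#include <stdint.h>

static void load4(const uint8_t *src, int stride, uint8_t blk[4])
{
    for (int i = 0; i < 4; i++)
        blk[i] = src[i * stride];          /* stands in for LD_UB4   */
}

static void compute(const uint8_t in[4], uint8_t out[4])
{
    for (int i = 0; i < 4; i++)
        out[i] = (uint8_t) (in[i] + 1);    /* stands in for SAO math */
}

static void store4(const uint8_t blk[4], uint8_t *dst, int stride)
{
    for (int i = 0; i < 4; i++)
        dst[i * stride] = blk[i];          /* stands in for ST4x4_UB */
}

void process(const uint8_t *src, int src_stride,
             uint8_t *dst, int dst_stride, int height)
{
    uint8_t blk[4], out[4];

    load4(src, src_stride, blk);           /* load in advance */

    for (height -= 4; height; height -= 4) {
        src += 4 * src_stride;
        compute(blk, out);
        load4(src, src_stride, blk);       /* preload next trip */
        store4(out, dst, dst_stride);      /* store current results */
        dst += 4 * dst_stride;
    }

    compute(blk, out);                     /* peeled final batch */
    store4(out, dst, dst_stride);
}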