From patchwork Fri Sep 15 05:58:23 2017
X-Patchwork-Submitter: kaustubh.raste@imgtec.com
X-Patchwork-Id: 5146
From: kaustubh.raste@imgtec.com
To: ffmpeg-devel@ffmpeg.org
Date: Fri, 15 Sep 2017 11:28:23 +0530
Message-ID: <1505455103-27837-1-git-send-email-kaustubh.raste@imgtec.com>
X-Mailer: git-send-email 1.7.9.5
Subject: [FFmpeg-devel] [PATCH] avcodec/mips: Improve avc lpf msa functions
Cc: Kaustubh Raste

From: Kaustubh Raste

Optimize the luma intra case by reducing the number of conditional branches.

Signed-off-by: Kaustubh Raste
---
 libavcodec/mips/h264dsp_msa.c | 428 +++++++++++++----------------------------
 1 file changed, 138 insertions(+), 290 deletions(-)

diff --git a/libavcodec/mips/h264dsp_msa.c b/libavcodec/mips/h264dsp_msa.c
index 16e4858..a17eacb 100644
--- a/libavcodec/mips/h264dsp_msa.c
+++ b/libavcodec/mips/h264dsp_msa.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Parag Salasakar (Parag.Salasakar@imgtec.com)
+ * Copyright (c) 2015 - 2017 Parag Salasakar (Parag.Salasakar@imgtec.com)
  *
  * This file is part of FFmpeg.
  *
@@ -644,96 +644,69 @@ static void avc_loopfilter_luma_intra_edge_hor_msa(uint8_t *data,
                                                     uint8_t beta_in,
                                                     uint32_t img_width)
 {
-    v16u8 p2_asub_p0, q2_asub_q0, p0_asub_q0;
-    v16u8 alpha, beta;
-    v16u8 is_less_than, is_less_than_beta, negate_is_less_than_beta;
-    v16u8 p2, p1, p0, q0, q1, q2;
-    v16u8 p3_org, p2_org, p1_org, p0_org, q0_org, q1_org, q2_org, q3_org;
-    v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
-    v8i16 p1_org_l, p0_org_l, q0_org_l, q1_org_l;
-    v8i16 p2_r = { 0 };
-    v8i16 p1_r = { 0 };
-    v8i16 p0_r = { 0 };
-    v8i16 q0_r = { 0 };
-    v8i16 q1_r = { 0 };
-    v8i16 q2_r = { 0 };
-    v8i16 p2_l = { 0 };
-    v8i16 p1_l = { 0 };
-    v8i16 p0_l = { 0 };
-    v8i16 q0_l = { 0 };
-    v8i16 q1_l = { 0 };
-    v8i16 q2_l = { 0 };
-    v16u8 tmp_flag;
-    v16i8 zero = { 0 };
-
-    alpha = (v16u8) __msa_fill_b(alpha_in);
-    beta = (v16u8) __msa_fill_b(beta_in);
+    v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0;
+    v16u8 is_less_than, is_less_than_beta, is_less_than_alpha;
+    v16u8 p1_org, p0_org, q0_org, q1_org;
 
     LD_UB4(data - (img_width << 1), img_width, p1_org, p0_org, q0_org, q1_org);
 
-    {
-        v16u8 p1_asub_p0, q1_asub_q0, is_less_than_alpha;
-
-        p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
-        p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
-        q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);
+    p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
+    p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
+    q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);
 
-        is_less_than_alpha = (p0_asub_q0 < alpha);
-        is_less_than_beta = (p1_asub_p0 < beta);
-        is_less_than = is_less_than_beta & is_less_than_alpha;
-        is_less_than_beta = (q1_asub_q0 < beta);
-        is_less_than = is_less_than_beta & is_less_than;
-    }
+    is_less_than_alpha = (p0_asub_q0 < alpha_in);
+    is_less_than_beta = (p1_asub_p0 < beta_in);
+    is_less_than = is_less_than_beta & is_less_than_alpha;
+    is_less_than_beta = (q1_asub_q0 < beta_in);
+    is_less_than = is_less_than_beta & is_less_than;
 
     if (!__msa_test_bz_v(is_less_than)) {
-        q2_org = LD_UB(data + (2 * img_width));
-        p3_org = LD_UB(data - (img_width << 2));
-        p2_org = LD_UB(data - (3 * img_width));
+        v16u8 p2_asub_p0, q2_asub_q0, p0, q0, negate_is_less_than_beta;
+        v8i16 p0_r = { 0 };
+        v8i16 q0_r = { 0 };
+        v8i16 p0_l = { 0 };
+        v8i16 q0_l = { 0 };
+        v16i8 zero = { 0 };
+        v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
+        v8i16 p1_org_l, p0_org_l, q0_org_l, q1_org_l;
+        v16u8 q2_org = LD_UB(data + (2 * img_width));
+        v16u8 p2_org = LD_UB(data - (3 * img_width));
+        v16u8 tmp_flag = (v16u8)__msa_fill_b((alpha_in >> 2) + 2);
 
         UNPCK_UB_SH(p1_org, p1_org_r, p1_org_l);
         UNPCK_UB_SH(p0_org, p0_org_r, p0_org_l);
         UNPCK_UB_SH(q0_org, q0_org_r, q0_org_l);
 
-        tmp_flag = alpha >> 2;
-        tmp_flag = tmp_flag + 2;
         tmp_flag = (p0_asub_q0 < tmp_flag);
 
         p2_asub_p0 = __msa_asub_u_b(p2_org, p0_org);
-        is_less_than_beta = (p2_asub_p0 < beta);
+        is_less_than_beta = (p2_asub_p0 < beta_in);
         is_less_than_beta = is_less_than_beta & tmp_flag;
         negate_is_less_than_beta = __msa_xori_b(is_less_than_beta, 0xff);
 
         is_less_than_beta = is_less_than_beta & is_less_than;
         negate_is_less_than_beta = negate_is_less_than_beta & is_less_than;
 
-        {
-            v8u16 is_less_than_beta_l, is_less_than_beta_r;
-
-            q1_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q1_org);
-
-            is_less_than_beta_r =
-                (v8u16) __msa_sldi_b((v16i8) is_less_than_beta, zero, 8);
-            if (!__msa_test_bz_v((v16u8) is_less_than_beta_r)) {
-                v8i16 p3_org_r;
-
-                ILVR_B2_SH(zero, p3_org, zero, p2_org, p3_org_r, p2_r);
-                AVC_LPF_P0P1P2_OR_Q0Q1Q2(p3_org_r, p0_org_r, q0_org_r, p1_org_r,
-                                         p2_r, q1_org_r, p0_r, p1_r, p2_r);
-            }
-
-            q1_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q1_org);
-            is_less_than_beta_l =
-                (v8u16) __msa_sldi_b(zero, (v16i8) is_less_than_beta, 8);
+        q1_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q1_org);
+        q1_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q1_org);
 
-            if (!__msa_test_bz_v((v16u8) is_less_than_beta_l)) {
-                v8i16 p3_org_l;
-
-                ILVL_B2_SH(zero, p3_org, zero, p2_org, p3_org_l, p2_l);
-                AVC_LPF_P0P1P2_OR_Q0Q1Q2(p3_org_l, p0_org_l, q0_org_l, p1_org_l,
-                                         p2_l, q1_org_l, p0_l, p1_l, p2_l);
-            }
-        }
         /* combine and store */
         if (!__msa_test_bz_v(is_less_than_beta)) {
+            v8i16 p3_org_l, p3_org_r;
+            v16u8 p3_org = LD_UB(data - (img_width << 2));
+            v16u8 p2, p1;
+            v8i16 p2_r = { 0 };
+            v8i16 p2_l = { 0 };
+            v8i16 p1_r = { 0 };
+            v8i16 p1_l = { 0 };
+
+            ILVR_B2_SH(zero, p3_org, zero, p2_org, p3_org_r, p2_r);
+            AVC_LPF_P0P1P2_OR_Q0Q1Q2(p3_org_r, p0_org_r, q0_org_r, p1_org_r,
+                                     p2_r, q1_org_r, p0_r, p1_r, p2_r);
+
+            ILVL_B2_SH(zero, p3_org, zero, p2_org, p3_org_l, p2_l);
+            AVC_LPF_P0P1P2_OR_Q0Q1Q2(p3_org_l, p0_org_l, q0_org_l, p1_org_l,
+                                     p2_l, q1_org_l, p0_l, p1_l, p2_l);
+
             PCKEV_B3_UB(p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, p0, p1, p2);
 
             p0_org = __msa_bmnz_v(p0_org, p0, is_less_than_beta);
@@ -743,62 +716,42 @@ static void avc_loopfilter_luma_intra_edge_hor_msa(uint8_t *data,
             ST_UB(p1_org, data - (2 * img_width));
             ST_UB(p2_org, data - (3 * img_width));
         }
-        {
-            v8u16 negate_is_less_than_beta_r, negate_is_less_than_beta_l;
-            negate_is_less_than_beta_r =
-                (v8u16) __msa_sldi_b((v16i8) negate_is_less_than_beta, zero, 8);
-            if (!__msa_test_bz_v((v16u8) negate_is_less_than_beta_r)) {
-                AVC_LPF_P0_OR_Q0(p0_org_r, q1_org_r, p1_org_r, p0_r);
-            }
+        AVC_LPF_P0_OR_Q0(p0_org_r, q1_org_r, p1_org_r, p0_r);
+        AVC_LPF_P0_OR_Q0(p0_org_l, q1_org_l, p1_org_l, p0_l);
 
-            negate_is_less_than_beta_l =
-                (v8u16) __msa_sldi_b(zero, (v16i8) negate_is_less_than_beta, 8);
-            if (!__msa_test_bz_v((v16u8) negate_is_less_than_beta_l)) {
-                AVC_LPF_P0_OR_Q0(p0_org_l, q1_org_l, p1_org_l, p0_l);
-            }
-        }
         /* combine */
-        if (!__msa_test_bz_v(negate_is_less_than_beta)) {
-            p0 = (v16u8) __msa_pckev_b((v16i8) p0_l, (v16i8) p0_r);
-            p0_org = __msa_bmnz_v(p0_org, p0, negate_is_less_than_beta);
-        }
+        p0 = (v16u8) __msa_pckev_b((v16i8) p0_l, (v16i8) p0_r);
+        p0_org = __msa_bmnz_v(p0_org, p0, negate_is_less_than_beta);
 
         ST_UB(p0_org, data - img_width);
 
         /* if (tmpFlag && (unsigned)ABS(q2-q0) < thresholds->beta_in) */
-        q3_org = LD_UB(data + (3 * img_width));
         q2_asub_q0 = __msa_asub_u_b(q2_org, q0_org);
-        is_less_than_beta = (q2_asub_q0 < beta);
+        is_less_than_beta = (q2_asub_q0 < beta_in);
         is_less_than_beta = is_less_than_beta & tmp_flag;
         negate_is_less_than_beta = __msa_xori_b(is_less_than_beta, 0xff);
 
         is_less_than_beta = is_less_than_beta & is_less_than;
         negate_is_less_than_beta = negate_is_less_than_beta & is_less_than;
 
-        {
-            v8u16 is_less_than_beta_l, is_less_than_beta_r;
-            is_less_than_beta_r =
-                (v8u16) __msa_sldi_b((v16i8) is_less_than_beta, zero, 8);
-            if (!__msa_test_bz_v((v16u8) is_less_than_beta_r)) {
-                v8i16 q3_org_r;
-
-                ILVR_B2_SH(zero, q3_org, zero, q2_org, q3_org_r, q2_r);
-                AVC_LPF_P0P1P2_OR_Q0Q1Q2(q3_org_r, q0_org_r, p0_org_r, q1_org_r,
-                                         q2_r, p1_org_r, q0_r, q1_r, q2_r);
-            }
-            is_less_than_beta_l =
-                (v8u16) __msa_sldi_b(zero, (v16i8) is_less_than_beta, 8);
-            if (!__msa_test_bz_v((v16u8) is_less_than_beta_l)) {
-                v8i16 q3_org_l;
-
-                ILVL_B2_SH(zero, q3_org, zero, q2_org, q3_org_l, q2_l);
-                AVC_LPF_P0P1P2_OR_Q0Q1Q2(q3_org_l, q0_org_l, p0_org_l, q1_org_l,
-                                         q2_l, p1_org_l, q0_l, q1_l, q2_l);
-            }
-        }
-
         /* combine and store */
         if (!__msa_test_bz_v(is_less_than_beta)) {
+            v8i16 q3_org_r, q3_org_l;
+            v16u8 q3_org = LD_UB(data + (3 * img_width));
+            v16u8 q1, q2;
+            v8i16 q2_r = { 0 };
+            v8i16 q2_l = { 0 };
+            v8i16 q1_r = { 0 };
+            v8i16 q1_l = { 0 };
+
+            ILVR_B2_SH(zero, q3_org, zero, q2_org, q3_org_r, q2_r);
+            AVC_LPF_P0P1P2_OR_Q0Q1Q2(q3_org_r, q0_org_r, p0_org_r, q1_org_r,
+                                     q2_r, p1_org_r, q0_r, q1_r, q2_r);
+
+            ILVL_B2_SH(zero, q3_org, zero, q2_org, q3_org_l, q2_l);
+            AVC_LPF_P0P1P2_OR_Q0Q1Q2(q3_org_l, q0_org_l, p0_org_l, q1_org_l,
+                                     q2_l, p1_org_l, q0_l, q1_l, q2_l);
+
             PCKEV_B3_UB(q0_l, q0_r, q1_l, q1_r, q2_l, q2_r, q0, q1, q2);
             q0_org = __msa_bmnz_v(q0_org, q0, is_less_than_beta);
             q1_org = __msa_bmnz_v(q1_org, q1, is_less_than_beta);
@@ -807,25 +760,14 @@ static void avc_loopfilter_luma_intra_edge_hor_msa(uint8_t *data,
             ST_UB(q1_org, data + img_width);
             ST_UB(q2_org, data + 2 * img_width);
         }
 
-        {
-            v8u16 negate_is_less_than_beta_r, negate_is_less_than_beta_l;
-            negate_is_less_than_beta_r =
-                (v8u16) __msa_sldi_b((v16i8) negate_is_less_than_beta, zero, 8);
-            if (!__msa_test_bz_v((v16u8) negate_is_less_than_beta_r)) {
-                AVC_LPF_P0_OR_Q0(q0_org_r, p1_org_r, q1_org_r, q0_r);
-            }
-            negate_is_less_than_beta_l =
-                (v8u16) __msa_sldi_b(zero, (v16i8) negate_is_less_than_beta, 8);
-            if (!__msa_test_bz_v((v16u8) negate_is_less_than_beta_l)) {
-                AVC_LPF_P0_OR_Q0(q0_org_l, p1_org_l, q1_org_l, q0_l);
-            }
-        }
+        AVC_LPF_P0_OR_Q0(q0_org_r, p1_org_r, q1_org_r, q0_r);
+        AVC_LPF_P0_OR_Q0(q0_org_l, p1_org_l, q1_org_l, q0_l);
+
         /* combine */
-        if (!__msa_test_bz_v(negate_is_less_than_beta)) {
-            q0 = (v16u8) __msa_pckev_b((v16i8) q0_l, (v16i8) q0_r);
-            q0_org = __msa_bmnz_v(q0_org, q0, negate_is_less_than_beta);
-        }
+        q0 = (v16u8) __msa_pckev_b((v16i8) q0_l, (v16i8) q0_r);
+        q0_org = __msa_bmnz_v(q0_org, q0, negate_is_less_than_beta);
+
         ST_UB(q0_org, data);
     }
 }
@@ -835,29 +777,13 @@ static void avc_loopfilter_luma_intra_edge_ver_msa(uint8_t *data,
                                                     uint8_t beta_in,
                                                     uint32_t img_width)
 {
-    uint8_t *src;
+    uint8_t *src = data - 4;
     v16u8 alpha, beta, p0_asub_q0;
-    v16u8 is_less_than_alpha, is_less_than;
-    v16u8 is_less_than_beta, negate_is_less_than_beta;
+    v16u8 is_less_than_alpha, is_less_than, is_less_than_beta;
     v16u8 p3_org, p2_org, p1_org, p0_org, q0_org, q1_org, q2_org, q3_org;
-    v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
-    v8i16 p1_org_l, p0_org_l, q0_org_l, q1_org_l;
-    v8i16 p2_r = { 0 };
-    v8i16 p1_r = { 0 };
-    v8i16 p0_r = { 0 };
-    v8i16 q0_r = { 0 };
-    v8i16 q1_r = { 0 };
-    v8i16 q2_r = { 0 };
-    v8i16 p2_l = { 0 };
-    v8i16 p1_l = { 0 };
-    v8i16 p0_l = { 0 };
-    v8i16 q0_l = { 0 };
-    v8i16 q1_l = { 0 };
-    v8i16 q2_l = { 0 };
-    v16i8 zero = { 0 };
-    v16u8 tmp_flag;
+    v16u8 p1_asub_p0, q1_asub_q0;
+
 
-    src = data - 4;
     {
         v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
         v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
@@ -873,119 +799,77 @@ static void avc_loopfilter_luma_intra_edge_ver_msa(uint8_t *data,
                             p3_org, p2_org, p1_org, p0_org, q0_org, q1_org, q2_org, q3_org);
     }
 
-    UNPCK_UB_SH(p1_org, p1_org_r, p1_org_l);
-    UNPCK_UB_SH(p0_org, p0_org_r, p0_org_l);
-    UNPCK_UB_SH(q0_org, q0_org_r, q0_org_l);
-    UNPCK_UB_SH(q1_org, q1_org_r, q1_org_l);
-
-    /* if ( ((unsigned)ABS(p0-q0) < thresholds->alpha_in) &&
-         ((unsigned)ABS(p1-p0) < thresholds->beta_in) &&
-         ((unsigned)ABS(q1-q0) < thresholds->beta_in) ) */
-    {
-        v16u8 p1_asub_p0, q1_asub_q0;
-        p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
-        p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
-        q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);
+    p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
+    p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
+    q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);
 
-        alpha = (v16u8) __msa_fill_b(alpha_in);
-        beta = (v16u8) __msa_fill_b(beta_in);
+    alpha = (v16u8) __msa_fill_b(alpha_in);
+    beta = (v16u8) __msa_fill_b(beta_in);
 
-        is_less_than_alpha = (p0_asub_q0 < alpha);
-        is_less_than_beta = (p1_asub_p0 < beta);
-        is_less_than = is_less_than_beta & is_less_than_alpha;
-        is_less_than_beta = (q1_asub_q0 < beta);
-        is_less_than = is_less_than_beta & is_less_than;
-    }
+    is_less_than_alpha = (p0_asub_q0 < alpha);
+    is_less_than_beta = (p1_asub_p0 < beta);
+    is_less_than = is_less_than_beta & is_less_than_alpha;
+    is_less_than_beta = (q1_asub_q0 < beta);
+    is_less_than = is_less_than_beta & is_less_than;
 
     if (!__msa_test_bz_v(is_less_than)) {
+        v8i16 p0_r = { 0 };
+        v8i16 q0_r = { 0 };
+        v8i16 p0_l = { 0 };
+        v8i16 q0_l = { 0 };
+        v16i8 zero = { 0 };
+        v16u8 tmp_flag, p0, q0, p2_asub_p0, q2_asub_q0;
+        v16u8 negate_is_less_than_beta;
+        v8i16 p1_org_r, p0_org_r, q0_org_r, q1_org_r;
+        v8i16 p1_org_l, p0_org_l, q0_org_l, q1_org_l;
+
+        UNPCK_UB_SH(p1_org, p1_org_r, p1_org_l);
+        UNPCK_UB_SH(p0_org, p0_org_r, p0_org_l);
+        UNPCK_UB_SH(q0_org, q0_org_r, q0_org_l);
+        UNPCK_UB_SH(q1_org, q1_org_r, q1_org_l);
+
         tmp_flag = alpha >> 2;
         tmp_flag = tmp_flag + 2;
         tmp_flag = (p0_asub_q0 < tmp_flag);
 
-        {
-            v16u8 p2_asub_p0;
-
-            p2_asub_p0 = __msa_asub_u_b(p2_org, p0_org);
-            is_less_than_beta = (p2_asub_p0 < beta);
-        }
+        p2_asub_p0 = __msa_asub_u_b(p2_org, p0_org);
+        is_less_than_beta = (p2_asub_p0 < beta);
         is_less_than_beta = tmp_flag & is_less_than_beta;
         negate_is_less_than_beta = __msa_xori_b(is_less_than_beta, 0xff);
 
        is_less_than_beta = is_less_than_beta & is_less_than;
        negate_is_less_than_beta = negate_is_less_than_beta & is_less_than;
 
-        /* right */
-        {
-            v16u8 is_less_than_beta_r;
-
-            is_less_than_beta_r =
-                (v16u8) __msa_sldi_b((v16i8) is_less_than_beta, zero, 8);
-            if (!__msa_test_bz_v(is_less_than_beta_r)) {
-                v8i16 p3_org_r;
-
-                ILVR_B2_SH(zero, p3_org, zero, p2_org, p3_org_r, p2_r);
-                AVC_LPF_P0P1P2_OR_Q0Q1Q2(p3_org_r, p0_org_r, q0_org_r, p1_org_r,
-                                         p2_r, q1_org_r, p0_r, p1_r, p2_r);
-            }
-        }
-        /* left */
-        {
-            v16u8 is_less_than_beta_l;
-
-            is_less_than_beta_l =
-                (v16u8) __msa_sldi_b(zero, (v16i8) is_less_than_beta, 8);
-            if (!__msa_test_bz_v(is_less_than_beta_l)) {
-                v8i16 p3_org_l;
-
-                ILVL_B2_SH(zero, p3_org, zero, p2_org, p3_org_l, p2_l);
-                AVC_LPF_P0P1P2_OR_Q0Q1Q2(p3_org_l, p0_org_l, q0_org_l, p1_org_l,
-                                         p2_l, q1_org_l, p0_l, p1_l, p2_l);
-            }
-        }
-
         /* combine and store */
         if (!__msa_test_bz_v(is_less_than_beta)) {
-            v16u8 p0, p2, p1;
+            v16u8 p2, p1;
+            v8i16 p3_org_r, p3_org_l;
+            v8i16 p2_l = { 0 };
+            v8i16 p2_r = { 0 };
+            v8i16 p1_l = { 0 };
+            v8i16 p1_r = { 0 };
+
+            ILVR_B2_SH(zero, p3_org, zero, p2_org, p3_org_r, p2_r);
+            AVC_LPF_P0P1P2_OR_Q0Q1Q2(p3_org_r, p0_org_r, q0_org_r, p1_org_r,
+                                     p2_r, q1_org_r, p0_r, p1_r, p2_r);
+
+            ILVL_B2_SH(zero, p3_org, zero, p2_org, p3_org_l, p2_l);
+            AVC_LPF_P0P1P2_OR_Q0Q1Q2(p3_org_l, p0_org_l, q0_org_l, p1_org_l,
+                                     p2_l, q1_org_l, p0_l, p1_l, p2_l);
             PCKEV_B3_UB(p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, p0, p1, p2);
 
             p0_org = __msa_bmnz_v(p0_org, p0, is_less_than_beta);
             p1_org = __msa_bmnz_v(p1_org, p1, is_less_than_beta);
             p2_org = __msa_bmnz_v(p2_org, p2, is_less_than_beta);
         }
-        /* right */
-        {
-            v16u8 negate_is_less_than_beta_r;
-
-            negate_is_less_than_beta_r =
-                (v16u8) __msa_sldi_b((v16i8) negate_is_less_than_beta, zero, 8);
-
-            if (!__msa_test_bz_v(negate_is_less_than_beta_r)) {
-                AVC_LPF_P0_OR_Q0(p0_org_r, q1_org_r, p1_org_r, p0_r);
-            }
-        }
-        /* left */
-        {
-            v16u8 negate_is_less_than_beta_l;
-            negate_is_less_than_beta_l =
-                (v16u8) __msa_sldi_b(zero, (v16i8) negate_is_less_than_beta, 8);
-            if (!__msa_test_bz_v(negate_is_less_than_beta_l)) {
-                AVC_LPF_P0_OR_Q0(p0_org_l, q1_org_l, p1_org_l, p0_l);
-            }
-        }
+        AVC_LPF_P0_OR_Q0(p0_org_r, q1_org_r, p1_org_r, p0_r);
+        AVC_LPF_P0_OR_Q0(p0_org_l, q1_org_l, p1_org_l, p0_l);
 
-        if (!__msa_test_bz_v(negate_is_less_than_beta)) {
-            v16u8 p0;
+        p0 = (v16u8) __msa_pckev_b((v16i8) p0_l, (v16i8) p0_r);
+        p0_org = __msa_bmnz_v(p0_org, p0, negate_is_less_than_beta);
 
-            p0 = (v16u8) __msa_pckev_b((v16i8) p0_l, (v16i8) p0_r);
-            p0_org = __msa_bmnz_v(p0_org, p0, negate_is_less_than_beta);
-        }
-
-        {
-            v16u8 q2_asub_q0;
-
-            q2_asub_q0 = __msa_asub_u_b(q2_org, q0_org);
-            is_less_than_beta = (q2_asub_q0 < beta);
-        }
+        q2_asub_q0 = __msa_asub_u_b(q2_org, q0_org);
+        is_less_than_beta = (q2_asub_q0 < beta);
 
         is_less_than_beta = is_less_than_beta & tmp_flag;
         negate_is_less_than_beta = __msa_xori_b(is_less_than_beta, 0xff);
@@ -993,37 +877,21 @@ static void avc_loopfilter_luma_intra_edge_ver_msa(uint8_t *data,
         is_less_than_beta = is_less_than_beta & is_less_than;
         negate_is_less_than_beta = negate_is_less_than_beta & is_less_than;
 
-        /* right */
-        {
-            v16u8 is_less_than_beta_r;
-
-            is_less_than_beta_r =
-                (v16u8) __msa_sldi_b((v16i8) is_less_than_beta, zero, 8);
-            if (!__msa_test_bz_v(is_less_than_beta_r)) {
-                v8i16 q3_org_r;
-
-                ILVR_B2_SH(zero, q3_org, zero, q2_org, q3_org_r, q2_r);
-                AVC_LPF_P0P1P2_OR_Q0Q1Q2(q3_org_r, q0_org_r, p0_org_r, q1_org_r,
-                                         q2_r, p1_org_r, q0_r, q1_r, q2_r);
-            }
-        }
-        /* left */
-        {
-            v16u8 is_less_than_beta_l;
+        if (!__msa_test_bz_v(is_less_than_beta)) {
+            v16u8 q1, q2;
+            v8i16 q3_org_r, q3_org_l;
+            v8i16 q1_l = { 0 };
+            v8i16 q1_r = { 0 };
+            v8i16 q2_l = { 0 };
+            v8i16 q2_r = { 0 };
 
-            is_less_than_beta_l =
-                (v16u8) __msa_sldi_b(zero, (v16i8) is_less_than_beta, 8);
-            if (!__msa_test_bz_v(is_less_than_beta_l)) {
-                v8i16 q3_org_l;
+            ILVR_B2_SH(zero, q3_org, zero, q2_org, q3_org_r, q2_r);
+            AVC_LPF_P0P1P2_OR_Q0Q1Q2(q3_org_r, q0_org_r, p0_org_r, q1_org_r,
+                                     q2_r, p1_org_r, q0_r, q1_r, q2_r);
 
-                ILVL_B2_SH(zero, q3_org, zero, q2_org, q3_org_l, q2_l);
-                AVC_LPF_P0P1P2_OR_Q0Q1Q2(q3_org_l, q0_org_l, p0_org_l, q1_org_l,
-                                         q2_l, p1_org_l, q0_l, q1_l, q2_l);
-            }
-        }
-        /* combine and store */
-        if (!__msa_test_bz_v(is_less_than_beta)) {
-            v16u8 q0, q1, q2;
+            ILVL_B2_SH(zero, q3_org, zero, q2_org, q3_org_l, q2_l);
+            AVC_LPF_P0P1P2_OR_Q0Q1Q2(q3_org_l, q0_org_l, p0_org_l, q1_org_l,
+                                     q2_l, p1_org_l, q0_l, q1_l, q2_l);
             PCKEV_B3_UB(q0_l, q0_r, q1_l, q1_r, q2_l, q2_r, q0, q1, q2);
 
             q0_org = __msa_bmnz_v(q0_org, q0, is_less_than_beta);
@@ -1031,33 +899,12 @@ static void avc_loopfilter_luma_intra_edge_ver_msa(uint8_t *data,
             q2_org = __msa_bmnz_v(q2_org, q2, is_less_than_beta);
         }
 
-        /* right */
-        {
-            v16u8 negate_is_less_than_beta_r;
+        AVC_LPF_P0_OR_Q0(q0_org_r, p1_org_r, q1_org_r, q0_r);
+        AVC_LPF_P0_OR_Q0(q0_org_l, p1_org_l, q1_org_l, q0_l);
 
-            negate_is_less_than_beta_r =
-                (v16u8) __msa_sldi_b((v16i8) negate_is_less_than_beta, zero, 8);
-            if (!__msa_test_bz_v(negate_is_less_than_beta_r)) {
-                AVC_LPF_P0_OR_Q0(q0_org_r, p1_org_r, q1_org_r, q0_r);
-            }
-        }
-        /* left */
-        {
-            v16u8 negate_is_less_than_beta_l;
+        q0 = (v16u8) __msa_pckev_b((v16i8) q0_l, (v16i8) q0_r);
+        q0_org = __msa_bmnz_v(q0_org, q0, negate_is_less_than_beta);
 
-            negate_is_less_than_beta_l =
-                (v16u8) __msa_sldi_b(zero, (v16i8) negate_is_less_than_beta, 8);
-            if (!__msa_test_bz_v(negate_is_less_than_beta_l)) {
-                AVC_LPF_P0_OR_Q0(q0_org_l, p1_org_l, q1_org_l, q0_l);
-            }
-        }
-        if (!__msa_test_bz_v(negate_is_less_than_beta)) {
-            v16u8 q0;
-
-            q0 = (v16u8) __msa_pckev_b((v16i8) q0_l, (v16i8) q0_r);
-            q0_org = __msa_bmnz_v(q0_org, q0, negate_is_less_than_beta);
-        }
-    }
 
     {
         v8i16 tp0, tp1, tp2, tp3, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
@@ -1082,6 +929,7 @@ static void avc_loopfilter_luma_intra_edge_ver_msa(uint8_t *data,
         ST4x4_UB(tmp7, tmp7, 0, 1, 2, 3, src, img_width);
         ST2x4_UB(tmp5, 4, src + 4, img_width);
     }
+    }
 }
 
 static void avc_h_loop_filter_luma_mbaff_intra_msa(uint8_t *src, int32_t stride,
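
A note on the pattern for readers not familiar with the MSA code: the rewrite drops the
per-half branches (the __msa_sldi_b / __msa_test_bz_v pairs on each 8-byte half of the
mask) and instead always filters both halves, merging the result back under the comparison
mask with __msa_bmnz_v, so only the coarse whole-vector tests remain. The sketch below is
illustrative only and is not part of the patch; it uses plain portable C with made-up names
to show the scalar equivalent of that "compute both, select by mask" idea.

/* Illustrative sketch, not FFmpeg code: byte-wise masked merge, the scalar
 * counterpart of __msa_bmnz_v(dst, src, mask). */
#include <stdint.h>

static void merge_under_mask(uint8_t *dst, const uint8_t *filtered,
                             const uint8_t *mask, int n)
{
    /* Instead of branching per sub-range of the mask, always compute the
     * filtered values and keep them only where the mask bytes are set. */
    for (int i = 0; i < n; i++)
        dst[i] = (uint8_t)((dst[i] & ~mask[i]) | (filtered[i] & mask[i]));
}

The single remaining test on the whole mask (the outer "anything to filter at all?" check)
still lets the function exit early, while the per-half work becomes branch-free.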