From patchwork Sat Oct 14 08:40:10 2023
X-Patchwork-Submitter: "Logan.Lyu"
X-Patchwork-Id: 44254
Date: Sat, 14 Oct 2023 16:40:10 +0800
From: "Logan.Lyu"
To: ffmpeg-devel@ffmpeg.org
Organization: myais
Subject: [FFmpeg-devel] (no subject)

checkasm bench:
put_hevc_qpel_v4_8_c: 138.1
put_hevc_qpel_v4_8_neon: 41.1
put_hevc_qpel_v6_8_c: 276.6
put_hevc_qpel_v6_8_neon: 60.9
put_hevc_qpel_v8_8_c: 478.9
put_hevc_qpel_v8_8_neon: 72.9
put_hevc_qpel_v12_8_c: 1072.6
put_hevc_qpel_v12_8_neon: 203.9
put_hevc_qpel_v16_8_c: 1852.1
put_hevc_qpel_v16_8_neon: 264.1
put_hevc_qpel_v24_8_c: 4137.6
put_hevc_qpel_v24_8_neon: 586.9
put_hevc_qpel_v32_8_c: 7579.1
put_hevc_qpel_v32_8_neon: 1036.6
put_hevc_qpel_v48_8_c: 16355.6
put_hevc_qpel_v48_8_neon: 2326.4
put_hevc_qpel_v64_8_c: 33545.1
put_hevc_qpel_v64_8_neon: 4126.4

Co-Authored-By: J. Dekker
Signed-off-by: Logan Lyu
---
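Note for readers unfamiliar with this code path (not part of the commit itself): the new ff_hevc_put_hevc_qpel_v{4..64}_8_neon functions implement the vertical-only luma quarter-pel filter for bit depth 8. Below is a minimal scalar sketch of that operation, assuming the standard HEVC 8-tap luma coefficients; the authoritative C reference is put_hevc_qpel_v in libavcodec/hevcdsp_template.c, and the signature mirrors the NEON8_FNPROTO(qpel_v, ...) declaration added by this patch.

#include <stdint.h>
#include <stddef.h>

#define MAX_PB_SIZE 64

/* Standard HEVC 8-tap luma filters, indexed by fractional position my - 1. */
static const int8_t qpel_filters[3][8] = {
    { -1,  4, -10, 58, 17,  -5,  1,  0 },
    { -1,  4, -11, 40, 40, -11,  4, -1 },
    {  0,  1,  -5, 17, 58, -10,  4, -1 },
};

/* Scalar model of put_hevc_qpel_v at bit depth 8: each output sample is an
 * 8-tap vertical convolution of its source column, written as int16_t with a
 * fixed MAX_PB_SIZE destination stride (no shift/rounding needed at 8 bit). */
static void put_hevc_qpel_v_scalar(int16_t *dst, const uint8_t *src,
                                   ptrdiff_t srcstride, int height,
                                   intptr_t mx, intptr_t my, int width)
{
    const int8_t *filter = qpel_filters[my - 1];
    (void)mx; /* horizontal phase is unused in the pure vertical path */

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            int sum = 0;
            for (int k = 0; k < 8; k++)
                sum += filter[k] * src[x + (k - 3) * srcstride];
            dst[x] = sum;
        }
        src += srcstride;
        dst += MAX_PB_SIZE;
    }
}

The NEON versions avoid reloading the eight source rows for every output row: they keep them live in v16-v23 (register pairs v16-v29 for the wider sizes) and rotate them through the calc_all/calc_all2 macros instead.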
 libavcodec/aarch64/hevcdsp_init_aarch64.c |   5 +
 libavcodec/aarch64/hevcdsp_qpel_neon.S    | 347 +++++++++++++++++++---
 2 files changed, 314 insertions(+), 38 deletions(-)

diff --git a/libavcodec/aarch64/hevcdsp_init_aarch64.c b/libavcodec/aarch64/hevcdsp_init_aarch64.c
index e9a341ecb9..f6b4c31d17 100644
--- a/libavcodec/aarch64/hevcdsp_init_aarch64.c
+++ b/libavcodec/aarch64/hevcdsp_init_aarch64.c
@@ -204,6 +204,10 @@ NEON8_FNPROTO(qpel_h, (int16_t *dst,
         const uint8_t *_src, ptrdiff_t _srcstride,
         int height, intptr_t mx, intptr_t my, int width), _i8mm);
 
+NEON8_FNPROTO(qpel_v, (int16_t *dst,
+        const uint8_t *src, ptrdiff_t srcstride,
+        int height, intptr_t mx, intptr_t my, int width),);
+
 NEON8_FNPROTO(qpel_uni_v, (uint8_t *dst, ptrdiff_t dststride,
         const uint8_t *src, ptrdiff_t srcstride,
         int height, intptr_t mx, intptr_t my, int width),);
@@ -315,6 +319,7 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth)
         NEON8_FNASSIGN(c->put_hevc_epel, 0, 0, pel_pixels,);
         NEON8_FNASSIGN(c->put_hevc_epel, 1, 0, epel_v,);
         NEON8_FNASSIGN(c->put_hevc_qpel, 0, 0, pel_pixels,);
+        NEON8_FNASSIGN(c->put_hevc_qpel, 1, 0, qpel_v,);
         NEON8_FNASSIGN(c->put_hevc_epel_uni, 0, 0, pel_uni_pixels,);
         NEON8_FNASSIGN(c->put_hevc_epel_uni, 1, 0, epel_uni_v,);
         NEON8_FNASSIGN(c->put_hevc_qpel_uni, 0, 0, pel_uni_pixels,);
diff --git a/libavcodec/aarch64/hevcdsp_qpel_neon.S b/libavcodec/aarch64/hevcdsp_qpel_neon.S
index 4132d7a8a9..eff70d70a4 100644
--- a/libavcodec/aarch64/hevcdsp_qpel_neon.S
+++ b/libavcodec/aarch64/hevcdsp_qpel_neon.S
@@ -112,6 +112,44 @@ endconst
 .endif
 .endm
 
+.macro calc_all
+        calc v23, v16, v17, v18, v19, v20, v21, v22, v23
+        b.eq 2f
+        calc v16, v17, v18, v19, v20, v21, v22, v23, v16
+        b.eq 2f
+        calc v17, v18, v19, v20, v21, v22, v23, v16, v17
+        b.eq 2f
+        calc v18, v19, v20, v21, v22, v23, v16, v17, v18
+        b.eq 2f
+        calc v19, v20, v21, v22, v23, v16, v17, v18, v19
+        b.eq 2f
+        calc v20, v21, v22, v23, v16, v17, v18, v19, v20
+        b.eq 2f
+        calc v21, v22, v23, v16, v17, v18, v19, v20, v21
+        b.eq 2f
+        calc v22, v23, v16, v17, v18, v19, v20, v21, v22
+        b.hi 1b
+.endm
+
+.macro calc_all2
+        calc v30, v31, v16, v18, v20, v22, v24, v26, v28, v30, v17, v19, v21, v23, v25, v27, v29, v31
+        b.eq 2f
+        calc v16, v17, v18, v20, v22, v24, v26, v28, v30, v16, v19, v21, v23, v25, v27, v29, v31, v17
+        b.eq 2f
+        calc v18, v19, v20, v22, v24, v26, v28, v30, v16, v18, v21, v23, v25, v27, v29, v31, v17, v19
+        b.eq 2f
+        calc v20, v21, v22, v24, v26, v28, v30, v16, v18, v20, v23, v25, v27, v29, v31, v17, v19, v21
+        b.eq 2f
+        calc v22, v23, v24, v26, v28, v30, v16, v18, v20, v22, v25, v27, v29, v31, v17, v19, v21, v23
+        b.eq 2f
+        calc v24, v25, v26, v28, v30, v16, v18, v20, v22, v24, v27, v29, v31, v17, v19, v21, v23, v25
+        b.eq 2f
+        calc v26, v27, v28, v30, v16, v18, v20, v22, v24, v26, v29, v31, v17, v19, v21, v23, v25, v27
+        b.eq 2f
+        calc v28, v29, v30, v16, v18, v20, v22, v24, v26, v28, v31, v17, v19, v21, v23, v25, v27, v29
+        b.hi 1b
+.endm
+
 .macro put_hevc type
 .ifc \type, qpel
 // void put_hevc_qpel_h(int16_t *dst,
@@ -558,6 +596,277 @@ put_hevc qpel
 put_hevc qpel_uni
 put_hevc qpel_bi
 
+function ff_hevc_put_hevc_qpel_v4_8_neon, export=1
+        load_qpel_filterb x5, x4
+        sub x1, x1, x2, lsl #1
+        mov x9, #(MAX_PB_SIZE * 2)
+        sub x1, x1, x2
+        ldr s16, [x1]
+        ldr s17, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr s18, [x1]
+        ldr s19, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr s20, [x1]
+        ldr s21, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr s22, [x1]
+        add x1, x1, x2
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+        ld1 {\tmp\().s}[0], [x1], x2
+        movi v24.8h, #0
+        calc_qpelb v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        st1 {v24.4h}, [x0], x9
+        subs w3, w3, #1
+        b.eq 2f
+.endm
+1:      calc_all
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_v6_8_neon, export=1
+        load_qpel_filterb x5, x4
+        sub x1, x1, x2, lsl #1
+        mov x9, #(MAX_PB_SIZE * 2 - 8)
+        sub x1, x1, x2
+        ldr d16, [x1]
+        ldr d17, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr d18, [x1]
+        ldr d19, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr d20, [x1]
+        ldr d21, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr d22, [x1]
+        add x1, x1, x2
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+        ld1 {\tmp\().8b}, [x1], x2
+        movi v24.8h, #0
+        calc_qpelb v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        st1 {v24.4h}, [x0], #8
+        st1 {v24.s}[2], [x0], x9
+        subs w3, w3, #1
+.endm
+1:      calc_all
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_v8_8_neon, export=1
+        load_qpel_filterb x5, x4
+        sub x1, x1, x2, lsl #1
+        mov x9, #(MAX_PB_SIZE * 2)
+        sub x1, x1, x2
+        ldr d16, [x1]
+        ldr d17, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr d18, [x1]
+        ldr d19, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr d20, [x1]
+        ldr d21, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr d22, [x1]
+        add x1, x1, x2
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+        ld1 {\tmp\().8b}, [x1], x2
+        movi v24.8h, #0
+        calc_qpelb v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        st1 {v24.8h}, [x0], x9
+        subs w3, w3, #1
+.endm
+1:      calc_all
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_v12_8_neon, export=1
+        load_qpel_filterb x5, x4
+        sub x1, x1, x2, lsl #1
+        mov x9, #(MAX_PB_SIZE * 2 - 16)
+        sub x1, x1, x2
+        ldr q16, [x1]
+        ldr q17, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr q18, [x1]
+        ldr q19, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr q20, [x1]
+        ldr q21, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr q22, [x1]
+        add x1, x1, x2
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+        ld1 {\tmp\().16b}, [x1], x2
+        movi v24.8h, #0
+        movi v25.8h, #0
+        calc_qpelb v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        calc_qpelb2 v25, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        st1 {v24.8h}, [x0], #16
+        subs w3, w3, #1
+        st1 {v25.4h}, [x0], x9
+.endm
+1:      calc_all
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_v16_8_neon, export=1
+        load_qpel_filterb x5, x4
+        sub x1, x1, x2, lsl #1
+        mov x9, #(MAX_PB_SIZE * 2)
+        sub x1, x1, x2
+        ldr q16, [x1]
+        ldr q17, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr q18, [x1]
+        ldr q19, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr q20, [x1]
+        ldr q21, [x1, x2]
+        add x1, x1, x2, lsl #1
+        ldr q22, [x1]
+        add x1, x1, x2
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+        ld1 {\tmp\().16b}, [x1], x2
+        movi v24.8h, #0
+        movi v25.8h, #0
+        calc_qpelb v24, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        calc_qpelb2 v25, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        subs w3, w3, #1
+        st1 {v24.8h, v25.8h}, [x0], x9
+.endm
+1:      calc_all
+.purgem calc
+2:      ret
+endfunc
+
+// todo: reads #32 bytes
+function ff_hevc_put_hevc_qpel_v24_8_neon, export=1
+        sub sp, sp, #32
+        st1 {v8.8b, v9.8b, v10.8b}, [sp]
+        load_qpel_filterb x5, x4
+        sub x1, x1, x2, lsl #1
+        sub x1, x1, x2
+        mov x9, #(MAX_PB_SIZE * 2)
+        ld1 {v16.16b, v17.16b}, [x1], x2
+        ld1 {v18.16b, v19.16b}, [x1], x2
+        ld1 {v20.16b, v21.16b}, [x1], x2
+        ld1 {v22.16b, v23.16b}, [x1], x2
+        ld1 {v24.16b, v25.16b}, [x1], x2
+        ld1 {v26.16b, v27.16b}, [x1], x2
+        ld1 {v28.16b, v29.16b}, [x1], x2
+.macro calc tmp0, tmp1, src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11, src12, src13, src14, src15
+        ld1 {\tmp0\().16b, \tmp1\().16b}, [x1], x2
+        movi v8.8h, #0
+        movi v9.8h, #0
+        movi v10.8h, #0
+        calc_qpelb v8, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        calc_qpelb2 v9, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        calc_qpelb v10, \src8, \src9, \src10, \src11, \src12, \src13, \src14, \src15
+        subs w3, w3, #1
+        st1 {v8.8h, v9.8h, v10.8h}, [x0], x9
+.endm
+1:      calc_all2
+.purgem calc
+2:      ld1 {v8.8b, v9.8b, v10.8b}, [sp]
+        add sp, sp, #32
+        ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_v32_8_neon, export=1
+        sub sp, sp, #32
+        st1 {v8.8b-v11.8b}, [sp]
+        load_qpel_filterb x5, x4
+        sub x1, x1, x2, lsl #1
+        mov x9, #(MAX_PB_SIZE * 2)
+        sub x1, x1, x2
+        ld1 {v16.16b, v17.16b}, [x1], x2
+        ld1 {v18.16b, v19.16b}, [x1], x2
+        ld1 {v20.16b, v21.16b}, [x1], x2
+        ld1 {v22.16b, v23.16b}, [x1], x2
+        ld1 {v24.16b, v25.16b}, [x1], x2
+        ld1 {v26.16b, v27.16b}, [x1], x2
+        ld1 {v28.16b, v29.16b}, [x1], x2
+.macro calc tmp0, tmp1, src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11, src12, src13, src14, src15
+        ld1 {\tmp0\().16b, \tmp1\().16b}, [x1], x2
+        movi v8.8h, #0
+        movi v9.8h, #0
+        movi v10.8h, #0
+        movi v11.8h, #0
+        calc_qpelb v8, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        calc_qpelb2 v9, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        calc_qpelb v10, \src8, \src9, \src10, \src11, \src12, \src13, \src14, \src15
+        calc_qpelb2 v11, \src8, \src9, \src10, \src11, \src12, \src13, \src14, \src15
+        subs w3, w3, #1
+        st1 {v8.8h-v11.8h}, [x0], x9
+.endm
+1:      calc_all2
+.purgem calc
+2:      ld1 {v8.8b-v11.8b}, [sp], #32
+        ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_v48_8_neon, export=1
+        stp x2, x3, [sp, #-48]!
+        stp x0, x1, [sp, #16]
+        stp x5, x30, [sp, #32]
+        bl X(ff_hevc_put_hevc_qpel_v24_8_neon)
+        ldp x2, x3, [sp]
+        ldp x0, x1, [sp, #16]
+        ldr x5, [sp, #32]
+        add sp, sp, #32
+        add x0, x0, #48
+        add x1, x1, #24
+        bl X(ff_hevc_put_hevc_qpel_v24_8_neon)
+        ldr x30, [sp, #8]
+        add sp, sp, #16
+        ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_v64_8_neon, export=1
+        sub sp, sp, #32
+        st1 {v8.8b-v11.8b}, [sp]
+        load_qpel_filterb x5, x4
+        sub x1, x1, x2, lsl #1
+        sub x1, x1, x2
+        mov x9, #(MAX_PB_SIZE * 2)
+0:      mov x8, x1              // src
+        ld1 {v16.16b, v17.16b}, [x8], x2
+        mov w11, w3             // height
+        ld1 {v18.16b, v19.16b}, [x8], x2
+        mov x10, x0             // dst
+        ld1 {v20.16b, v21.16b}, [x8], x2
+        ld1 {v22.16b, v23.16b}, [x8], x2
+        ld1 {v24.16b, v25.16b}, [x8], x2
+        ld1 {v26.16b, v27.16b}, [x8], x2
+        ld1 {v28.16b, v29.16b}, [x8], x2
+.macro calc tmp0, tmp1, src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11, src12, src13, src14, src15
+        ld1 {\tmp0\().16b, \tmp1\().16b}, [x8], x2
+        movi v8.8h, #0
+        movi v9.8h, #0
+        movi v10.8h, #0
+        movi v11.8h, #0
+        calc_qpelb v8, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        calc_qpelb2 v9, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7
+        calc_qpelb v10, \src8, \src9, \src10, \src11, \src12, \src13, \src14, \src15
+        calc_qpelb2 v11, \src8, \src9, \src10, \src11, \src12, \src13, \src14, \src15
+        subs x11, x11, #1
+        st1 {v8.8h-v11.8h}, [x10], x9
+.endm
+1:      calc_all2
+.purgem calc
+2:      add x0, x0, #64
+        add x1, x1, #32
+        subs w6, w6, #32
+        b.hi 0b
+        ld1 {v8.8b-v11.8b}, [sp], #32
+        ret
+endfunc
+
 function ff_hevc_put_hevc_pel_uni_pixels4_8_neon, export=1
 1:
         ldr s0, [x2]
@@ -663,25 +972,6 @@ function ff_hevc_put_hevc_pel_uni_pixels64_8_neon, export=1
         ret
 endfunc
 
-.macro calc_all
-        calc v23, v16, v17, v18, v19, v20, v21, v22, v23
-        b.eq 2f
-        calc v16, v17, v18, v19, v20, v21, v22, v23, v16
-        b.eq 2f
-        calc v17, v18, v19, v20, v21, v22, v23, v16, v17
-        b.eq 2f
-        calc v18, v19, v20, v21, v22, v23, v16, v17, v18
-        b.eq 2f
-        calc v19, v20, v21, v22, v23, v16, v17, v18, v19
-        b.eq 2f
-        calc v20, v21, v22, v23, v16, v17, v18, v19, v20
-        b.eq 2f
-        calc v21, v22, v23, v16, v17, v18, v19, v20, v21
-        b.eq 2f
-        calc v22, v23, v16, v17, v18, v19, v20, v21, v22
-        b.hi 1b
-.endm
-
 function ff_hevc_put_hevc_qpel_uni_v4_8_neon, export=1
         load_qpel_filterb x6, x5
         sub x2, x2, x3, lsl #1
@@ -1559,25 +1849,6 @@ endfunc
 
 #if HAVE_I8MM
 
-.macro calc_all2
-        calc v30, v31, v16, v18, v20, v22, v24, v26, v28, v30, v17, v19, v21, v23, v25, v27, v29, v31
-        b.eq 2f
-        calc v16, v17, v18, v20, v22, v24, v26, v28, v30, v16, v19, v21, v23, v25, v27, v29, v31, v17
-        b.eq 2f
-        calc v18, v19, v20, v22, v24, v26, v28, v30, v16, v18, v21, v23, v25, v27, v29, v31, v17, v19
-        b.eq 2f
-        calc v20, v21, v22, v24, v26, v28, v30, v16, v18, v20, v23, v25, v27, v29, v31, v17, v19, v21
-        b.eq 2f
-        calc v22, v23, v24, v26, v28, v30, v16, v18, v20, v22, v25, v27, v29, v31, v17, v19, v21, v23
-        b.eq 2f
-        calc v24, v25, v26, v28, v30, v16, v18, v20, v22, v24, v27, v29, v31, v17, v19, v21, v23, v25
-        b.eq 2f
-        calc v26, v27, v28, v30, v16, v18, v20, v22, v24, v26, v29, v31, v17, v19, v21, v23, v25, v27
-        b.eq 2f
-        calc v28, v29, v30, v16, v18, v20, v22, v24, v26, v28, v31, v17, v19, v21, v23, v25, v27, v29
-        b.hi 1b
-.endm
-
 function ff_hevc_put_hevc_qpel_uni_hv4_8_neon_i8mm, export=1
         add w10, w4, #7
         lsl x10, x10, #7
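For context on the NEON8_FNASSIGN(c->put_hevc_qpel, 1, 0, qpel_v,) hunk above: the decoder dispatches luma motion compensation through the put_hevc_qpel table, selected by a block-size index and by whether the vertical/horizontal fractional offsets are non-zero, so the new functions fill the vertical-only slots. The sketch below is illustrative only; the helper name and the bare table typedef are hypothetical, not decoder code.

#include <stdint.h>
#include <stddef.h>

/* Same prototype as the NEON8_FNPROTO(qpel_v, ...) declaration in this patch. */
typedef void (*qpel_fn)(int16_t *dst, const uint8_t *src, ptrdiff_t srcstride,
                        int height, intptr_t mx, intptr_t my, int width);

/* Illustrative dispatch: a vertical-only call (mx == 0, my != 0) lands in the
 * [1][0] column, which this patch now points at the qpel_v NEON functions. */
static void mc_vertical_only(qpel_fn tab[][2][2], int size_idx,
                             int16_t *tmp, const uint8_t *src, ptrdiff_t srcstride,
                             int height, int mx, int my, int width)
{
    tab[size_idx][!!my][!!mx](tmp, src, srcstride, height, mx, my, width);
}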