[FFmpeg-devel,21/21] aarch64: hevc: Produce plain neon versions of qpel_bi_hv

Message ID 20240325150243.59058-22-martin@martin.st
State Accepted
Commit f872b1971401817356708b8863dff4ee6bd02600
Series aarch64: hevc: Add missing hevc_pel NEON functions

Checks

Context Check Description
yinshiyou/make_loongarch64 success Make finished
yinshiyou/make_fate_loongarch64 success Make fate finished
andriy/make_x86 success Make finished
andriy/make_fate_x86 success Make fate finished

Commit Message

Martin Storsjö March 25, 2024, 3:02 p.m. UTC
As the plain neon qpel_h functions process two rows at a time,
we need to allocate storage for h+8 rows instead of h+7.
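
(As a rough worked example, assuming h = 16: the 8-tap vertical filter
needs h + 7 = 23 intermediate rows, but a horizontal pass that writes
two rows per iteration rounds this up to 24 = h + 8 rows. Each
intermediate row takes MAX_PB_SIZE * 2 = 128 bytes, so the scratch area
is (h + 8) << 7 bytes, matching the "add w10, w5, #8; lsl x10, x10, #7"
sequence in the patch.)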

With storage for h+8 rows allocated, simply incrementing the stack
pointer at the end no longer ends up at the right spot. Instead, store
the intended final stack pointer value in the register x14, which in
turn is saved on the stack across the call to the qpel_h function, and
restore the stack pointer from it before returning.
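
A minimal sketch of the resulting prologue/epilogue pattern
(illustrative only, not the literal patch code; "horizontal_filter"
stands in for the ff_hevc_put_hevc_qpel_hN_8 helper that is actually
called, and the frame offsets are the ones used in the 4/6/8/16-wide
cases):

        mov             x14, sp              // remember where sp must end up
        sub             sp, sp, x10          // tmp_array, x10 = (h + 8) * 128 bytes
        stp             x7, x30, [sp, #-64]! // spill my and the return address
        str             x14, [sp, #48]       // x14 is caller-saved, keep it across the call
        bl              horizontal_filter    // fills tmp_array with h + 8 rows
        ldr             x14, [sp, #48]
        ldp             x7, x30, [sp], #64
        // ... vertical pass reads tmp_array and writes the output ...
        mov             sp, x14              // restore sp in one step, no size arithmetic
        ret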

AWS Graviton 3:
put_hevc_qpel_bi_hv4_8_c: 385.7
put_hevc_qpel_bi_hv4_8_neon: 131.0
put_hevc_qpel_bi_hv4_8_i8mm: 92.2
put_hevc_qpel_bi_hv6_8_c: 701.0
put_hevc_qpel_bi_hv6_8_neon: 239.5
put_hevc_qpel_bi_hv6_8_i8mm: 191.0
put_hevc_qpel_bi_hv8_8_c: 1162.0
put_hevc_qpel_bi_hv8_8_neon: 228.0
put_hevc_qpel_bi_hv8_8_i8mm: 225.2
put_hevc_qpel_bi_hv12_8_c: 2305.0
put_hevc_qpel_bi_hv12_8_neon: 558.0
put_hevc_qpel_bi_hv12_8_i8mm: 483.2
put_hevc_qpel_bi_hv16_8_c: 3965.2
put_hevc_qpel_bi_hv16_8_neon: 732.7
put_hevc_qpel_bi_hv16_8_i8mm: 656.5
put_hevc_qpel_bi_hv24_8_c: 8709.7
put_hevc_qpel_bi_hv24_8_neon: 1555.2
put_hevc_qpel_bi_hv24_8_i8mm: 1448.7
put_hevc_qpel_bi_hv32_8_c: 14818.0
put_hevc_qpel_bi_hv32_8_neon: 2763.7
put_hevc_qpel_bi_hv32_8_i8mm: 2468.0
put_hevc_qpel_bi_hv48_8_c: 32855.5
put_hevc_qpel_bi_hv48_8_neon: 6107.2
put_hevc_qpel_bi_hv48_8_i8mm: 5452.7
put_hevc_qpel_bi_hv64_8_c: 57591.5
put_hevc_qpel_bi_hv64_8_neon: 10660.2
put_hevc_qpel_bi_hv64_8_i8mm: 9580.0
---
 libavcodec/aarch64/hevcdsp_init_aarch64.c |   5 +
 libavcodec/aarch64/hevcdsp_qpel_neon.S    | 164 +++++++++++++---------
 2 files changed, 103 insertions(+), 66 deletions(-)

Patch

diff --git a/libavcodec/aarch64/hevcdsp_init_aarch64.c b/libavcodec/aarch64/hevcdsp_init_aarch64.c
index e9ee901322..e24dd0cbda 100644
--- a/libavcodec/aarch64/hevcdsp_init_aarch64.c
+++ b/libavcodec/aarch64/hevcdsp_init_aarch64.c
@@ -319,6 +319,10 @@  NEON8_FNPROTO(qpel_bi_v, (uint8_t *dst, ptrdiff_t dststride,
         const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
         int height, intptr_t mx, intptr_t my, int width),);
 
+NEON8_FNPROTO(qpel_bi_hv, (uint8_t *dst, ptrdiff_t dststride,
+        const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
+        int height, intptr_t mx, intptr_t my, int width),);
+
 NEON8_FNPROTO(qpel_bi_hv, (uint8_t *dst, ptrdiff_t dststride,
         const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
         int height, intptr_t mx, intptr_t my, int width), _i8mm);
@@ -452,6 +456,7 @@  av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth)
         NEON8_FNASSIGN(c->put_hevc_qpel, 1, 1, qpel_hv,);
         NEON8_FNASSIGN(c->put_hevc_qpel_uni, 1, 1, qpel_uni_hv,);
         NEON8_FNASSIGN_PARTIAL_5(c->put_hevc_qpel_uni_w, 1, 1, qpel_uni_w_hv,);
+        NEON8_FNASSIGN(c->put_hevc_qpel_bi, 1, 1, qpel_bi_hv,);
 
         if (have_i8mm(cpu_flags)) {
             NEON8_FNASSIGN(c->put_hevc_epel, 0, 1, epel_h, _i8mm);
diff --git a/libavcodec/aarch64/hevcdsp_qpel_neon.S b/libavcodec/aarch64/hevcdsp_qpel_neon.S
index df7032b692..8ddaa32b70 100644
--- a/libavcodec/aarch64/hevcdsp_qpel_neon.S
+++ b/libavcodec/aarch64/hevcdsp_qpel_neon.S
@@ -4590,14 +4590,6 @@  endfunc
 
 qpel_uni_w_hv neon
 
-#if HAVE_I8MM
-ENABLE_I8MM
-
-qpel_uni_w_hv neon_i8mm
-
-DISABLE_I8MM
-#endif
-
 function hevc_put_hevc_qpel_bi_hv4_8_end_neon
         mov             x9, #(MAX_PB_SIZE * 2)
         load_qpel_filterh x7, x6
@@ -4620,7 +4612,8 @@  function hevc_put_hevc_qpel_bi_hv4_8_end_neon
 .endm
 1:      calc_all
 .purgem calc
-2:      ret
+2:      mov             sp, x14
+        ret
 endfunc
 
 function hevc_put_hevc_qpel_bi_hv6_8_end_neon
@@ -4650,7 +4643,8 @@  function hevc_put_hevc_qpel_bi_hv6_8_end_neon
 .endm
 1:      calc_all
 .purgem calc
-2:      ret
+2:      mov             sp, x14
+        ret
 endfunc
 
 function hevc_put_hevc_qpel_bi_hv8_8_end_neon
@@ -4678,7 +4672,8 @@  function hevc_put_hevc_qpel_bi_hv8_8_end_neon
 .endm
 1:      calc_all
 .purgem calc
-2:      ret
+2:      mov             sp, x14
+        ret
 endfunc
 
 function hevc_put_hevc_qpel_bi_hv16_8_end_neon
@@ -4723,83 +4718,87 @@  function hevc_put_hevc_qpel_bi_hv16_8_end_neon
         subs            x10, x10, #16
         add             x4, x4, #32
         b.ne            0b
-        add             w10, w5, #7
-        lsl             x10, x10, #7
-        sub             x10, x10, x6, lsl #1 // part of first line
-        add             sp, sp, x10         // tmp_array without first line
+        mov             sp, x14
         ret
 endfunc
 
-#if HAVE_I8MM
-ENABLE_I8MM
-
-function ff_hevc_put_hevc_qpel_bi_hv4_8_neon_i8mm, export=1
-        add             w10, w5, #7
+.macro qpel_bi_hv suffix
+function ff_hevc_put_hevc_qpel_bi_hv4_8_\suffix, export=1
+        add             w10, w5, #8
         lsl             x10, x10, #7
+        mov             x14, sp
         sub             sp, sp, x10 // tmp_array
-        stp             x7, x30, [sp, #-48]!
+        stp             x7, x30, [sp, #-64]!
         stp             x4, x5, [sp, #16]
         stp             x0, x1, [sp, #32]
+        str             x14,    [sp, #48]
         sub             x1, x2, x3, lsl #1
         sub             x1, x1, x3
-        add             x0, sp, #48
+        add             x0, sp, #64
         mov             x2, x3
         add             w3, w5, #7
         mov             x4, x6
-        bl              X(ff_hevc_put_hevc_qpel_h4_8_neon_i8mm)
+        bl              X(ff_hevc_put_hevc_qpel_h4_8_\suffix)
         ldp             x4, x5, [sp, #16]
         ldp             x0, x1, [sp, #32]
-        ldp             x7, x30, [sp], #48
+        ldr             x14,    [sp, #48]
+        ldp             x7, x30, [sp], #64
         b               hevc_put_hevc_qpel_bi_hv4_8_end_neon
 endfunc
 
-function ff_hevc_put_hevc_qpel_bi_hv6_8_neon_i8mm, export=1
-        add             w10, w5, #7
+function ff_hevc_put_hevc_qpel_bi_hv6_8_\suffix, export=1
+        add             w10, w5, #8
         lsl             x10, x10, #7
+        mov             x14, sp
         sub             sp, sp, x10         // tmp_array
-        stp             x7, x30, [sp, #-48]!
+        stp             x7, x30, [sp, #-64]!
         stp             x4, x5, [sp, #16]
         stp             x0, x1, [sp, #32]
+        str             x14,    [sp, #48]
         sub             x1, x2, x3, lsl #1
         sub             x1, x1, x3
-        add             x0, sp, #48
+        add             x0, sp, #64
         mov             x2, x3
         add             x3, x5, #7
         mov             x4, x6
-        bl              X(ff_hevc_put_hevc_qpel_h6_8_neon_i8mm)
+        bl              X(ff_hevc_put_hevc_qpel_h6_8_\suffix)
         ldp             x4, x5, [sp, #16]
         ldp             x0, x1, [sp, #32]
-        ldp             x7, x30, [sp], #48
+        ldr             x14,    [sp, #48]
+        ldp             x7, x30, [sp], #64
         b               hevc_put_hevc_qpel_bi_hv6_8_end_neon
 endfunc
 
-function ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm, export=1
-        add             w10, w5, #7
+function ff_hevc_put_hevc_qpel_bi_hv8_8_\suffix, export=1
+        add             w10, w5, #8
         lsl             x10, x10, #7
+        mov             x14, sp
         sub             sp, sp, x10         // tmp_array
-        stp             x7, x30, [sp, #-48]!
+        stp             x7, x30, [sp, #-64]!
         stp             x4, x5, [sp, #16]
         stp             x0, x1, [sp, #32]
+        str             x14,    [sp, #48]
         sub             x1, x2, x3, lsl #1
         sub             x1, x1, x3
-        add             x0, sp, #48
+        add             x0, sp, #64
         mov             x2, x3
         add             x3, x5, #7
         mov             x4, x6
-        bl              X(ff_hevc_put_hevc_qpel_h8_8_neon_i8mm)
+        bl              X(ff_hevc_put_hevc_qpel_h8_8_\suffix)
         ldp             x4, x5, [sp, #16]
         ldp             x0, x1, [sp, #32]
-        ldp             x7, x30, [sp], #48
+        ldr             x14,    [sp, #48]
+        ldp             x7, x30, [sp], #64
         b               hevc_put_hevc_qpel_bi_hv8_8_end_neon
 endfunc
 
-function ff_hevc_put_hevc_qpel_bi_hv12_8_neon_i8mm, export=1
+function ff_hevc_put_hevc_qpel_bi_hv12_8_\suffix, export=1
         stp             x6, x7, [sp, #-80]!
         stp             x4, x5, [sp, #16]
         stp             x2, x3, [sp, #32]
         stp             x0, x1, [sp, #48]
         str             x30, [sp, #64]
-        bl              X(ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm)
+        bl              X(ff_hevc_put_hevc_qpel_bi_hv8_8_\suffix)
         ldp             x4, x5, [sp, #16]
         ldp             x2, x3, [sp, #32]
         ldp             x0, x1, [sp, #48]
@@ -4807,39 +4806,42 @@  function ff_hevc_put_hevc_qpel_bi_hv12_8_neon_i8mm, export=1
         add             x4, x4, #16
         add             x2, x2, #8
         add             x0, x0, #8
-        bl              X(ff_hevc_put_hevc_qpel_bi_hv4_8_neon_i8mm)
+        bl              X(ff_hevc_put_hevc_qpel_bi_hv4_8_\suffix)
         ldr             x30, [sp], #16
         ret
 endfunc
 
-function ff_hevc_put_hevc_qpel_bi_hv16_8_neon_i8mm, export=1
-        add             w10, w5, #7
+function ff_hevc_put_hevc_qpel_bi_hv16_8_\suffix, export=1
+        add             w10, w5, #8
         lsl             x10, x10, #7
+        mov             x14, sp
         sub             sp, sp, x10         // tmp_array
-        stp             x7, x30, [sp, #-48]!
+        stp             x7, x30, [sp, #-64]!
         stp             x4, x5, [sp, #16]
         stp             x0, x1, [sp, #32]
-        add             x0, sp, #48
+        str             x14,    [sp, #48]
+        add             x0, sp, #64
         sub             x1, x2, x3, lsl #1
         sub             x1, x1, x3
         mov             x2, x3
         add             w3, w5, #7
         mov             x4, x6
-        bl              X(ff_hevc_put_hevc_qpel_h16_8_neon_i8mm)
+        bl              X(ff_hevc_put_hevc_qpel_h16_8_\suffix)
         ldp             x4, x5, [sp, #16]
         ldp             x0, x1, [sp, #32]
-        ldp             x7, x30, [sp], #48
+        ldr             x14,    [sp, #48]
+        ldp             x7, x30, [sp], #64
         mov             x6, #16          // width
         b               hevc_put_hevc_qpel_bi_hv16_8_end_neon
 endfunc
 
-function ff_hevc_put_hevc_qpel_bi_hv24_8_neon_i8mm, export=1
+function ff_hevc_put_hevc_qpel_bi_hv24_8_\suffix, export=1
         stp             x6, x7, [sp, #-80]!
         stp             x4, x5, [sp, #16]
         stp             x2, x3, [sp, #32]
         stp             x0, x1, [sp, #48]
         str             x30, [sp, #64]
-        bl              X(ff_hevc_put_hevc_qpel_bi_hv16_8_neon_i8mm)
+        bl              X(ff_hevc_put_hevc_qpel_bi_hv16_8_\suffix)
         ldp             x4, x5, [sp, #16]
         ldp             x2, x3, [sp, #32]
         ldp             x0, x1, [sp, #48]
@@ -4847,73 +4849,103 @@  function ff_hevc_put_hevc_qpel_bi_hv24_8_neon_i8mm, export=1
         add             x4, x4, #32
         add             x2, x2, #16
         add             x0, x0, #16
-        bl              X(ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm)
+        bl              X(ff_hevc_put_hevc_qpel_bi_hv8_8_\suffix)
         ldr             x30, [sp], #16
         ret
 endfunc
 
-function ff_hevc_put_hevc_qpel_bi_hv32_8_neon_i8mm, export=1
-        add             w10, w5, #7
+function ff_hevc_put_hevc_qpel_bi_hv32_8_\suffix, export=1
+        add             w10, w5, #8
         lsl             x10, x10, #7
+        mov             x14, sp
         sub             sp, sp, x10         // tmp_array
-        stp             x7, x30, [sp, #-48]!
+        stp             x7, x30, [sp, #-64]!
         stp             x4, x5, [sp, #16]
         stp             x0, x1, [sp, #32]
-        add             x0, sp, #48
+        str             x14,    [sp, #48]
+        add             x0, sp, #64
         sub             x1, x2, x3, lsl #1
         mov             x2, x3
         sub             x1, x1, x3
         add             w3, w5, #7
         mov             x4, x6
-        bl              X(ff_hevc_put_hevc_qpel_h32_8_neon_i8mm)
+        mov             w6, #32
+        bl              X(ff_hevc_put_hevc_qpel_h32_8_\suffix)
         ldp             x4, x5, [sp, #16]
         ldp             x0, x1, [sp, #32]
-        ldp             x7, x30, [sp], #48
+        ldr             x14,    [sp, #48]
+        ldp             x7, x30, [sp], #64
         mov             x6, #32 // width
         b               hevc_put_hevc_qpel_bi_hv16_8_end_neon
 endfunc
 
-function ff_hevc_put_hevc_qpel_bi_hv48_8_neon_i8mm, export=1
-        add             w10, w5, #7
+function ff_hevc_put_hevc_qpel_bi_hv48_8_\suffix, export=1
+        add             w10, w5, #8
         lsl             x10, x10, #7
+        mov             x14, sp
         sub             sp, sp, x10 // tmp_array
-        stp             x7, x30, [sp, #-48]!
+        stp             x7, x30, [sp, #-64]!
         stp             x4, x5, [sp, #16]
         stp             x0, x1, [sp, #32]
-        add             x0, sp, #48
+        str             x14,    [sp, #48]
+        add             x0, sp, #64
         sub             x1, x2, x3, lsl #1
         mov             x2, x3
         sub             x1, x1, x3
         add             w3, w5, #7
         mov             x4, x6
-        bl              X(ff_hevc_put_hevc_qpel_h48_8_neon_i8mm)
+.ifc \suffix, neon
+        mov             w6, #48
+        bl              X(ff_hevc_put_hevc_qpel_h32_8_\suffix)
+.else
+        bl              X(ff_hevc_put_hevc_qpel_h48_8_\suffix)
+.endif
         ldp             x4, x5, [sp, #16]
         ldp             x0, x1, [sp, #32]
-        ldp             x7, x30, [sp], #48
+        ldr             x14,    [sp, #48]
+        ldp             x7, x30, [sp], #64
         mov             x6, #48 // width
         b               hevc_put_hevc_qpel_bi_hv16_8_end_neon
 endfunc
 
-function ff_hevc_put_hevc_qpel_bi_hv64_8_neon_i8mm, export=1
-        add             w10, w5, #7
+function ff_hevc_put_hevc_qpel_bi_hv64_8_\suffix, export=1
+        add             w10, w5, #8
         lsl             x10, x10, #7
+        mov             x14, sp
         sub             sp, sp, x10 // tmp_array
-        stp             x7, x30, [sp, #-48]!
+        stp             x7, x30, [sp, #-64]!
         stp             x4, x5, [sp, #16]
         stp             x0, x1, [sp, #32]
-        add             x0, sp, #48
+        str             x14,    [sp, #48]
+        add             x0, sp, #64
         sub             x1, x2, x3, lsl #1
         mov             x2, x3
         sub             x1, x1, x3
         add             w3, w5, #7
         mov             x4, x6
-        bl              X(ff_hevc_put_hevc_qpel_h64_8_neon_i8mm)
+.ifc \suffix, neon
+        mov             w6, #64
+        bl              X(ff_hevc_put_hevc_qpel_h32_8_\suffix)
+.else
+        bl              X(ff_hevc_put_hevc_qpel_h64_8_\suffix)
+.endif
         ldp             x4, x5, [sp, #16]
         ldp             x0, x1, [sp, #32]
-        ldp             x7, x30, [sp], #48
+        ldr             x14,    [sp, #48]
+        ldp             x7, x30, [sp], #64
         mov             x6, #64          // width
         b               hevc_put_hevc_qpel_bi_hv16_8_end_neon
 endfunc
+.endm
+
+qpel_bi_hv neon
+
+#if HAVE_I8MM
+ENABLE_I8MM
+
+qpel_uni_w_hv neon_i8mm
+
+qpel_bi_hv neon_i8mm
 
 DISABLE_I8MM
 #endif // HAVE_I8MM