
[FFmpeg-devel,1/1] libswscale/aarch64: add another hscale specialization

Message ID 80e3a8b0fd7244e59cf543faa9896d7f@amazon.com
State Superseded
Series [FFmpeg-devel,1/1] libswscale/aarch64: add another hscale specialization

Checks

Context Check Description
yinshiyou/make_loongarch64 success Make finished
yinshiyou/make_fate_loongarch64 success Make fate finished
andriy/make_x86 success Make finished
andriy/make_fate_x86 success Make fate finished

Commit Message

Swinney, Jonathan July 22, 2022, 2:02 a.m. UTC
This specialization handles the case where the filter size is 4 mod 8, e.g.
12, 20, etc. AArch64 was previously using the C function for this case;
this implementation speeds it up significantly.

hscale_8_to_15__fs_12_dstW_512_c: 6234.1
hscale_8_to_15__fs_12_dstW_512_neon: 1505.6

Signed-off-by: Jonathan Swinney <jswinney@amazon.com>
---
 libswscale/aarch64/hscale.S  | 107 +++++++++++++++++++++++++++++++++++
 libswscale/aarch64/swscale.c |  15 ++---
 2 files changed, 115 insertions(+), 7 deletions(-)
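
For context, the horizontal scaler computes, for each output sample, a dot product of filterSize unsigned source bytes with signed 16-bit filter coefficients, then shifts the sum right by 7 and clips it into the 15-bit intermediate range; the NEON code below does this for four output samples at a time with smlal/sqshrn. A minimal scalar C sketch of that operation, with an illustrative function name rather than the exact swscale implementation:

    #include <stdint.h>

    static void hscale_8_to_15_ref(int16_t *dst, int dstW, const uint8_t *src,
                                   const int16_t *filter, const int32_t *filterPos,
                                   int filterSize)
    {
        for (int i = 0; i < dstW; i++) {
            int val = 0;
            // accumulate src[filterPos[i] + j] * filter[filterSize * i + j]
            for (int j = 0; j < filterSize; j++)
                val += src[filterPos[i] + j] * filter[filterSize * i + j];
            // shift down and clip to the 15-bit intermediate range
            int v = val >> 7;
            dst[i] = v > 32767 ? 32767 : v;
        }
    }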

Comments

Martin Storsjö Aug. 4, 2022, 11:30 a.m. UTC | #1
On Fri, 22 Jul 2022, Swinney, Jonathan wrote:

> This specialization handles the case where the filter size is 4 mod 8, e.g.
> 12, 20, etc. AArch64 was previously using the C function for this case;
> this implementation speeds it up significantly.
>
> hscale_8_to_15__fs_12_dstW_512_c: 6234.1
> hscale_8_to_15__fs_12_dstW_512_neon: 1505.6
>
> Signed-off-by: Jonathan Swinney <jswinney@amazon.com>
> ---
> libswscale/aarch64/hscale.S  | 107 +++++++++++++++++++++++++++++++++++
> libswscale/aarch64/swscale.c |  15 ++---
> 2 files changed, 115 insertions(+), 7 deletions(-)
>
> diff --git a/libswscale/aarch64/hscale.S b/libswscale/aarch64/hscale.S
> index b7b21b7a0f..93b9094ded 100644
> --- a/libswscale/aarch64/hscale.S
> +++ b/libswscale/aarch64/hscale.S
> @@ -91,6 +91,113 @@ function ff_hscale8to15_X8_neon, export=1
>         ret
> endfunc
>
> +function ff_hscale8to15_X4_neon, export=1
> +// x0  SwsContext *c (not used)
> +// x1  int16_t *dst
> +// x2  int dstW
> +// x3  const uint8_t *src
> +// x4  const int16_t *filter
> +// x5  const int32_t *filterPos
> +// x6  int filterSize

Here, x2 and x6 should be w2 and w6

> +
> +// This function is for filter sizes that are 4 mod 8; in other words, anything that is 0 mod 4 but not
> +// 0 mod 8. It also assumes that dstW is 0 mod 4.
> +
> +        lsl                 w7, w6, #1                  // w7 = filterSize * 2
> +1:
> +        ldp                 w8, w9,  [x5]               // filterPos[idx + 0], [idx + 1]
> +        ldp                 w10, w11, [x5, 8]           // filterPos[idx + 2], [idx + 3]

With MS armasm64, this produces the following error:

libswscale\aarch64\hscale.o.asm(1034) : error A2079: improper line syntax; symbol expected
         ldp                 w10, w11, [x5, 8]

(The error is that the immediate offset should be written #8.)

> +
> +        movi                v16.2d, #0                  // initialize accumulator for idx + 0
> +        movi                v17.2d, #0                  // initialize accumulator for idx + 1
> +        movi                v18.2d, #0                  // initialize accumulator for idx + 2
> +        movi                v19.2d, #0                  // initialize accumulator for idx + 3
> +
> +        mov                 x12, x4                     // filter pointer for idx + 0
> +        add                 x13, x4, x7                 // filter pointer for idx + 1
> +        add                 x8, x3, w8, uxtw            // srcp + filterPos[idx + 0]
> +        add                 x9, x3, w9, uxtw            // srcp + filterPos[idx + 1]
> +
> +        add                 x14, x13, x7                // filter pointer for idx + 2
> +        add                 x10, x3, w10, uxtw          // srcp + filterPos[idx + 2]
> +        add                 x11, x3, w11, uxtw          // srcp + filterPos[idx + 3]
> +
> +        mov                 w0, w6                      // copy filterSize to a temp register, w0
> +        add                 x5, x5, #16                 // advance the filterPos pointer
> +        add                 x15, x14, x7                // filter pointer for idx + 3
> +        mov                 x16, xzr                    // temp register for offsetting filter pointers
> +
> +2:
> +        // This section loops over 8-wide chunks of filter size
> +        ldr                 d4, [x8], #8                // load 8 bytes from srcp for idx + 0
> +        ldr                 q0, [x12, x16]              // load 8 values, 16 bytes from filter for idx + 0
> +
> +        ldr                 d5, [x9], #8                // load 8 bytes from srcp for idx + 1
> +        ldr                 q1, [x13, x16]              // load 8 values, 16 bytes from filter for idx + 1
> +
> +        uxtl                v4.8h, v4.8b                // unsigned extend long for idx + 0
> +        uxtl                v5.8h, v5.8b                // unsigned extend long for idx + 1
> +
> +        ldr                 d6, [x10], #8               // load 8 bytes from srcp for idx + 2
> +        ldr                 q2, [x14, x16]              // load 8 values, 16 bytes from filter for idx + 2
> +
> +        smlal               v16.4s, v0.4h, v4.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 0
> +        smlal               v17.4s, v1.4h, v5.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 1
> +
> +        ldr                 d7, [x11], #8               // load 8 bytes from srcp for idx + 3
> +        ldr                 q3, [x15, x16]              // load 8 values, 16 bytes from filter for idx + 3
> +
> +        sub                 w0, w0, #8                  // decrement the remaining filterSize counter
> +        smlal2              v16.4s, v0.8h, v4.8h        // val += src[srcPos + j + 4..7] * filter[fs * i + j + 4..7], idx + 0
> +        smlal2              v17.4s, v1.8h, v5.8h        // val += src[srcPos + j + 4..7] * filter[fs * i + j + 4..7], idx + 1
> +        uxtl                v6.8h, v6.8b                // unsigned extend long for idx + 2
> +        uxtl                v7.8h, v7.8b                // unsigned extend long for idx + 3
> +        smlal               v18.4s, v2.4h, v6.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 2
> +        smlal               v19.4s, v3.4h, v7.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 3
> +
> +        cmp                 w0, #8                      // are there at least 8 more elements in filter to consume?
> +        add                 x16, x16, #16               // advance the offsetting register for filter values
> +
> +        smlal2              v18.4s, v2.8h, v6.8h        // val += src[srcPos + j + 4..7] * filter[fs * i + j + 4..7], idx + 2
> +        smlal2              v19.4s, v3.8h, v7.8h        // val += src[srcPos + j + 4..7] * filter[fs * i + j + 4..7], idx + 3
> +
> +        b.ge                2b                          // branch back to inner loop
> +
> +        // complete the remaining 4 filter elements
> +        sub                 x17, x7, #8                 // calculate the offset of the filter pointer for the remaining 4 elements
> +
> +        ldr                 s4, [x8]                    // load 4 bytes from srcp for idx + 0
> +        ldr                 d0, [x12, x17]              // load 4 values, 8 bytes from filter for idx + 0
> +        ldr                 s5, [x9]                    // load 4 bytes from srcp for idx + 1
> +        ldr                 d1, [x13, x17]              // load 4 values, 8 bytes from filter for idx + 1
> +
> +        uxtl                v4.8h, v4.8b                // unsigned extend long for idx + 0
> +        uxtl                v5.8h, v5.8b                // unsigned extend long for idx + 1
> +
> +        ldr                 s6, [x10]                   // load 4 bytes from srcp for idx + 2
> +        ldr                 d2, [x14, x17]              // load 4 values, 8 bytes from filter for idx + 2
> +        smlal               v16.4s, v0.4h, v4.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 0
> +        smlal               v17.4s, v1.4h, v5.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 1
> +        ldr                 s7, [x11]                   // load 4 bytes from srcp for idx + 3
> +        ldr                 d3, [x15, x17]              // load 4 values, 8 bytes from filter for idx + 3
> +
> +        uxtl                v6.8h, v6.8b                // unsigned extend long for idx + 2
> +        uxtl                v7.8h, v7.8b                // unsigned extend long for idx + 3
> +        addp                v16.4s, v16.4s, v17.4s      // horizontal pair adding for idx 0,1
> +        smlal               v18.4s, v2.4h, v6.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 2
> +        smlal               v19.4s, v3.4h, v7.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 3
> +
> +        addp                v18.4s, v18.4s, v19.4s      // horizontal pair adding for idx 2,3
> +        addp                v16.4s, v16.4s, v18.4s      // final horizontal pair adding producing one vector with results for idx = 0..3
> +
> +        subs                w2, w2, #4                  // dstW -= 4
> +        sqshrn              v0.4h, v16.4s, #7           // shift and clip the 2x16-bit final values
> +        st1                 {v0.4h}, [x1], #8           // write to destination idx 0..3
> +        add                 x4, x4, x7, lsl 2           // filter += (filterSize*2) * 4

This also fails to build with MS armasm64:

libswscale\aarch64\hscale.o.asm(1122) : error A2173: syntax error in expression
         add                 x4, x4, x7, lsl 2

Same issue here; it should be #2.

> #define ASSIGN_SCALE_FUNC(hscalefn, filtersize, opt)                    \
> -  switch (filtersize) {                                                 \
> -  case 4:  ASSIGN_SCALE_FUNC2(hscalefn, 4, opt); break;                 \
> -  default: if (filtersize % 8 == 0)                                     \
> -               ASSIGN_SCALE_FUNC2(hscalefn, X8, opt);                   \
> -           break;                                                       \
> -  }
> +    if (filtersize == 4)                                                \
> +        ASSIGN_SCALE_FUNC2(hscalefn, 4, opt);                           \
> +    else if (filtersize % 8 == 0)                                       \
> +        ASSIGN_SCALE_FUNC2(hscalefn, X8, opt);                          \
> +    else if (filtersize % 4 == 0 && filtersize % 8 != 0)                \
> +        ASSIGN_SCALE_FUNC2(hscalefn, X4, opt);
>

Would it be safest to wrap this in a do { } while (0)?
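
The concern here is the usual hazard with multi-statement macros: without a do { } while (0) wrapper, a surrounding if/else or a stray semicolon at the call site can silently change the control flow. A sketch of the wrapped form being suggested, keeping the patch's conditions unchanged:

    #define ASSIGN_SCALE_FUNC(hscalefn, filtersize, opt)                    \
        do {                                                                \
            if (filtersize == 4)                                            \
                ASSIGN_SCALE_FUNC2(hscalefn, 4, opt);                       \
            else if (filtersize % 8 == 0)                                   \
                ASSIGN_SCALE_FUNC2(hscalefn, X8, opt);                      \
            else if (filtersize % 4 == 0 && filtersize % 8 != 0)            \
                ASSIGN_SCALE_FUNC2(hscalefn, X4, opt);                      \
        } while (0)

(The filtersize % 8 != 0 check in the last branch is redundant after the preceding else if, but it is kept here as it appears in the patch.)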

Other than that, I think the patch seems fine to me!

// Martin

Patch

diff --git a/libswscale/aarch64/hscale.S b/libswscale/aarch64/hscale.S
index b7b21b7a0f..93b9094ded 100644
--- a/libswscale/aarch64/hscale.S
+++ b/libswscale/aarch64/hscale.S
@@ -91,6 +91,113 @@  function ff_hscale8to15_X8_neon, export=1
         ret
 endfunc
 
+function ff_hscale8to15_X4_neon, export=1
+// x0  SwsContext *c (not used)
+// x1  int16_t *dst
+// x2  int dstW
+// x3  const uint8_t *src
+// x4  const int16_t *filter
+// x5  const int32_t *filterPos
+// x6  int filterSize
+
+// This function is for filter sizes that are 4 mod 8; in other words, anything that is 0 mod 4 but not
+// 0 mod 8. It also assumes that dstW is 0 mod 4.
+
+        lsl                 w7, w6, #1                  // w7 = filterSize * 2
+1:
+        ldp                 w8, w9,  [x5]               // filterPos[idx + 0], [idx + 1]
+        ldp                 w10, w11, [x5, 8]           // filterPos[idx + 2], [idx + 3]
+
+        movi                v16.2d, #0                  // initialize accumulator for idx + 0
+        movi                v17.2d, #0                  // initialize accumulator for idx + 1
+        movi                v18.2d, #0                  // initialize accumulator for idx + 2
+        movi                v19.2d, #0                  // initialize accumulator for idx + 3
+
+        mov                 x12, x4                     // filter pointer for idx + 0
+        add                 x13, x4, x7                 // filter pointer for idx + 1
+        add                 x8, x3, w8, uxtw            // srcp + filterPos[idx + 0]
+        add                 x9, x3, w9, uxtw            // srcp + filterPos[idx + 1]
+
+        add                 x14, x13, x7                // filter pointer for idx + 2
+        add                 x10, x3, w10, uxtw          // srcp + filterPos[idx + 2]
+        add                 x11, x3, w11, uxtw          // srcp + filterPos[idx + 3]
+
+        mov                 w0, w6                      // copy filterSize to a temp register, w0
+        add                 x5, x5, #16                 // advance the filterPos pointer
+        add                 x15, x14, x7                // filter pointer for idx + 3
+        mov                 x16, xzr                    // temp register for offsetting filter pointers
+
+2:
+        // This section loops over 8-wide chunks of filter size
+        ldr                 d4, [x8], #8                // load 8 bytes from srcp for idx + 0
+        ldr                 q0, [x12, x16]              // load 8 values, 16 bytes from filter for idx + 0
+
+        ldr                 d5, [x9], #8                // load 8 bytes from srcp for idx + 1
+        ldr                 q1, [x13, x16]              // load 8 values, 16 bytes from filter for idx + 1
+
+        uxtl                v4.8h, v4.8b                // unsigned extend long for idx + 0
+        uxtl                v5.8h, v5.8b                // unsigned extend long for idx + 1
+
+        ldr                 d6, [x10], #8               // load 8 bytes from srcp for idx + 2
+        ldr                 q2, [x14, x16]              // load 8 values, 16 bytes from filter for idx + 2
+
+        smlal               v16.4s, v0.4h, v4.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 0
+        smlal               v17.4s, v1.4h, v5.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 1
+
+        ldr                 d7, [x11], #8               // load 8 bytes from srcp for idx + 3
+        ldr                 q3, [x15, x16]              // load 8 values, 16 bytes from filter for idx + 3
+
+        sub                 w0, w0, #8                  // decrement the remaining filterSize counter
+        smlal2              v16.4s, v0.8h, v4.8h        // val += src[srcPos + j + 4..7] * filter[fs * i + j + 4..7], idx + 0
+        smlal2              v17.4s, v1.8h, v5.8h        // val += src[srcPos + j + 4..7] * filter[fs * i + j + 4..7], idx + 1
+        uxtl                v6.8h, v6.8b                // unsigned extend long for idx + 2
+        uxtl                v7.8h, v7.8b                // unsigned extend long for idx + 3
+        smlal               v18.4s, v2.4h, v6.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 2
+        smlal               v19.4s, v3.4h, v7.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 3
+
+        cmp                 w0, #8                      // are there at least 8 more elements in filter to consume?
+        add                 x16, x16, #16               // advance the offsetting register for filter values
+
+        smlal2              v18.4s, v2.8h, v6.8h        // val += src[srcPos + j + 4..7] * filter[fs * i + j + 4..7], idx + 2
+        smlal2              v19.4s, v3.8h, v7.8h        // val += src[srcPos + j + 4..7] * filter[fs * i + j + 4..7], idx + 3
+
+        b.ge                2b                          // branch back to inner loop
+
+        // complete the remaining 4 filter elements
+        sub                 x17, x7, #8                 // calculate the offset of the filter pointer for the remaining 4 elements
+
+        ldr                 s4, [x8]                    // load 4 bytes from srcp for idx + 0
+        ldr                 d0, [x12, x17]              // load 4 values, 8 bytes from filter for idx + 0
+        ldr                 s5, [x9]                    // load 4 bytes from srcp for idx + 1
+        ldr                 d1, [x13, x17]              // load 4 values, 8 bytes from filter for idx + 1
+
+        uxtl                v4.8h, v4.8b                // unsigned extend long for idx + 0
+        uxtl                v5.8h, v5.8b                // unsigned extend long for idx + 1
+
+        ldr                 s6, [x10]                   // load 4 bytes from srcp for idx + 2
+        ldr                 d2, [x14, x17]              // load 4 values, 8 bytes from filter for idx + 2
+        smlal               v16.4s, v0.4h, v4.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 0
+        smlal               v17.4s, v1.4h, v5.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 1
+        ldr                 s7, [x11]                   // load 4 bytes from srcp for idx + 3
+        ldr                 d3, [x15, x17]              // load 4 values, 8 bytes from filter for idx + 3
+
+        uxtl                v6.8h, v6.8b                // unsigned extend long for idx + 2
+        uxtl                v7.8h, v7.8b                // unsigned extend long for idx + 3
+        addp                v16.4s, v16.4s, v17.4s      // horizontal pair adding for idx 0,1
+        smlal               v18.4s, v2.4h, v6.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 2
+        smlal               v19.4s, v3.4h, v7.4h        // val += src[srcPos + j + 0..3] * filter[fs * i + j + 0..3], idx + 3
+
+        addp                v18.4s, v18.4s, v19.4s      // horizontal pair adding for idx 2,3
+        addp                v16.4s, v16.4s, v18.4s      // final horizontal pair adding producing one vector with results for idx = 0..3
+
+        subs                w2, w2, #4                  // dstW -= 4
+        sqshrn              v0.4h, v16.4s, #7           // shift and clip the 2x16-bit final values
+        st1                 {v0.4h}, [x1], #8           // write to destination idx 0..3
+        add                 x4, x4, x7, lsl 2           // filter += (filterSize*2) * 4
+        b.gt                1b                          // loop until end of line
+        ret
+endfunc
+
 function ff_hscale8to15_4_neon, export=1
 // x0  SwsContext *c (not used)
 // x1  int16_t *dst
diff --git a/libswscale/aarch64/swscale.c b/libswscale/aarch64/swscale.c
index ab28be4da6..f943d431d9 100644
--- a/libswscale/aarch64/swscale.c
+++ b/libswscale/aarch64/swscale.c
@@ -32,7 +32,8 @@  void ff_hscale ## from_bpc ## to ## to_bpc ## _ ## filter_n ## _ ## opt( \
     SCALE_FUNC(filter_n,  8, 15, opt);
 #define ALL_SCALE_FUNCS(opt) \
     SCALE_FUNCS(4, opt); \
-    SCALE_FUNCS(X8, opt)
+    SCALE_FUNCS(X8, opt); \
+    SCALE_FUNCS(X4, opt)
 
 ALL_SCALE_FUNCS(neon);
 
@@ -48,12 +49,12 @@  void ff_yuv2planeX_8_neon(const int16_t *filter, int filterSize,
 } while (0)
 
 #define ASSIGN_SCALE_FUNC(hscalefn, filtersize, opt)                    \
-  switch (filtersize) {                                                 \
-  case 4:  ASSIGN_SCALE_FUNC2(hscalefn, 4, opt); break;                 \
-  default: if (filtersize % 8 == 0)                                     \
-               ASSIGN_SCALE_FUNC2(hscalefn, X8, opt);                   \
-           break;                                                       \
-  }
+    if (filtersize == 4)                                                \
+        ASSIGN_SCALE_FUNC2(hscalefn, 4, opt);                           \
+    else if (filtersize % 8 == 0)                                       \
+        ASSIGN_SCALE_FUNC2(hscalefn, X8, opt);                          \
+    else if (filtersize % 4 == 0 && filtersize % 8 != 0)                \
+        ASSIGN_SCALE_FUNC2(hscalefn, X4, opt);
 
 av_cold void ff_sws_init_swscale_aarch64(SwsContext *c)
 {