[FFmpeg-devel,3/5] lavc/aarch64: Add neon implementation for pix_abs16_y2

Message ID 20220816122016.64929-4-hum@semihalf.com
State New
Series Provide neon implementation for me_cmp functions

Checks

Context Check Description
yinshiyou/make_loongarch64 success Make finished
yinshiyou/make_fate_loongarch64 success Make fate finished
andriy/make_x86 success Make finished
andriy/make_fate_x86 success Make fate finished

Commit Message

Hubert Mazur Aug. 16, 2022, 12:20 p.m. UTC
Provide an optimized implementation of the pix_abs16_y2 function for arm64.

A performance comparison is shown below.
pix_abs_0_2_c: 317.2
pix_abs_0_2_neon: 37.5

Benchmarks and tests were run with the checkasm tool on AWS Graviton 3.

Signed-off-by: Hubert Mazur <hum@semihalf.com>
---
 libavcodec/aarch64/me_cmp_init_aarch64.c |  3 +
 libavcodec/aarch64/me_cmp_neon.S         | 75 ++++++++++++++++++++++++
 2 files changed, 78 insertions(+)

Comments

Martin Storsjö Aug. 18, 2022, 9:16 a.m. UTC | #1
On Tue, 16 Aug 2022, Hubert Mazur wrote:

> Provide an optimized implementation of the pix_abs16_y2 function for arm64.
>
> A performance comparison is shown below.
> pix_abs_0_2_c: 317.2
> pix_abs_0_2_neon: 37.5
>
> Benchmarks and tests were run with the checkasm tool on AWS Graviton 3.
>
> Signed-off-by: Hubert Mazur <hum@semihalf.com>
> ---
> libavcodec/aarch64/me_cmp_init_aarch64.c |  3 +
> libavcodec/aarch64/me_cmp_neon.S         | 75 ++++++++++++++++++++++++
> 2 files changed, 78 insertions(+)
>
> diff --git a/libavcodec/aarch64/me_cmp_init_aarch64.c b/libavcodec/aarch64/me_cmp_init_aarch64.c
> index 955592625a..1c36d3d7cb 100644
> --- a/libavcodec/aarch64/me_cmp_init_aarch64.c
> +++ b/libavcodec/aarch64/me_cmp_init_aarch64.c
> @@ -29,6 +29,8 @@ int ff_pix_abs16_xy2_neon(MpegEncContext *s, const uint8_t *blk1, const uint8_t
>                       ptrdiff_t stride, int h);
> int ff_pix_abs16_x2_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
>                       ptrdiff_t stride, int h);
> +int ff_pix_abs16_y2_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
> +                      ptrdiff_t stride, int h);

Misaligned function declaration.
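
That is, the continuation line should use the same indentation as the
neighbouring declarations in the file, e.g.:

int ff_pix_abs16_y2_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                      ptrdiff_t stride, int h);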

> diff --git a/libavcodec/aarch64/me_cmp_neon.S b/libavcodec/aarch64/me_cmp_neon.S
> index 367924b3c2..0ec9c0465b 100644
> --- a/libavcodec/aarch64/me_cmp_neon.S
> +++ b/libavcodec/aarch64/me_cmp_neon.S
> @@ -404,3 +404,78 @@ function sse4_neon, export=1
>
>         ret
> endfunc
> +
> +function ff_pix_abs16_y2_neon, export=1

Why place this new function at the bottom of the file, instead of
logically following the other preexisting pix_abs16 functions? In the
version I pushed, I moved it further up.

> +        // x0           unused
> +        // x1           uint8_t *pix1
> +        // x2           uint8_t *pix2
> +        // x3           ptrdiff_t stride
> +        // x4           int h

This should be w4. You had fixed this in a couple of patches, but missed
this one.

> +
> +        // initialize buffers
> +        movi            v29.8h, #0                      // clear the accumulator
> +        movi            v28.8h, #0                      // clear the accumulator
> +        movi            d18, #0

Unused d18 here too.
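
Since d18 is zero on entry to the epilogue, the final add through it is a
no-op in effect; a minimal corrected epilogue (a sketch, not necessarily
the version that was pushed) could drop d18 entirely:

        add             v29.8h, v29.8h, v28.8h          // combine both accumulators
        uaddlv          s16, v29.8h                     // horizontal sum of all lanes
        fmov            w0, s16                         // move the result to the return register
        ret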


> +        add             x5, x2, x3                      // pix2 + stride
> +        cmp             w4, #4
> +        b.lt            2f
> +
> +// make 4 iterations at once
> +1:
> +
> +        // abs(pix1[0] - avg2(pix2[0], pix2[0 + stride]))
> +        // avg2(a, b) = (((a) + (b) + 1) >> 1)
> +        // abs(x) = (x < 0 ? (-x) : (x))
> +
> +        ld1             {v1.16b}, [x2], x3              // Load pix2 for first iteration
> +        ld1             {v2.16b}, [x5], x3              // Load pix3 for first iteration
> +        urhadd          v30.16b, v1.16b, v2.16b         // Rounding halving add, first iteration
> +        ld1             {v0.16b}, [x1], x3              // Load pix1 for first iteration
> +        uabal           v29.8h, v0.8b, v30.8b           // Absolute difference of lower half, first iteration

This whole first sequence is almost entirely blocking, with each
instruction waiting for the result of the previous one. Did you forget to
interleave this with the rest of the operations?

Normally I wouldn't bother with minor interleaving details, but here the 
impact was rather big. I manually reinterleaved the whole function, and 
got this speedup:

                    Cortex A53     A72     A73
Before:
pix_abs_0_2_neon:        153.0    63.7    52.7
After:
pix_abs_0_2_neon:        141.0    61.7    51.7
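
The idea, sketched below (an illustrative rescheduling, not necessarily
the exact ordering that was pushed), is to issue the loads for the next
iteration while the urhadd/uabal chain of the current one is still in
flight:

        ld1             {v1.16b}, [x2], x3              // pix2, iteration 1
        ld1             {v2.16b}, [x5], x3              // pix2 + stride, iteration 1
        ld1             {v0.16b}, [x1], x3              // pix1, iteration 1
        ld1             {v4.16b}, [x2], x3              // pix2, iteration 2 (issued early)
        urhadd          v30.16b, v1.16b, v2.16b         // avg2, iteration 1
        ld1             {v5.16b}, [x5], x3              // pix2 + stride, iteration 2
        ld1             {v3.16b}, [x1], x3              // pix1, iteration 2
        uabal           v29.8h, v0.8b, v30.8b           // SAD of lower half, iteration 1
        urhadd          v27.16b, v4.16b, v5.16b         // avg2, iteration 2
        uabal2          v28.8h, v0.16b, v30.16b         // SAD of upper half, iteration 1
        ...

This way each iteration's arithmetic overlaps with the following
iteration's loads instead of stalling on its own.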

// Martin

Patch

diff --git a/libavcodec/aarch64/me_cmp_init_aarch64.c b/libavcodec/aarch64/me_cmp_init_aarch64.c
index 955592625a..1c36d3d7cb 100644
--- a/libavcodec/aarch64/me_cmp_init_aarch64.c
+++ b/libavcodec/aarch64/me_cmp_init_aarch64.c
@@ -29,6 +29,8 @@  int ff_pix_abs16_xy2_neon(MpegEncContext *s, const uint8_t *blk1, const uint8_t
                       ptrdiff_t stride, int h);
 int ff_pix_abs16_x2_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                       ptrdiff_t stride, int h);
+int ff_pix_abs16_y2_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
+                      ptrdiff_t stride, int h);
 
 int sse16_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                       ptrdiff_t stride, int h);
@@ -42,6 +44,7 @@  av_cold void ff_me_cmp_init_aarch64(MECmpContext *c, AVCodecContext *avctx)
     if (have_neon(cpu_flags)) {
         c->pix_abs[0][0] = ff_pix_abs16_neon;
         c->pix_abs[0][1] = ff_pix_abs16_x2_neon;
+        c->pix_abs[0][2] = ff_pix_abs16_y2_neon;
         c->pix_abs[0][3] = ff_pix_abs16_xy2_neon;
 
         c->sad[0] = ff_pix_abs16_neon;
diff --git a/libavcodec/aarch64/me_cmp_neon.S b/libavcodec/aarch64/me_cmp_neon.S
index 367924b3c2..0ec9c0465b 100644
--- a/libavcodec/aarch64/me_cmp_neon.S
+++ b/libavcodec/aarch64/me_cmp_neon.S
@@ -404,3 +404,78 @@  function sse4_neon, export=1
 
         ret
 endfunc
+
+function ff_pix_abs16_y2_neon, export=1
+        // x0           unused
+        // x1           uint8_t *pix1
+        // x2           uint8_t *pix2
+        // x3           ptrdiff_t stride
+        // x4           int h
+
+        // initialize buffers
+        movi            v29.8h, #0                      // clear the accumulator
+        movi            v28.8h, #0                      // clear the accumulator
+        movi            d18, #0
+        add             x5, x2, x3                      // pix2 + stride
+        cmp             w4, #4
+        b.lt            2f
+
+// make 4 iterations at once
+1:
+
+        // abs(pix1[0] - avg2(pix2[0], pix2[0 + stride]))
+        // avg2(a, b) = (((a) + (b) + 1) >> 1)
+        // abs(x) = (x < 0 ? (-x) : (x))
+
+        ld1             {v1.16b}, [x2], x3              // Load pix2 for first iteration
+        ld1             {v2.16b}, [x5], x3              // Load pix3 for first iteration
+        urhadd          v30.16b, v1.16b, v2.16b         // Rounding halving add, first iteration
+        ld1             {v0.16b}, [x1], x3              // Load pix1 for first iteration
+        uabal           v29.8h, v0.8b, v30.8b           // Absolute difference of lower half, first iteration
+        ld1             {v4.16b}, [x2], x3              // Load pix2 for second iteration
+        uabal2          v28.8h, v0.16b, v30.16b         // Absolute difference of upper half, first iteration
+        ld1             {v5.16b}, [x5], x3              // Load pix3 for second iteration
+        urhadd          v27.16b, v4.16b, v5.16b         // Rounding halving add, second iteration
+        ld1             {v3.16b}, [x1], x3              // Load pix1 for second iteration
+        uabal           v29.8h, v3.8b, v27.8b           // Absolute difference of lower half for second iteration
+        ld1             {v7.16b}, [x2], x3              // Load pix2 for third iteration
+        ld1             {v20.16b}, [x5], x3             // Load pix3 for third iteration
+        uabal2          v28.8h, v3.16b, v27.16b         // Absolute difference of upper half for second iteration
+        ld1             {v6.16b}, [x1], x3              // Load pix1 for third iteration
+        urhadd          v26.16b, v7.16b, v20.16b        // Rounding halving add, third iteration
+        uabal           v29.8h, v6.8b, v26.8b           // Absolute difference of lower half for third iteration
+        ld1             {v22.16b}, [x2], x3             // Load pix2 for fourth iteration
+        uabal2          v28.8h, v6.16b, v26.16b         // Absolute difference of upper half for third iteration
+        ld1             {v23.16b}, [x5], x3             // Load pix3 for fourth iteration
+        sub             w4, w4, #4                      // h -= 4
+        urhadd          v25.16b, v22.16b, v23.16b       // Rounding halving add
+        ld1             {v21.16b}, [x1], x3             // Load pix1 for fourth iteration
+        cmp             w4, #4
+        uabal           v29.8h, v21.8b, v25.8b          // Absolute difference of lower half for fourth iteration
+        uabal2          v28.8h, v21.16b, v25.16b        // Absolute difference of upper half for fourth iteration
+
+        b.ge            1b
+        cbz             w4, 3f
+
+// iterate by one
+2:
+
+        ld1             {v1.16b}, [x2], x3              // Load pix2
+        ld1             {v2.16b}, [x5], x3              // Load pix3
+        subs            w4, w4, #1
+        urhadd          v30.16b, v1.16b, v2.16b         // Rounding halving add
+        ld1             {v0.16b}, [x1], x3              // Load pix1
+        uabal           v29.8h, v30.8b, v0.8b
+        uabal2          v28.8h, v30.16b, v0.16b
+
+        b.ne            2b
+
+3:
+        add             v29.8h, v29.8h, v28.8h          // Add vectors together
+        uaddlv          s16, v29.8h                     // Add up vector values
+        add             d18, d18, d16
+
+        fmov            w0, s18
+
+        ret
+endfunc
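
For reference, the scalar routine being replaced sums, over h rows, the
absolute differences between pix1 and the vertically averaged pix2. A
minimal self-contained C sketch, modeled on the pix_abs16_y2_c code in
libavcodec/me_cmp.c (the real function additionally takes an unused
MpegEncContext * first argument):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* rounding average, as in the comments in the assembly above */
#define avg2(a, b) (((a) + (b) + 1) >> 1)

static int pix_abs16_y2_ref(const uint8_t *pix1, const uint8_t *pix2,
                            ptrdiff_t stride, int h)
{
    int s = 0;

    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 16; j++)
            s += abs(pix1[j] - avg2(pix2[j], pix2[j + stride]));
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}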