Message ID | 20220816122016.64929-6-hum@semihalf.com |
---|---|
State | New |
Headers | show |
Series | Provide neon implementation for me_cmp functions | expand |
Context | Check | Description |
---|---|---|
yinshiyou/make_loongarch64 | success | Make finished |
yinshiyou/make_fate_loongarch64 | success | Make fate finished |
andriy/make_x86 | success | Make finished |
andriy/make_fate_x86 | success | Make fate finished |
On Tue, 16 Aug 2022, Hubert Mazur wrote: > Provide optimized implementation of pix_abs8 function for arm64. > > Performance comparison tests are shown below. > - pix_abs_1_0_c: 101.2 > - pix_abs_1_0_neon: 22.5 > - sad_1_c: 101.2 > - sad_1_neon: 22.5 > > Benchmarks and tests are run with checkasm tool on AWS Graviton 3. > --- > libavcodec/aarch64/me_cmp_init_aarch64.c | 4 ++ > libavcodec/aarch64/me_cmp_neon.S | 49 ++++++++++++++++++++++++ > 2 files changed, 53 insertions(+) > > diff --git a/libavcodec/aarch64/me_cmp_init_aarch64.c b/libavcodec/aarch64/me_cmp_init_aarch64.c > index 2f51f0497e..e7dbd4cbc5 100644 > --- a/libavcodec/aarch64/me_cmp_init_aarch64.c > +++ b/libavcodec/aarch64/me_cmp_init_aarch64.c > @@ -31,6 +31,8 @@ int ff_pix_abs16_x2_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t * > ptrdiff_t stride, int h); > int ff_pix_abs16_y2_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, > ptrdiff_t stride, int h); > +int ff_pix_abs8_neon(MpegEncContext *s, const uint8_t *blk1, const uint8_t *blk2, > + ptrdiff_t stride, int h); Alignment > diff --git a/libavcodec/aarch64/me_cmp_neon.S b/libavcodec/aarch64/me_cmp_neon.S > index 3f4266d4d5..8c396cad21 100644 > --- a/libavcodec/aarch64/me_cmp_neon.S > +++ b/libavcodec/aarch64/me_cmp_neon.S > @@ -72,6 +72,55 @@ function ff_pix_abs16_neon, export=1 > ret > endfunc > > +function ff_pix_abs8_neon, export=1 > + // x0 unused > + // x1 uint8_t *pix1 > + // x2 uint8_t *pix2 > + // x3 ptrdiff_t stride > + // x4 int h w4, not x4 > + > + movi d18, #0 Unused d18 > + movi v30.8h, #0 > + cmp w4, #4 > + b.lt 2f > + > +// make 4 iterations at once > +1: > + ld1 {v0.8b}, [x1], x3 // Load pix1 for first iteration > + ld1 {v1.8b}, [x2], x3 // Load pix2 for first iteration > + ld1 {v2.8b}, [x1], x3 // Load pix1 for second iteration > + uabal v30.8h, v0.8b, v1.8b // Absolute difference, first iteration > + ld1 {v3.8b}, [x2], x3 // Load pix2 for second iteration > + ld1 {v4.8b}, [x1], x3 // Load pix1 for 
third iteration > + uabal v30.8h, v2.8b, v3.8b // Absolute difference, second iteration > + ld1 {v5.8b}, [x2], x3 // Load pix2 for third iteration > + sub w4, w4, #4 // h -= 4 > + uabal v30.8h, v4.8b, v5.8b // Absolute difference, third iteration > + ld1 {v6.8b}, [x1], x3 // Load pix1 for fourth iteration > + ld1 {v7.8b}, [x2], x3 // Load pix2 for fourth iteration > + cmp w4, #4 > + uabal v30.8h, v6.8b, v7.8b // Absolute difference, fourth iteration The interleaving here looks mostly quite good, but the last uabal comes almost directly after the two loads; I moved the second-last uabal from before the two ld1s to between ld1 and cmp, and got a rather notable speedup. Before: Cortex A53 A72 A73 pix_abs_1_0_neon: 65.7 33.7 21.5 After: pix_abs_1_0_neon: 57.7 33.5 21.5 So this is a 13% speedup on Cortex A53, just by moving one single instruction. This is why paying attention to scheduling matters, sometimes a lot. > + uaddlv s20, v30.8h // Add up vector > + add d18, d18, d20 > + fmov w0, s18 And finally, by removing the unnecessary add of d18 here, I got this further reduced to the following runtimes: Cortex A53 A72 A73 pix_abs_1_0_neon: 54.7 30.7 20.2 // Martin
diff --git a/libavcodec/aarch64/me_cmp_init_aarch64.c b/libavcodec/aarch64/me_cmp_init_aarch64.c index 2f51f0497e..e7dbd4cbc5 100644 --- a/libavcodec/aarch64/me_cmp_init_aarch64.c +++ b/libavcodec/aarch64/me_cmp_init_aarch64.c @@ -31,6 +31,8 @@ int ff_pix_abs16_x2_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t * ptrdiff_t stride, int h); int ff_pix_abs16_y2_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t stride, int h); +int ff_pix_abs8_neon(MpegEncContext *s, const uint8_t *blk1, const uint8_t *blk2, + ptrdiff_t stride, int h); int sse16_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t stride, int h); @@ -49,8 +51,10 @@ av_cold void ff_me_cmp_init_aarch64(MECmpContext *c, AVCodecContext *avctx) c->pix_abs[0][1] = ff_pix_abs16_x2_neon; c->pix_abs[0][2] = ff_pix_abs16_y2_neon; c->pix_abs[0][3] = ff_pix_abs16_xy2_neon; + c->pix_abs[1][0] = ff_pix_abs8_neon; c->sad[0] = ff_pix_abs16_neon; + c->sad[1] = ff_pix_abs8_neon; c->sse[0] = sse16_neon; c->sse[1] = sse8_neon; c->sse[2] = sse4_neon; diff --git a/libavcodec/aarch64/me_cmp_neon.S b/libavcodec/aarch64/me_cmp_neon.S index 3f4266d4d5..8c396cad21 100644 --- a/libavcodec/aarch64/me_cmp_neon.S +++ b/libavcodec/aarch64/me_cmp_neon.S @@ -72,6 +72,55 @@ function ff_pix_abs16_neon, export=1 ret endfunc +function ff_pix_abs8_neon, export=1 + // x0 unused + // x1 uint8_t *pix1 + // x2 uint8_t *pix2 + // x3 ptrdiff_t stride + // x4 int h + + movi d18, #0 + movi v30.8h, #0 + cmp w4, #4 + b.lt 2f + +// make 4 iterations at once +1: + ld1 {v0.8b}, [x1], x3 // Load pix1 for first iteration + ld1 {v1.8b}, [x2], x3 // Load pix2 for first iteration + ld1 {v2.8b}, [x1], x3 // Load pix1 for second iteration + uabal v30.8h, v0.8b, v1.8b // Absolute difference, first iteration + ld1 {v3.8b}, [x2], x3 // Load pix2 for second iteration + ld1 {v4.8b}, [x1], x3 // Load pix1 for third iteration + uabal v30.8h, v2.8b, v3.8b // Absolute difference, second iteration + ld1 
{v5.8b}, [x2], x3 // Load pix2 for third iteration + sub w4, w4, #4 // h -= 4 + uabal v30.8h, v4.8b, v5.8b // Absolute difference, third iteration + ld1 {v6.8b}, [x1], x3 // Load pix1 for fourth iteration + ld1 {v7.8b}, [x2], x3 // Load pix2 for fourth iteration + cmp w4, #4 + uabal v30.8h, v6.8b, v7.8b // Absolute difference, fourth iteration + b.ge 1b + + cbz w4, 3f + +// iterate by one +2: + ld1 {v0.8b}, [x1], x3 // Load pix1 + ld1 {v1.8b}, [x2], x3 // Load pix2 + + subs w4, w4, #1 + uabal v30.8h, v0.8b, v1.8b + b.ne 2b + +3: + uaddlv s20, v30.8h // Add up vector + add d18, d18, d20 + fmov w0, s18 + + ret +endfunc + function ff_pix_abs16_xy2_neon, export=1 // x0 unused // x1 uint8_t *pix1