Message ID | 20221024200351.15126-1-jamrial@gmail.com |
---|---|
State | New |
Headers | show |
Series | [FFmpeg-devel] x86/intreadwrite: use intrinsics instead of inline asm for AV_ZERO128 | expand |
Context | Check | Description |
---|---|---|
yinshiyou/make_loongarch64 | success | Make finished |
yinshiyou/make_fate_loongarch64 | success | Make fate finished |
On 10/24/2022 5:03 PM, James Almer wrote: > When called inside a loop, the inline asm version results in one pxor > unnecessarely emitted per iteration, as the contents of the __asm__() block are > opaque to the compiler's instruction scheduler. > This is not the case with intrinsics, where pxor will be emitted once with any > half decent compiler. > > The code can be adapted to also work with MSVC, but for now, it will work with > the same compilers previously supported (GCC, Clang, etc). > > Signed-off-by: James Almer <jamrial@gmail.com> > --- > configure | 3 +++ > libavutil/x86/intreadwrite.h | 15 +++++++-------- > 2 files changed, 10 insertions(+), 8 deletions(-) > > diff --git a/configure b/configure > index c5a466657f..5bb83f5b5a 100755 > --- a/configure > +++ b/configure > @@ -2222,6 +2222,7 @@ HEADERS_LIST=" > > INTRINSICS_LIST=" > intrinsics_neon > + intrinsics_sse2 > " > > COMPLEX_FUNCS=" > @@ -2636,6 +2637,7 @@ armv6t2_deps="arm" > armv8_deps="aarch64" > neon_deps_any="aarch64 arm" > intrinsics_neon_deps="neon" > +intrinsics_sse2_deps="sse2" > vfp_deps_any="aarch64 arm" > vfpv3_deps="vfp" > setend_deps="arm" > @@ -6207,6 +6209,7 @@ elif enabled loongarch; then > fi > > check_cc intrinsics_neon arm_neon.h "int16x8_t test = vdupq_n_s16(0)" > +check_cc intrinsics_sse2 emmintrin.h "__m128i test = _mm_setzero_si128()" > > check_ldflags -Wl,--as-needed > check_ldflags -Wl,-z,noexecstack > diff --git a/libavutil/x86/intreadwrite.h b/libavutil/x86/intreadwrite.h > index 40f375b013..4a03e60fc6 100644 > --- a/libavutil/x86/intreadwrite.h > +++ b/libavutil/x86/intreadwrite.h > @@ -21,6 +21,9 @@ > #ifndef AVUTIL_X86_INTREADWRITE_H > #define AVUTIL_X86_INTREADWRITE_H > > +#if HAVE_INTRINSICS_SSE2 > +#include <emmintrin.h> > +#endif > #include <stdint.h> > #include "config.h" > #include "libavutil/attributes.h" > @@ -79,20 +82,16 @@ static av_always_inline void AV_COPY128(void *d, const void *s) > > #endif /* __SSE__ */ > > -#ifdef __SSE2__ > +#if HAVE_INTRINSICS_SSE2 
&& defined(__SSE2__) > > #define AV_ZERO128 AV_ZERO128 > static av_always_inline void AV_ZERO128(void *d) > { > - struct v {uint64_t v[2];}; > - > - __asm__("pxor %%xmm0, %%xmm0 \n\t" > - "movdqa %%xmm0, %0 \n\t" > - : "=m"(*(struct v*)d) > - :: "xmm0"); > + __m128i zero = _mm_setzero_si128(); > + _mm_store_si128(d, zero); > } > > -#endif /* __SSE2__ */ > +#endif /* HAVE_INTRINSICS_SSE2 && defined(__SSE2__) */ > > #endif /* HAVE_MMX */ Will apply.
diff --git a/configure b/configure index c5a466657f..5bb83f5b5a 100755 --- a/configure +++ b/configure @@ -2222,6 +2222,7 @@ HEADERS_LIST=" INTRINSICS_LIST=" intrinsics_neon + intrinsics_sse2 " COMPLEX_FUNCS=" @@ -2636,6 +2637,7 @@ armv6t2_deps="arm" armv8_deps="aarch64" neon_deps_any="aarch64 arm" intrinsics_neon_deps="neon" +intrinsics_sse2_deps="sse2" vfp_deps_any="aarch64 arm" vfpv3_deps="vfp" setend_deps="arm" @@ -6207,6 +6209,7 @@ elif enabled loongarch; then fi check_cc intrinsics_neon arm_neon.h "int16x8_t test = vdupq_n_s16(0)" +check_cc intrinsics_sse2 emmintrin.h "__m128i test = _mm_setzero_si128()" check_ldflags -Wl,--as-needed check_ldflags -Wl,-z,noexecstack diff --git a/libavutil/x86/intreadwrite.h b/libavutil/x86/intreadwrite.h index 40f375b013..4a03e60fc6 100644 --- a/libavutil/x86/intreadwrite.h +++ b/libavutil/x86/intreadwrite.h @@ -21,6 +21,9 @@ #ifndef AVUTIL_X86_INTREADWRITE_H #define AVUTIL_X86_INTREADWRITE_H +#if HAVE_INTRINSICS_SSE2 +#include <emmintrin.h> +#endif #include <stdint.h> #include "config.h" #include "libavutil/attributes.h" @@ -79,20 +82,16 @@ static av_always_inline void AV_COPY128(void *d, const void *s) #endif /* __SSE__ */ -#ifdef __SSE2__ +#if HAVE_INTRINSICS_SSE2 && defined(__SSE2__) #define AV_ZERO128 AV_ZERO128 static av_always_inline void AV_ZERO128(void *d) { - struct v {uint64_t v[2];}; - - __asm__("pxor %%xmm0, %%xmm0 \n\t" - "movdqa %%xmm0, %0 \n\t" - : "=m"(*(struct v*)d) - :: "xmm0"); + __m128i zero = _mm_setzero_si128(); + _mm_store_si128(d, zero); } -#endif /* __SSE2__ */ +#endif /* HAVE_INTRINSICS_SSE2 && defined(__SSE2__) */ #endif /* HAVE_MMX */
When called inside a loop, the inline asm version results in one pxor unnecessarily emitted per iteration, as the contents of the __asm__() block are opaque to the compiler's instruction scheduler. This is not the case with intrinsics, where pxor will be emitted once with any half decent compiler. The code can be adapted to also work with MSVC, but for now, it will work with the same compilers previously supported (GCC, Clang, etc). Signed-off-by: James Almer <jamrial@gmail.com> --- configure | 3 +++ libavutil/x86/intreadwrite.h | 15 +++++++-------- 2 files changed, 10 insertions(+), 8 deletions(-)