| Message ID | 20211220135627.615097-1-alankelly@google.com |
| --- | --- |
| State | New |
| Series | [FFmpeg-devel,1/2] libavutil/cpu: Add AV_CPU_FLAG_SLOW_GATHER. |
| Context | Check | Description |
| --- | --- | --- |
| andriy/make_x86 | success | Make finished |
| andriy/make_fate_x86 | success | Make fate finished |
| andriy/make_ppc | success | Make finished |
| andriy/make_fate_ppc | success | Make fate finished |
On 12/20/2021 10:56 AM, Alan Kelly wrote:
> This flag is set on Haswell and earlier and all AMD cpus.
> ---
> As discussed on IRC last week.
>  libavutil/cpu.h     | 57 +++++++++++++++++++++++----------------------
>  libavutil/x86/cpu.c | 13 ++++++++++-
>  2 files changed, 41 insertions(+), 29 deletions(-)
>
> diff --git a/libavutil/cpu.h b/libavutil/cpu.h
> index ae443eccad..4272d11d73 100644
> --- a/libavutil/cpu.h
> +++ b/libavutil/cpu.h
> @@ -26,34 +26,35 @@
>  #define AV_CPU_FLAG_FORCE    0x80000000 /* force usage of selected flags (OR) */
>
>  /* lower 16 bits - CPU features */
> -#define AV_CPU_FLAG_MMX          0x0001 ///< standard MMX
> -#define AV_CPU_FLAG_MMXEXT       0x0002 ///< SSE integer functions or AMD MMX ext
> -#define AV_CPU_FLAG_MMX2         0x0002 ///< SSE integer functions or AMD MMX ext
> -#define AV_CPU_FLAG_3DNOW        0x0004 ///< AMD 3DNOW
> -#define AV_CPU_FLAG_SSE          0x0008 ///< SSE functions
> -#define AV_CPU_FLAG_SSE2         0x0010 ///< PIV SSE2 functions
> -#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster
> -                                        ///< than regular MMX/SSE (e.g. Core1)
> -#define AV_CPU_FLAG_3DNOWEXT     0x0020 ///< AMD 3DNowExt
> -#define AV_CPU_FLAG_SSE3         0x0040 ///< Prescott SSE3 functions
> -#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster
> -                                        ///< than regular MMX/SSE (e.g. Core1)
> -#define AV_CPU_FLAG_SSSE3        0x0080 ///< Conroe SSSE3 functions
> -#define AV_CPU_FLAG_SSSE3SLOW 0x4000000 ///< SSSE3 supported, but usually not faster
> -#define AV_CPU_FLAG_ATOM     0x10000000 ///< Atom processor, some SSSE3 instructions are slower
> -#define AV_CPU_FLAG_SSE4         0x0100 ///< Penryn SSE4.1 functions
> -#define AV_CPU_FLAG_SSE42        0x0200 ///< Nehalem SSE4.2 functions
> -#define AV_CPU_FLAG_AESNI       0x80000 ///< Advanced Encryption Standard functions
> -#define AV_CPU_FLAG_AVX          0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used
> -#define AV_CPU_FLAG_AVXSLOW   0x8000000 ///< AVX supported, but slow when using YMM registers (e.g. Bulldozer)
> -#define AV_CPU_FLAG_XOP          0x0400 ///< Bulldozer XOP functions
> -#define AV_CPU_FLAG_FMA4         0x0800 ///< Bulldozer FMA4 functions
> -#define AV_CPU_FLAG_CMOV         0x1000 ///< supports cmov instruction
> -#define AV_CPU_FLAG_AVX2         0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used
> -#define AV_CPU_FLAG_FMA3        0x10000 ///< Haswell FMA3 functions
> -#define AV_CPU_FLAG_BMI1        0x20000 ///< Bit Manipulation Instruction Set 1
> -#define AV_CPU_FLAG_BMI2        0x40000 ///< Bit Manipulation Instruction Set 2
> -#define AV_CPU_FLAG_AVX512     0x100000 ///< AVX-512 functions: requires OS support even if YMM/ZMM registers aren't used
> +#define AV_CPU_FLAG_MMX            0x0001 ///< standard MMX
> +#define AV_CPU_FLAG_MMXEXT         0x0002 ///< SSE integer functions or AMD MMX ext
> +#define AV_CPU_FLAG_MMX2           0x0002 ///< SSE integer functions or AMD MMX ext
> +#define AV_CPU_FLAG_3DNOW          0x0004 ///< AMD 3DNOW
> +#define AV_CPU_FLAG_SSE            0x0008 ///< SSE functions
> +#define AV_CPU_FLAG_SSE2           0x0010 ///< PIV SSE2 functions
> +#define AV_CPU_FLAG_SSE2SLOW   0x40000000 ///< SSE2 supported, but usually not faster
> +                                          ///< than regular MMX/SSE (e.g. Core1)
> +#define AV_CPU_FLAG_3DNOWEXT       0x0020 ///< AMD 3DNowExt
> +#define AV_CPU_FLAG_SSE3           0x0040 ///< Prescott SSE3 functions
> +#define AV_CPU_FLAG_SSE3SLOW   0x20000000 ///< SSE3 supported, but usually not faster
> +                                          ///< than regular MMX/SSE (e.g. Core1)
> +#define AV_CPU_FLAG_SSSE3          0x0080 ///< Conroe SSSE3 functions
> +#define AV_CPU_FLAG_SSSE3SLOW   0x4000000 ///< SSSE3 supported, but usually not faster
> +#define AV_CPU_FLAG_ATOM       0x10000000 ///< Atom processor, some SSSE3 instructions are slower
> +#define AV_CPU_FLAG_SSE4           0x0100 ///< Penryn SSE4.1 functions
> +#define AV_CPU_FLAG_SSE42          0x0200 ///< Nehalem SSE4.2 functions
> +#define AV_CPU_FLAG_AESNI         0x80000 ///< Advanced Encryption Standard functions
> +#define AV_CPU_FLAG_AVX            0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used
> +#define AV_CPU_FLAG_AVXSLOW     0x8000000 ///< AVX supported, but slow when using YMM registers (e.g. Bulldozer)
> +#define AV_CPU_FLAG_XOP            0x0400 ///< Bulldozer XOP functions
> +#define AV_CPU_FLAG_FMA4           0x0800 ///< Bulldozer FMA4 functions
> +#define AV_CPU_FLAG_CMOV           0x1000 ///< supports cmov instruction
> +#define AV_CPU_FLAG_AVX2           0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used
> +#define AV_CPU_FLAG_FMA3          0x10000 ///< Haswell FMA3 functions
> +#define AV_CPU_FLAG_BMI1          0x20000 ///< Bit Manipulation Instruction Set 1
> +#define AV_CPU_FLAG_BMI2          0x40000 ///< Bit Manipulation Instruction Set 2
> +#define AV_CPU_FLAG_AVX512       0x100000 ///< AVX-512 functions: requires OS support even if YMM/ZMM registers aren't used
> +#define AV_CPU_FLAG_SLOW_GATHER 0x2000000 ///< CPU has slow gathers.

Don't re-indent the other flags. It will affect git blame output.

>
>  #define AV_CPU_FLAG_ALTIVEC      0x0001 ///< standard
>  #define AV_CPU_FLAG_VSX          0x0002 ///< ISA 2.06
> diff --git a/libavutil/x86/cpu.c b/libavutil/x86/cpu.c
> index bcd41a50a2..5770ecec72 100644
> --- a/libavutil/x86/cpu.c
> +++ b/libavutil/x86/cpu.c
> @@ -146,8 +146,16 @@ int ff_get_cpu_flags_x86(void)
>      if (max_std_level >= 7) {
>          cpuid(7, eax, ebx, ecx, edx);
>  #if HAVE_AVX2
> -        if ((rval & AV_CPU_FLAG_AVX) && (ebx & 0x00000020))
> +        if ((rval & AV_CPU_FLAG_AVX) && (ebx & 0x00000020)) {
>              rval |= AV_CPU_FLAG_AVX2;
> +            cpuid(1, eax, ebx, ecx, std_caps);
> +            family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
> +            model = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);
> +            /* Haswell and earlier has slow gather */

Afaik Haswell is the first CPU with gathers, so this comment is odd. Is
there another CPU this check below will trigger for?

> +            if(family == 6 && model < 70)
> +                rval |= AV_CPU_FLAG_SLOW_GATHER;
> +        }
> +
>  #if HAVE_AVX512 /* F, CD, BW, DQ, VL */
>          if ((xcr0_lo & 0xe0) == 0xe0) { /* OPMASK/ZMM state */
>              if ((rval & AV_CPU_FLAG_AVX2) && (ebx & 0xd0030000) == 0xd0030000)
> @@ -196,6 +204,9 @@ int ff_get_cpu_flags_x86(void)
>                 used unless explicitly disabled by checking AV_CPU_FLAG_AVXSLOW. */
>              if ((family == 0x15 || family == 0x16) && (rval & AV_CPU_FLAG_AVX))
>                  rval |= AV_CPU_FLAG_AVXSLOW;
> +
> +        /* AMD cpus have slow gather */
> +        rval |= AV_CPU_FLAG_SLOW_GATHER;

Don't unconditionally enable this for every CPU. Do it only for those
with AVX2.

if (rval & AV_CPU_FLAG_AVX2)
    rval |= AV_CPU_FLAG_SLOW_GATHER;

>      }
>
>  /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
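As a side note on the detection logic quoted above: the patch folds CPUID leaf 1's extended family/model fields into the base fields before comparing. Below is a minimal standalone sketch of that decoding, not part of the patch; the `0x000306C3` signature is an assumed example value (commonly reported by desktop Haswell parts, where the stepping digit may vary).

```c
#include <stdio.h>

/* Decode display family/model from a CPUID leaf-1 EAX value using the
 * same arithmetic as the patch: the extended family (bits 27:20) and
 * extended model (bits 19:16) fields are folded into the base fields. */
int main(void)
{
    unsigned eax = 0x000306C3; /* assumed example: a desktop Haswell signature */

    int family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); /* 6 + 0  = 6  */
    int model  = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0); /* 12 + 48 = 60 */

    /* The patch's condition: Intel family 6, model below 70 => slow gather. */
    printf("family=%d model=%d slow_gather=%d\n",
           family, model, family == 6 && model < 70);
    return 0;
}
```

For this example value the check fires: family 6, model 60 (0x3C), so the flag would be set.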
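Neither the patch nor the review shows a consumer of the new flag, so here is a hedged sketch of the usual libavutil usage pattern it enables: a DSP init function that prefers a non-gather AVX2 kernel when AV_CPU_FLAG_SLOW_GATHER is set. `LUTContext` and the `ff_lut_avx2_*` kernels are hypothetical names invented for illustration; `av_get_cpu_flags()` and `AV_CPU_FLAG_AVX2` are the existing libavutil API, and `AV_CPU_FLAG_SLOW_GATHER` is the flag this patch introduces.

```c
#include <stdint.h>

#include "libavutil/cpu.h"

/* Hypothetical DSP context: one function pointer chosen at init time. */
typedef struct LUTContext {
    void (*lookup)(uint8_t *dst, const uint8_t *src,
                   const uint8_t *lut, int n);
} LUTContext;

/* Hypothetical AVX2 kernels: one built around vpgatherdd, one around
 * scalar loads plus vector inserts. */
void ff_lut_avx2_gather(uint8_t *dst, const uint8_t *src,
                        const uint8_t *lut, int n);
void ff_lut_avx2_nogather(uint8_t *dst, const uint8_t *src,
                          const uint8_t *lut, int n);

void ff_lut_init_x86(LUTContext *c)
{
    int cpu_flags = av_get_cpu_flags();

    if (cpu_flags & AV_CPU_FLAG_AVX2) {
        /* On CPUs that report slow gathers, fall back to the
         * load-and-insert variant of the AVX2 kernel. */
        if (cpu_flags & AV_CPU_FLAG_SLOW_GATHER)
            c->lookup = ff_lut_avx2_nogather;
        else
            c->lookup = ff_lut_avx2_gather;
    }
}
```

The point of exposing this as a capability flag, rather than a CPU model list in each DSP file, is that every call site stays a one-line check at init time while the model-specific knowledge lives in ff_get_cpu_flags_x86() alone.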