| 1 | /* |
| 2 | * CPU detection code, extracted from mmx.h |
| 3 | * (c)1997-99 by H. Dietz and R. Fisher |
| 4 | * Converted to C and improved by Fabrice Bellard. |
| 5 | * |
| 6 | * This file is part of FFmpeg. |
| 7 | * |
| 8 | * FFmpeg is free software; you can redistribute it and/or |
| 9 | * modify it under the terms of the GNU Lesser General Public |
| 10 | * License as published by the Free Software Foundation; either |
| 11 | * version 2.1 of the License, or (at your option) any later version. |
| 12 | * |
| 13 | * FFmpeg is distributed in the hope that it will be useful, |
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 16 | * Lesser General Public License for more details. |
| 17 | * |
| 18 | * You should have received a copy of the GNU Lesser General Public |
| 19 | * License along with FFmpeg; if not, write to the Free Software |
| 20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
| 21 | */ |
| 22 | |
| 23 | #include <stdlib.h> |
| 24 | #include <string.h> |
| 25 | |
| 26 | #include "libavutil/x86/asm.h" |
| 27 | #include "libavutil/x86/cpu.h" |
| 28 | #include "libavutil/cpu.h" |
| 29 | #include "libavutil/cpu_internal.h" |
| 30 | |
#if HAVE_YASM

/* Prefer the standalone-assembly helpers when external asm is available;
 * they avoid inline-asm constraints entirely. */
#define cpuid(index, eax, ebx, ecx, edx) \
    ff_cpu_cpuid(index, &eax, &ebx, &ecx, &edx)

#define xgetbv(index, eax, edx) \
    ff_cpu_xgetbv(index, &eax, &edx)

#elif HAVE_INLINE_ASM

/* ebx saving is necessary for PIC. gcc seems unable to see it alone */
/* Executes CPUID with leaf `index` in EAX; ECX is zeroed via the "2"(0)
 * input so subleaf-sensitive leaves get subleaf 0. EBX is preserved by
 * bouncing it through ESI/RSI (REG_S), since EBX may be the PIC register. */
#define cpuid(index, eax, ebx, ecx, edx) \
    __asm__ volatile ( \
        "mov %%"REG_b", %%"REG_S" \n\t" \
        "cpuid \n\t" \
        "xchg %%"REG_b", %%"REG_S \
        : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx) \
        : "0" (index), "2"(0))

/* XGETBV (read extended control register `index`, result in EDX:EAX),
 * encoded as raw bytes — presumably for assemblers lacking the mnemonic. */
#define xgetbv(index, eax, edx) \
    __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))

/* Read the EFLAGS register into x (32-bit pushfl/popfl variant; this
 * branch is only compiled for x86_32 inline-asm builds). */
#define get_eflags(x) \
    __asm__ volatile ("pushfl \n" \
                      "pop %0 \n" \
                      : "=r"(x))

/* Write x into the EFLAGS register. */
#define set_eflags(x) \
    __asm__ volatile ("push %0 \n" \
                      "popfl \n" \
                      :: "r"(x))

#endif /* HAVE_INLINE_ASM */
| 64 | |
#if ARCH_X86_64

/* All x86_64 CPUs implement CPUID, so no runtime probe is needed. */
#define cpuid_test() 1

#elif HAVE_YASM

#define cpuid_test ff_cpu_cpuid_test

#elif HAVE_INLINE_ASM

/* Probe for CPUID support by trying to flip the ID bit (bit 21) in
 * EFLAGS. On CPUs that implement CPUID the bit is writable, so the
 * value read back differs from the original; on older CPUs the write
 * is ignored and both reads match. */
static int cpuid_test(void)
{
    x86_reg before, after;

    get_eflags(before);
    set_eflags(before ^ 0x200000);
    get_eflags(after);

    /* CPUID is available iff the ID bit actually toggled. */
    return before != after;
}
#endif
| 88 | |
/* Function to test if multimedia instructions are supported... */
/**
 * Detect x86 SIMD capabilities at runtime via CPUID.
 *
 * @return a bitmask of AV_CPU_FLAG_* values describing the host CPU,
 *         or 0 if CPUID is unavailable (or compiled out: no cpuid macro).
 */
int ff_get_cpu_flags_x86(void)
{
    int rval = 0;

#ifdef cpuid

    int eax, ebx, ecx, edx;
    int max_std_level, max_ext_level, std_caps = 0, ext_caps = 0;
    int family = 0, model = 0;
    union { int i[3]; char c[12]; } vendor;

    if (!cpuid_test())
        return 0; /* CPUID not supported */

    /* Leaf 0: highest standard leaf in EAX, and the 12-byte vendor string
     * returned in the register order EBX, EDX, ECX — hence the
     * i[0], i[2], i[1] argument order below. */
    cpuid(0, max_std_level, vendor.i[0], vendor.i[2], vendor.i[1]);

    if (max_std_level >= 1) {
        /* Leaf 1: EDX feature bits land in std_caps, ECX holds more. */
        cpuid(1, eax, ebx, ecx, std_caps);
        /* Decode family/model from EAX, folding in the extended fields. */
        family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
        model = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);
        if (std_caps & (1 << 15)) /* CMOV */
            rval |= AV_CPU_FLAG_CMOV;
        if (std_caps & (1 << 23)) /* MMX */
            rval |= AV_CPU_FLAG_MMX;
        if (std_caps & (1 << 25)) /* SSE bit: SSE implies the integer MMXEXT ops */
            rval |= AV_CPU_FLAG_MMXEXT;
#if HAVE_SSE
        if (std_caps & (1 << 25)) /* SSE */
            rval |= AV_CPU_FLAG_SSE;
        if (std_caps & (1 << 26)) /* SSE2 */
            rval |= AV_CPU_FLAG_SSE2;
        if (ecx & 1) /* SSE3 */
            rval |= AV_CPU_FLAG_SSE3;
        if (ecx & 0x00000200 ) /* SSSE3 (ECX bit 9) */
            rval |= AV_CPU_FLAG_SSSE3;
        if (ecx & 0x00080000 ) /* SSE4.1 (ECX bit 19) */
            rval |= AV_CPU_FLAG_SSE4;
        if (ecx & 0x00100000 ) /* SSE4.2 (ECX bit 20) */
            rval |= AV_CPU_FLAG_SSE42;
#if HAVE_AVX
        /* Check OSXSAVE and AVX bits (ECX bits 27 and 28) */
        if ((ecx & 0x18000000) == 0x18000000) {
            /* Check for OS support: XCR0 must report the XMM and YMM
             * state bits (mask 0x6) enabled, i.e. the OS saves/restores
             * the AVX register state on context switch. */
            xgetbv(0, eax, edx);
            if ((eax & 0x6) == 0x6) {
                rval |= AV_CPU_FLAG_AVX;
                if (ecx & 0x00001000) /* FMA3 (ECX bit 12) */
                    rval |= AV_CPU_FLAG_FMA3;
            }
        }
#endif /* HAVE_AVX */
#endif /* HAVE_SSE */
    }
    if (max_std_level >= 7) {
        /* Leaf 7: structured extended feature flags (in EBX). */
        cpuid(7, eax, ebx, ecx, edx);
#if HAVE_AVX2
        /* AVX2 needs the same OS YMM support already verified for AVX. */
        if ((rval & AV_CPU_FLAG_AVX) && (ebx & 0x00000020)) /* EBX bit 5 */
            rval |= AV_CPU_FLAG_AVX2;
#endif /* HAVE_AVX2 */
        /* BMI1/2 don't need OS support */
        if (ebx & 0x00000008) { /* BMI1 (EBX bit 3) */
            rval |= AV_CPU_FLAG_BMI1;
            if (ebx & 0x00000100) /* BMI2 (EBX bit 8) */
                rval |= AV_CPU_FLAG_BMI2;
        }
    }

    /* Highest supported extended leaf. */
    cpuid(0x80000000, max_ext_level, ebx, ecx, edx);

    if (max_ext_level >= 0x80000001) {
        /* Leaf 0x80000001: AMD/extended feature bits; EDX -> ext_caps. */
        cpuid(0x80000001, eax, ebx, ecx, ext_caps);
        if (ext_caps & (1U << 31)) /* 3DNow! */
            rval |= AV_CPU_FLAG_3DNOW;
        if (ext_caps & (1 << 30)) /* 3DNow!Ext */
            rval |= AV_CPU_FLAG_3DNOWEXT;
        if (ext_caps & (1 << 23)) /* MMX */
            rval |= AV_CPU_FLAG_MMX;
        if (ext_caps & (1 << 22)) /* MMXEXT */
            rval |= AV_CPU_FLAG_MMXEXT;

        /* Allow for selectively disabling SSE2 functions on AMD processors
           with SSE2 support but not SSE4a. This includes Athlon64, some
           Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster
           than SSE2 often enough to utilize this special-case flag.
           AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
           so that SSE2 is used unless explicitly disabled by checking
           AV_CPU_FLAG_SSE2SLOW. */
        if (!strncmp(vendor.c, "AuthenticAMD", 12) &&
            rval & AV_CPU_FLAG_SSE2 && !(ecx & 0x00000040)) { /* !SSE4a */
            rval |= AV_CPU_FLAG_SSE2SLOW;
        }

        /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
         * used unless the OS has AVX support. */
        if (rval & AV_CPU_FLAG_AVX) {
            if (ecx & 0x00000800) /* XOP (ECX bit 11) */
                rval |= AV_CPU_FLAG_XOP;
            if (ecx & 0x00010000) /* FMA4 (ECX bit 16) */
                rval |= AV_CPU_FLAG_FMA4;
        }
    }

    if (!strncmp(vendor.c, "GenuineIntel", 12)) {
        if (family == 6 && (model == 9 || model == 13 || model == 14)) {
            /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
             * 6/14 (core1 "yonah") theoretically support sse2, but it's
             * usually slower than mmx, so let's just pretend they don't.
             * AV_CPU_FLAG_SSE2 is disabled and AV_CPU_FLAG_SSE2SLOW is
             * enabled so that SSE2 is not used unless explicitly enabled
             * by checking AV_CPU_FLAG_SSE2SLOW. The same situation
             * applies for AV_CPU_FLAG_SSE3 and AV_CPU_FLAG_SSE3SLOW. */
            if (rval & AV_CPU_FLAG_SSE2)
                rval ^= AV_CPU_FLAG_SSE2SLOW | AV_CPU_FLAG_SSE2;
            if (rval & AV_CPU_FLAG_SSE3)
                rval ^= AV_CPU_FLAG_SSE3SLOW | AV_CPU_FLAG_SSE3;
        }
        /* The Atom processor has SSSE3 support, which is useful in many cases,
         * but sometimes the SSSE3 version is slower than the SSE2 equivalent
         * on the Atom, but is generally faster on other processors supporting
         * SSSE3. This flag allows for selectively disabling certain SSSE3
         * functions on the Atom. */
        if (family == 6 && model == 28)
            rval |= AV_CPU_FLAG_ATOM;
    }

#endif /* cpuid */

    return rval;
}