Commit 1abd8794 authored by Erik de Castro Lopo

libFLAC/cpu.c: Remove OS SSE detection

Assume that all OSes that are usable today support SSE.

Patch-from: lvqcl.mail <lvqcl.mail@gmail.com>
parent 979ea746
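For context, once the OS-level probes below are gone, SSE reporting reduces to reading the CPUID feature bits directly. The following is a minimal sketch of that reduced check, not libFLAC code; it assumes a GCC/Clang toolchain for <cpuid.h>, and the bit masks are the same FLAC__CPUINFO_IA32_CPUID_SSE/SSE2 values that cpu.c keeps.

```c
/* Hedged sketch, not the libFLAC implementation: with OS SSE detection
 * removed, SSE/SSE2 support comes straight from CPUID leaf 1. */
#include <cpuid.h>	/* GCC/Clang helper header for the CPUID instruction */
#include <stdio.h>

int main(void)
{
	unsigned eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;	/* CPUID leaf 1 unavailable */

	printf(" SSE ........ %c\n", (edx & 0x02000000) ? 'Y' : 'n');	/* EDX bit 25 */
	printf(" SSE2 ....... %c\n", (edx & 0x04000000) ? 'Y' : 'n');	/* EDX bit 26 */
	return 0;
}
```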
@@ -39,23 +39,7 @@
#include <stdlib.h>
#include <memory.h>
#if defined (__NetBSD__) || defined(__OpenBSD__)
# include <sys/param.h>
# include <sys/sysctl.h>
# include <machine/cpu.h>
#endif
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) || defined(__APPLE__)
# include <sys/types.h>
# include <sys/sysctl.h>
#endif
#if defined(__linux__) && defined(FLAC__CPU_IA32) && !defined FLAC__NO_ASM && (defined FLAC__HAS_NASM || FLAC__HAS_X86INTRIN) && !FLAC__SSE_OS
# include <sys/ucontext.h>
#endif
#if defined(_MSC_VER)
# include <windows.h>
# include <intrin.h> /* for __cpuid() and _xgetbv() */
#endif
@@ -77,7 +61,6 @@
/* these are flags in EDX of CPUID AX=00000001 */
static const unsigned FLAC__CPUINFO_IA32_CPUID_CMOV = 0x00008000;
static const unsigned FLAC__CPUINFO_IA32_CPUID_MMX = 0x00800000;
static const unsigned FLAC__CPUINFO_IA32_CPUID_FXSR = 0x01000000;
static const unsigned FLAC__CPUINFO_IA32_CPUID_SSE = 0x02000000;
static const unsigned FLAC__CPUINFO_IA32_CPUID_SSE2 = 0x04000000;
#endif
@@ -97,43 +80,6 @@ static const unsigned FLAC__CPUINFO_IA32_CPUID_FMA = 0x00001000;
static const unsigned FLAC__CPUINFO_IA32_CPUID_AVX2 = 0x00000020;
#endif
/*
* Extra stuff needed for detection of OS support for SSE on IA-32
*/
#if defined(__linux__) && defined(FLAC__CPU_IA32) && !defined FLAC__NO_ASM && (defined FLAC__HAS_NASM || FLAC__HAS_X86INTRIN) && !FLAC__SSE_OS
/*
* If the OS doesn't support SSE, we will get here with a SIGILL. We
* modify the return address to jump over the offending SSE instruction
* and also the operation following it that indicates the instruction
* executed successfully. In this way we use no global variables and
* stay thread-safe.
*
* 3 + 3 + 6:
* 3 bytes for "xorps xmm0,xmm0"
* 3 bytes for estimate of how long the follwing "inc var" instruction is
* 6 bytes extra in case our estimate is wrong
* 12 bytes puts us in the NOP "landing zone"
*/
static void sigill_handler_sse_os(int signal, siginfo_t *si, void *uc)
{
(void)signal, (void)si;
((ucontext_t*)uc)->uc_mcontext.gregs[14/*REG_EIP*/] += 3 + 3 + 6;
}
#endif
#if defined FLAC__CPU_IA32
static void
ia32_disable_sse(FLAC__CPUInfo *info)
{
info->ia32.sse = false;
info->ia32.sse2 = false;
info->ia32.sse3 = false;
info->ia32.ssse3 = false;
info->ia32.sse41 = false;
info->ia32.sse42 = false;
}
#endif
#if defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64
static uint32_t
cpu_xgetbv_x86(void)
@@ -155,11 +101,7 @@ ia32_cpu_info (FLAC__CPUInfo *info)
{
#if !defined FLAC__CPU_IA32
(void) info;
#elif defined(__ANDROID__) || defined(ANDROID)
/* no need to check OS SSE support */
info->use_asm = true;
#else
FLAC__bool ia32_fxsr = false;
FLAC__bool ia32_osxsave = false;
FLAC__uint32 flags_eax, flags_ebx, flags_ecx, flags_edx;
@@ -181,7 +123,6 @@ ia32_cpu_info (FLAC__CPUInfo *info)
info->ia32.cmov = (flags_edx & FLAC__CPUINFO_IA32_CPUID_CMOV ) ? true : false;
info->ia32.mmx = (flags_edx & FLAC__CPUINFO_IA32_CPUID_MMX ) ? true : false;
ia32_fxsr = (flags_edx & FLAC__CPUINFO_IA32_CPUID_FXSR ) ? true : false;
info->ia32.sse = (flags_edx & FLAC__CPUINFO_IA32_CPUID_SSE ) ? true : false;
info->ia32.sse2 = (flags_edx & FLAC__CPUINFO_IA32_CPUID_SSE2 ) ? true : false;
info->ia32.sse3 = (flags_ecx & FLAC__CPUINFO_IA32_CPUID_SSE3 ) ? true : false;
@@ -213,110 +154,6 @@ ia32_cpu_info (FLAC__CPUInfo *info)
dfprintf(stderr, " AVX2 ....... %c\n", info->ia32.avx2 ? 'Y' : 'n');
}
/*
* now have to check for OS support of SSE instructions
*/
if(info->ia32.sse) {
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) || defined(__APPLE__)
int sse = 0;
size_t len = sizeof(sse);
/* at least one of these must work: */
sse = sse || (sysctlbyname("hw.instruction_sse", &sse, &len, NULL, 0) == 0 && sse);
sse = sse || (sysctlbyname("hw.optional.sse" , &sse, &len, NULL, 0) == 0 && sse); /* __APPLE__ ? */
if(!sse)
ia32_disable_sse(info);
#elif defined(__NetBSD__) || defined (__OpenBSD__)
int val = 0, mib[2] = { CTL_MACHDEP, CPU_SSE };
size_t len = sizeof(val);
if(sysctl(mib, 2, &val, &len, NULL, 0) < 0 || !val)
ia32_disable_sse(info);
else { /* double-check SSE2 */
mib[1] = CPU_SSE2;
len = sizeof(val);
if(sysctl(mib, 2, &val, &len, NULL, 0) < 0 || !val) {
ia32_disable_sse(info);
info->ia32.sse = true;
}
}
#elif defined(__linux__) && !FLAC__SSE_OS
int sse = 0;
struct sigaction sigill_save;
struct sigaction sigill_sse;
sigill_sse.sa_sigaction = sigill_handler_sse_os;
sigemptyset(&sigill_sse.sa_mask);
sigill_sse.sa_flags = SA_SIGINFO | SA_RESETHAND; /* SA_RESETHAND just in case our SIGILL return jump breaks, so we don't get stuck in a loop */
if(0 == sigaction(SIGILL, &sigill_sse, &sigill_save))
{
/* http://www.ibiblio.org/gferg/ldp/GCC-Inline-Assembly-HOWTO.html */
/* see sigill_handler_sse_os() for an explanation of the following: */
asm volatile (
"xorps %%xmm0,%%xmm0\n\t" /* will cause SIGILL if unsupported by OS */
"incl %0\n\t" /* SIGILL handler will jump over this */
/* landing zone */
"nop\n\t" /* SIGILL jump lands here if "inc" is 9 bytes */
"nop\n\t"
"nop\n\t"
"nop\n\t"
"nop\n\t"
"nop\n\t"
"nop\n\t" /* SIGILL jump lands here if "inc" is 3 bytes (expected) */
"nop\n\t"
"nop" /* SIGILL jump lands here if "inc" is 1 byte */
: "=r"(sse)
: "0"(sse)
);
sigaction(SIGILL, &sigill_save, NULL);
}
if(!sse)
ia32_disable_sse(info);
#elif defined(_MSC_VER)
__try {
__asm {
xorps xmm0,xmm0
}
}
__except(EXCEPTION_EXECUTE_HANDLER) {
if (_exception_code() == STATUS_ILLEGAL_INSTRUCTION)
ia32_disable_sse(info);
}
#elif defined(__GNUC__) /* MinGW goes here */
int sse = 0;
/* Based on the idea described in Agner Fog's manual "Optimizing subroutines in assembly language" */
/* In theory, not guaranteed to detect lack of OS SSE support on some future Intel CPUs, but in practice works (see the aforementioned manual) */
if (ia32_fxsr) {
struct {
FLAC__uint32 buff[128];
} __attribute__((aligned(16))) fxsr;
FLAC__uint32 old_val, new_val;
memset(fxsr.buff, 0, sizeof (fxsr.buff));
asm volatile ("fxsave %0" : "=m" (fxsr) : "m" (fxsr));
old_val = fxsr.buff[50];
fxsr.buff[50] ^= 0x0013c0de; /* change value in the buffer */
asm volatile ("fxrstor %0" : "=m" (fxsr) : "m" (fxsr)); /* try to change SSE register */
fxsr.buff[50] = old_val; /* restore old value in the buffer */
asm volatile ("fxsave %0" : "=m" (fxsr) : "m" (fxsr)); /* old value will be overwritten if SSE register was changed */
new_val = fxsr.buff[50]; /* == old_val if FXRSTOR didn't change SSE register and (old_val ^ 0x0013c0de) otherwise */
fxsr.buff[50] = old_val; /* again restore old value in the buffer */
asm volatile ("fxrstor %0" : "=m" (fxsr) : "m" (fxsr)); /* restore old values of registers */
if ((old_val^new_val) == 0x0013c0de)
sse = 1;
}
if(!sse)
ia32_disable_sse(info);
#else
/* no way to test, disable to be safe */
ia32_disable_sse(info);
#endif
dfprintf(stderr, " SSE OS sup . %c\n", info->ia32.sse ? 'Y' : 'n');
}
else /* info->ia32.sse == false */
ia32_disable_sse(info);
/*
* now have to check for OS support of AVX instructions
*/
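The OS-level check that does remain is the one for AVX, since YMM state is only saved when the OS enables XSAVE. Below is a minimal sketch of that kind of check, not the libFLAC code itself; the helper name os_supports_avx is just for illustration, and it assumes a GCC/Clang toolchain whose assembler knows the xgetbv mnemonic (the OSXSAVE bit and the XCR0 masks are architectural).

```c
/* Illustrative sketch (not libFLAC's implementation) of an OS-level AVX check:
 * OSXSAVE (CPUID.1:ECX bit 27) must be set before XGETBV may be executed,
 * then XCR0 bits 1 and 2 confirm the OS saves XMM and YMM state. */
#include <cpuid.h>

int os_supports_avx(void)
{
	unsigned eax, ebx, ecx, edx;
	unsigned xcr0_lo, xcr0_hi;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 0;
	if (!(ecx & (1u << 27)))	/* OSXSAVE clear: XGETBV would fault */
		return 0;

	__asm__ volatile ("xgetbv" : "=a"(xcr0_lo), "=d"(xcr0_hi) : "c"(0));
	(void)xcr0_hi;	/* only the low half of XCR0 is needed here */
	return (xcr0_lo & 0x6) == 0x6;	/* XMM (bit 1) and YMM (bit 2) state enabled */
}
```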
@@ -341,9 +178,6 @@ x86_64_cpu_info (FLAC__CPUInfo *info)
{
#if !defined FLAC__CPU_X86_64
(void) info;
#elif defined(__ANDROID__) || defined(ANDROID)
/* no need to check OS SSE support */
info->use_asm = true;
#elif !defined FLAC__NO_ASM && FLAC__HAS_X86INTRIN
FLAC__bool x86_osxsave = false;
FLAC__uint32 flags_eax, flags_ebx, flags_ecx, flags_edx;