Diffstat (limited to 'include/asm-x86')
 include/asm-x86/bugs.h            |   5
 include/asm-x86/cpufeature.h      | 118
 include/asm-x86/e820.h            |   2
 include/asm-x86/i387.h            |  84
 include/asm-x86/msr-index.h       |  16
 include/asm-x86/processor-cyrix.h |   8
 include/asm-x86/processor-flags.h |   1
 include/asm-x86/processor.h       |  27
 include/asm-x86/sigcontext.h      |  87
 include/asm-x86/sigcontext32.h    |   6
 include/asm-x86/thread_info.h     |   1
 include/asm-x86/ucontext.h        |   6
 include/asm-x86/xcr.h             |  49
 include/asm-x86/xsave.h           | 118
 14 files changed, 467 insertions(+), 61 deletions(-)
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h
index 4761c461d23a..dc604985f2ad 100644
--- a/include/asm-x86/bugs.h
+++ b/include/asm-x86/bugs.h
@@ -2,6 +2,11 @@
 #define ASM_X86__BUGS_H
 
 extern void check_bugs(void);
+
+#if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
 int ppro_with_ram_bug(void);
+#else
+static inline int ppro_with_ram_bug(void) { return 0; }
+#endif
 
 #endif /* ASM_X86__BUGS_H */
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 065c6a86ed80..adfeae6586e1 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -6,7 +6,13 @@
 
 #include <asm/required-features.h>
 
-#define NCAPINTS	8	/* N 32-bit words worth of info */
+#define NCAPINTS	9	/* N 32-bit words worth of info */
+
+/*
+ * Note: If the comment begins with a quoted string, that string is used
+ * in /proc/cpuinfo instead of the macro name.  If the string is "",
+ * this feature bit is not displayed in /proc/cpuinfo at all.
+ */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
 #define X86_FEATURE_FPU		(0*32+ 0) /* Onboard FPU */
@@ -14,7 +20,7 @@
 #define X86_FEATURE_DE		(0*32+ 2) /* Debugging Extensions */
 #define X86_FEATURE_PSE		(0*32+ 3) /* Page Size Extensions */
 #define X86_FEATURE_TSC		(0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR		(0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
+#define X86_FEATURE_MSR		(0*32+ 5) /* Model-Specific Registers */
 #define X86_FEATURE_PAE		(0*32+ 6) /* Physical Address Extensions */
 #define X86_FEATURE_MCE		(0*32+ 7) /* Machine Check Architecture */
 #define X86_FEATURE_CX8		(0*32+ 8) /* CMPXCHG8 instruction */
@@ -23,22 +29,23 @@
 #define X86_FEATURE_MTRR	(0*32+12) /* Memory Type Range Registers */
 #define X86_FEATURE_PGE		(0*32+13) /* Page Global Enable */
 #define X86_FEATURE_MCA		(0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV	(0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
+#define X86_FEATURE_CMOV	(0*32+15) /* CMOV instructions */
+					  /* (plus FCMOVcc, FCOMI with FPU) */
 #define X86_FEATURE_PAT		(0*32+16) /* Page Attribute Table */
 #define X86_FEATURE_PSE36	(0*32+17) /* 36-bit PSEs */
 #define X86_FEATURE_PN		(0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLSH	(0*32+19) /* Supports the CLFLUSH instruction */
-#define X86_FEATURE_DS		(0*32+21) /* Debug Store */
+#define X86_FEATURE_CLFLSH	(0*32+19) /* "clflush" CLFLUSH instruction */
+#define X86_FEATURE_DS		(0*32+21) /* "dts" Debug Store */
 #define X86_FEATURE_ACPI	(0*32+22) /* ACPI via MSR */
 #define X86_FEATURE_MMX		(0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR	(0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
-					  /* of FPU context), and CR4.OSFXSR available */
-#define X86_FEATURE_XMM		(0*32+25) /* Streaming SIMD Extensions */
-#define X86_FEATURE_XMM2	(0*32+26) /* Streaming SIMD Extensions-2 */
-#define X86_FEATURE_SELFSNOOP	(0*32+27) /* CPU self snoop */
+#define X86_FEATURE_FXSR	(0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_XMM		(0*32+25) /* "sse" */
+#define X86_FEATURE_XMM2	(0*32+26) /* "sse2" */
+#define X86_FEATURE_SELFSNOOP	(0*32+27) /* "ss" CPU self snoop */
 #define X86_FEATURE_HT		(0*32+28) /* Hyper-Threading */
-#define X86_FEATURE_ACC		(0*32+29) /* Automatic clock control */
+#define X86_FEATURE_ACC		(0*32+29) /* "tm" Automatic clock control */
 #define X86_FEATURE_IA64	(0*32+30) /* IA-64 processor */
+#define X86_FEATURE_PBE		(0*32+31) /* Pending Break Enable */
 
 /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
 /* Don't duplicate feature flags which are redundant with Intel! */
@@ -46,7 +53,8 @@
 #define X86_FEATURE_MP		(1*32+19) /* MP Capable. */
 #define X86_FEATURE_NX		(1*32+20) /* Execute Disable */
 #define X86_FEATURE_MMXEXT	(1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_GBPAGES	(1*32+26) /* GB pages */
+#define X86_FEATURE_FXSR_OPT	(1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_GBPAGES	(1*32+26) /* "pdpe1gb" GB pages */
 #define X86_FEATURE_RDTSCP	(1*32+27) /* RDTSCP */
 #define X86_FEATURE_LM		(1*32+29) /* Long Mode (x86-64) */
 #define X86_FEATURE_3DNOWEXT	(1*32+30) /* AMD 3DNow! extensions */
@@ -64,54 +72,79 @@
 #define X86_FEATURE_CYRIX_ARR	(3*32+ 2) /* Cyrix ARRs (= MTRRs) */
 #define X86_FEATURE_CENTAUR_MCR	(3*32+ 3) /* Centaur MCRs (= MTRRs) */
 /* cpu types for specific tunings: */
-#define X86_FEATURE_K8		(3*32+ 4) /* Opteron, Athlon64 */
-#define X86_FEATURE_K7		(3*32+ 5) /* Athlon */
-#define X86_FEATURE_P3		(3*32+ 6) /* P3 */
-#define X86_FEATURE_P4		(3*32+ 7) /* P4 */
+#define X86_FEATURE_K8		(3*32+ 4) /* "" Opteron, Athlon64 */
+#define X86_FEATURE_K7		(3*32+ 5) /* "" Athlon */
+#define X86_FEATURE_P3		(3*32+ 6) /* "" P3 */
+#define X86_FEATURE_P4		(3*32+ 7) /* "" P4 */
 #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
 #define X86_FEATURE_UP		(3*32+ 9) /* smp kernel running on up */
-#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_PEBS	(3*32+12) /* Precise-Event Based Sampling */
 #define X86_FEATURE_BTS		(3*32+13) /* Branch Trace Store */
-#define X86_FEATURE_SYSCALL32	(3*32+14) /* syscall in ia32 userspace */
-#define X86_FEATURE_SYSENTER32	(3*32+15) /* sysenter in ia32 userspace */
-#define X86_FEATURE_REP_GOOD	(3*32+16) /* rep microcode works well on this CPU */
-#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */
-#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
-#define X86_FEATURE_11AP	(3*32+19) /* Bad local APIC aka 11AP */
+#define X86_FEATURE_SYSCALL32	(3*32+14) /* "" syscall in ia32 userspace */
+#define X86_FEATURE_SYSENTER32	(3*32+15) /* "" sysenter in ia32 userspace */
+#define X86_FEATURE_REP_GOOD	(3*32+16) /* rep microcode works well */
+#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */
+#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
+#define X86_FEATURE_11AP	(3*32+19) /* "" Bad local APIC aka 11AP */
 #define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_AMDC1E	(3*32+21) /* AMD C1E detected */
+#define X86_FEATURE_XTOPOLOGY	(3*32+21) /* cpu topology enum extensions */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
-#define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
-#define X86_FEATURE_MWAIT	(4*32+ 3) /* Monitor/Mwait support */
-#define X86_FEATURE_DSCPL	(4*32+ 4) /* CPL Qualified Debug Store */
+#define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
+#define X86_FEATURE_PCLMULQDQ	(4*32+ 1) /* PCLMULQDQ instruction */
+#define X86_FEATURE_DTES64	(4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_MWAIT	(4*32+ 3) /* "monitor" Monitor/Mwait support */
+#define X86_FEATURE_DSCPL	(4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
+#define X86_FEATURE_VMX		(4*32+ 5) /* Hardware virtualization */
+#define X86_FEATURE_SMX		(4*32+ 6) /* Safer mode */
 #define X86_FEATURE_EST		(4*32+ 7) /* Enhanced SpeedStep */
 #define X86_FEATURE_TM2		(4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3	(4*32+ 9) /* Supplemental SSE-3 */
 #define X86_FEATURE_CID		(4*32+10) /* Context ID */
+#define X86_FEATURE_FMA		(4*32+12) /* Fused multiply-add */
 #define X86_FEATURE_CX16	(4*32+13) /* CMPXCHG16B */
 #define X86_FEATURE_XTPR	(4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM	(4*32+15) /* Performance Capabilities */
 #define X86_FEATURE_DCA		(4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_XMM4_1	(4*32+19) /* "sse4_1" SSE-4.1 */
+#define X86_FEATURE_XMM4_2	(4*32+20) /* "sse4_2" SSE-4.2 */
 #define X86_FEATURE_X2APIC	(4*32+21) /* x2APIC */
-#define X86_FEATURE_XMM4_2	(4*32+20) /* Streaming SIMD Extensions-4.2 */
+#define X86_FEATURE_AES		(4*32+25) /* AES instructions */
+#define X86_FEATURE_XSAVE	(4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
+#define X86_FEATURE_OSXSAVE	(4*32+27) /* "" XSAVE enabled in the OS */
+#define X86_FEATURE_AVX		(4*32+28) /* Advanced Vector Extensions */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
-#define X86_FEATURE_XSTORE	(5*32+ 2) /* on-CPU RNG present (xstore insn) */
-#define X86_FEATURE_XSTORE_EN	(5*32+ 3) /* on-CPU RNG enabled */
-#define X86_FEATURE_XCRYPT	(5*32+ 6) /* on-CPU crypto (xcrypt insn) */
-#define X86_FEATURE_XCRYPT_EN	(5*32+ 7) /* on-CPU crypto enabled */
+#define X86_FEATURE_XSTORE	(5*32+ 2) /* "rng" RNG present (xstore) */
+#define X86_FEATURE_XSTORE_EN	(5*32+ 3) /* "rng_en" RNG enabled */
+#define X86_FEATURE_XCRYPT	(5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
+#define X86_FEATURE_XCRYPT_EN	(5*32+ 7) /* "ace_en" on-CPU crypto enabled */
 #define X86_FEATURE_ACE2	(5*32+ 8) /* Advanced Cryptography Engine v2 */
 #define X86_FEATURE_ACE2_EN	(5*32+ 9) /* ACE v2 enabled */
-#define X86_FEATURE_PHE		(5*32+ 10) /* PadLock Hash Engine */
-#define X86_FEATURE_PHE_EN	(5*32+ 11) /* PHE enabled */
-#define X86_FEATURE_PMM		(5*32+ 12) /* PadLock Montgomery Multiplier */
-#define X86_FEATURE_PMM_EN	(5*32+ 13) /* PMM enabled */
+#define X86_FEATURE_PHE		(5*32+10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN	(5*32+11) /* PHE enabled */
+#define X86_FEATURE_PMM		(5*32+12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN	(5*32+13) /* PMM enabled */
 
 /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
 #define X86_FEATURE_LAHF_LM	(6*32+ 0) /* LAHF/SAHF in long mode */
 #define X86_FEATURE_CMP_LEGACY	(6*32+ 1) /* If yes HyperThreading not valid */
-#define X86_FEATURE_IBS		(6*32+ 10) /* Instruction Based Sampling */
+#define X86_FEATURE_SVM		(6*32+ 2) /* Secure virtual machine */
+#define X86_FEATURE_EXTAPIC	(6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_CR8_LEGACY	(6*32+ 4) /* CR8 in 32-bit mode */
+#define X86_FEATURE_ABM		(6*32+ 5) /* Advanced bit manipulation */
+#define X86_FEATURE_SSE4A	(6*32+ 6) /* SSE-4A */
+#define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */
+#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
+#define X86_FEATURE_OSVW	(6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_IBS		(6*32+10) /* Instruction Based Sampling */
+#define X86_FEATURE_SSE5	(6*32+11) /* SSE-5 */
+#define X86_FEATURE_SKINIT	(6*32+12) /* SKINIT/STGI instructions */
+#define X86_FEATURE_WDT		(6*32+13) /* Watchdog timer */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
@@ -119,6 +152,13 @@
  */
 #define X86_FEATURE_IDA		(7*32+ 0) /* Intel Dynamic Acceleration */
 
+/* Virtualization flags: Linux defined */
+#define X86_FEATURE_TPR_SHADOW	(8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_VNMI	(8*32+ 1) /* Intel Virtual NMI */
+#define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */
+#define X86_FEATURE_EPT		(8*32+ 3) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID	(8*32+ 4) /* Intel Virtual Processor ID */
+
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
 #include <linux/bitops.h>
@@ -152,7 +192,7 @@ extern const char * const x86_power_flags[32];
 } while (0)
 #define setup_force_cpu_cap(bit) do { \
 	set_cpu_cap(&boot_cpu_data, bit);	\
-	clear_bit(bit, (unsigned long *)cleared_cpu_caps); \
+	clear_bit(bit, (unsigned long *)cleared_cpu_caps);	\
 } while (0)
 
 #define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
@@ -193,8 +233,10 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_gbpages		boot_cpu_has(X86_FEATURE_GBPAGES)
 #define cpu_has_arch_perfmon	boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 #define cpu_has_pat		boot_cpu_has(X86_FEATURE_PAT)
-#define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
+#define cpu_has_xmm4_1		boot_cpu_has(X86_FEATURE_XMM4_1)
 #define cpu_has_xmm4_2		boot_cpu_has(X86_FEATURE_XMM4_2)
+#define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
+#define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg		1
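
The new word-4 bits above are consumed through the usual boot_cpu_has()/cpu_has_* helpers. A minimal sketch (not part of this commit) of how a caller would gate an XSAVE-aware path on the new cpu_has_xsave macro:

#include <asm/cpufeature.h>

/* illustrative helper name; boot_cpu_data must already be initialized */
static inline int example_xsave_usable(void)
{
	return cpu_has_fpu && cpu_has_xsave;
}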
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index f52daf176bcb..5abbdec06bd2 100644
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h
@@ -43,6 +43,7 @@
 #define E820_RESERVED	2
 #define E820_ACPI	3
 #define E820_NVS	4
+#define E820_UNUSABLE	5
 
 /* reserved RAM used by kernel itself */
 #define E820_RESERVED_KERN        128
@@ -121,6 +122,7 @@ extern void e820_register_active_regions(int nid, unsigned long start_pfn,
 extern u64 e820_hole_size(u64 start, u64 end);
 extern void finish_e820_parsing(void);
 extern void e820_reserve_resources(void);
+extern void e820_reserve_resources_late(void);
 extern void setup_memory_map(void);
 extern char *default_machine_specific_memory_setup(void);
 extern char *machine_specific_memory_setup(void);
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 1ecdc3ed96e4..9ba862a4eac0 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -19,7 +19,9 @@
 #include <asm/sigcontext.h>
 #include <asm/user.h>
 #include <asm/uaccess.h>
+#include <asm/xsave.h>
 
+extern unsigned int sig_xstate_size;
 extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
@@ -31,12 +33,18 @@ extern user_regset_active_fn fpregs_active, xfpregs_active;
 extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
 extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set;
 
+extern struct _fpx_sw_bytes fx_sw_reserved;
 #ifdef CONFIG_IA32_EMULATION
+extern unsigned int sig_xstate_ia32_size;
+extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
 struct _fpstate_ia32;
-extern int save_i387_ia32(struct _fpstate_ia32 __user *buf);
-extern int restore_i387_ia32(struct _fpstate_ia32 __user *buf);
+struct _xstate_ia32;
+extern int save_i387_xstate_ia32(void __user *buf);
+extern int restore_i387_xstate_ia32(void __user *buf);
 #endif
 
+#define X87_FSW_ES	(1 << 7)	/* Exception Summary */
+
 #ifdef CONFIG_X86_64
 
 /* Ignore delayed exceptions from user space */
@@ -47,7 +55,7 @@ static inline void tolerant_fwait(void)
 		     _ASM_EXTABLE(1b, 2b));
 }
 
-static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
+static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 {
 	int err;
 
@@ -67,15 +75,31 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
 	return err;
 }
 
-#define X87_FSW_ES (1 << 7)	/* Exception Summary */
+static inline int restore_fpu_checking(struct task_struct *tsk)
+{
+	if (task_thread_info(tsk)->status & TS_XSAVE)
+		return xrstor_checking(&tsk->thread.xstate->xsave);
+	else
+		return fxrstor_checking(&tsk->thread.xstate->fxsave);
+}
 
 /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
    is pending. Clear the x87 state here by setting it to fixed
    values. The kernel data segment can be sometimes 0 and sometimes
    new user value. Both should be ok.
    Use the PDA as safe address because it should be already in L1. */
-static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
+static inline void clear_fpu_state(struct task_struct *tsk)
 {
+	struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
+	struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+
+	/*
+	 * xsave header may indicate the init state of the FP.
+	 */
+	if ((task_thread_info(tsk)->status & TS_XSAVE) &&
+	    !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
+		return;
+
 	if (unlikely(fx->swd & X87_FSW_ES))
 		asm volatile("fnclex");
 	alternative_input(ASM_NOP8 ASM_NOP2,
@@ -84,7 +108,7 @@ static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
 			  X86_FEATURE_FXSAVE_LEAK);
 }
 
-static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
+static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
 	int err;
 
@@ -108,7 +132,7 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
 	return err;
 }
 
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fxsave(struct task_struct *tsk)
 {
 	/* Using "rex64; fxsave %0" is broken because, if the memory operand
 	   uses any extended registers for addressing, a second REX prefix
@@ -133,7 +157,16 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 				     : "=m" (tsk->thread.xstate->fxsave)
 				     : "cdaSDb" (&tsk->thread.xstate->fxsave));
 #endif
-	clear_fpu_state(&tsk->thread.xstate->fxsave);
+}
+
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+	if (task_thread_info(tsk)->status & TS_XSAVE)
+		xsave(tsk);
+	else
+		fxsave(tsk);
+
+	clear_fpu_state(tsk);
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
@@ -148,6 +181,10 @@ static inline void tolerant_fwait(void)
 
 static inline void restore_fpu(struct task_struct *tsk)
 {
+	if (task_thread_info(tsk)->status & TS_XSAVE) {
+		xrstor_checking(&tsk->thread.xstate->xsave);
+		return;
+	}
 	/*
 	 * The "nop" is needed to make the instructions the same
 	 * length.
@@ -173,6 +210,27 @@ static inline void restore_fpu(struct task_struct *tsk)
  */
 static inline void __save_init_fpu(struct task_struct *tsk)
 {
+	if (task_thread_info(tsk)->status & TS_XSAVE) {
+		struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
+		struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+
+		xsave(tsk);
+
+		/*
+		 * xsave header may indicate the init state of the FP.
+		 */
+		if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
+			goto end;
+
+		if (unlikely(fx->swd & X87_FSW_ES))
+			asm volatile("fnclex");
+
+		/*
+		 * we can do a simple return here or be paranoid :)
+		 */
+		goto clear_state;
+	}
+
 	/* Use more nops than strictly needed in case the compiler
 	   varies code */
 	alternative_input(
@@ -182,6 +240,7 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 		X86_FEATURE_FXSR,
 		[fx] "m" (tsk->thread.xstate->fxsave),
 		[fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
+clear_state:
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
 	   is pending.  Clear the x87 state here by setting it to fixed
 	   values. safe_address is a random variable that should be in L1 */
@@ -191,16 +250,17 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 		   "fildl %[addr]",	/* set F?P to defined value */
 		   X86_FEATURE_FXSAVE_LEAK,
 		   [addr] "m" (safe_address));
+end:
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
+#endif	/* CONFIG_X86_64 */
+
 /*
  * Signal frame handlers...
  */
-extern int save_i387(struct _fpstate __user *buf);
-extern int restore_i387(struct _fpstate __user *buf);
-
-#endif	/* CONFIG_X86_64 */
+extern int save_i387_xstate(void __user *buf);
+extern int restore_i387_xstate(void __user *buf);
 
 static inline void __unlazy_fpu(struct task_struct *tsk)
 {
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index 3052f058ab06..0bb43301a202 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -176,6 +176,7 @@
 #define MSR_IA32_TSC			0x00000010
 #define MSR_IA32_PLATFORM_ID		0x00000017
 #define MSR_IA32_EBL_CR_POWERON		0x0000002a
+#define MSR_IA32_FEATURE_CONTROL        0x0000003a
 
 #define MSR_IA32_APICBASE		0x0000001b
 #define MSR_IA32_APICBASE_BSP		(1<<8)
@@ -310,4 +311,19 @@
 /* Geode defined MSRs */
 #define MSR_GEODE_BUSCONT_CONF0	0x00001900
 
+/* Intel VT MSRs */
+#define MSR_IA32_VMX_BASIC              0x00000480
+#define MSR_IA32_VMX_PINBASED_CTLS      0x00000481
+#define MSR_IA32_VMX_PROCBASED_CTLS     0x00000482
+#define MSR_IA32_VMX_EXIT_CTLS          0x00000483
+#define MSR_IA32_VMX_ENTRY_CTLS         0x00000484
+#define MSR_IA32_VMX_MISC               0x00000485
+#define MSR_IA32_VMX_CR0_FIXED0         0x00000486
+#define MSR_IA32_VMX_CR0_FIXED1         0x00000487
+#define MSR_IA32_VMX_CR4_FIXED0         0x00000488
+#define MSR_IA32_VMX_CR4_FIXED1         0x00000489
+#define MSR_IA32_VMX_VMCS_ENUM          0x0000048a
+#define MSR_IA32_VMX_PROCBASED_CTLS2    0x0000048b
+#define MSR_IA32_VMX_EPT_VPID_CAP       0x0000048c
+
 #endif /* ASM_X86__MSR_INDEX_H */
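
A hedged illustration (not from this commit) of how the new VT MSR indices are typically read in kernel code, assuming the usual rdmsrl() helper from <asm/msr.h> and a CPU that reports X86_FEATURE_VMX:

#include <asm/msr.h>
#include <asm/msr-index.h>

/* illustrative helper; only meaningful on VMX-capable CPUs */
static inline u64 example_read_vmx_basic(void)
{
	u64 basic;

	rdmsrl(MSR_IA32_VMX_BASIC, basic);
	return basic;
}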
diff --git a/include/asm-x86/processor-cyrix.h b/include/asm-x86/processor-cyrix.h
index 97568ada1f97..1198f2a0e42c 100644
--- a/include/asm-x86/processor-cyrix.h
+++ b/include/asm-x86/processor-cyrix.h
@@ -28,3 +28,11 @@ static inline void setCx86(u8 reg, u8 data)
 	outb(reg, 0x22);
 	outb(data, 0x23);
 }
+
+#define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); })
+
+#define setCx86_old(reg, data) do {	\
+	outb((reg), 0x22);		\
+	outb((data), 0x23);		\
+} while (0)
+
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h
index 5dd79774f693..dc5f0712f9fa 100644
--- a/include/asm-x86/processor-flags.h
+++ b/include/asm-x86/processor-flags.h
@@ -59,6 +59,7 @@
 #define X86_CR4_OSFXSR	0x00000200 /* enable fast FPU save and restore */
 #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
 #define X86_CR4_VMXE	0x00002000 /* enable VMX virtualization */
+#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
 
 /*
  * x86-64 Task Priority Register, CR8
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 5eaf9bf0a623..c7d35464a4bb 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -76,11 +76,11 @@ struct cpuinfo_x86 {
 	int			x86_tlbsize;
 	__u8			x86_virt_bits;
 	__u8			x86_phys_bits;
+#endif
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
 	/* Max extended CPUID function supported: */
 	__u32			extended_cpuid_level;
-#endif
 	/* Maximum supported CPUID level, -1=no CPUID: */
 	int			cpuid_level;
 	__u32			x86_capability[NCAPINTS];
@@ -166,11 +166,8 @@ extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
 
-#if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64)
+extern void detect_extended_topology(struct cpuinfo_x86 *c);
 extern void detect_ht(struct cpuinfo_x86 *c);
-#else
-static inline void detect_ht(struct cpuinfo_x86 *c) {}
-#endif
 
 static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
 				unsigned int *ecx, unsigned int *edx)
@@ -327,7 +324,12 @@ struct i387_fxsave_struct {
 	/* 16*16 bytes for each XMM-reg = 256 bytes: */
 	u32			xmm_space[64];
 
-	u32			padding[24];
+	u32			padding[12];
+
+	union {
+		u32		padding1[12];
+		u32		sw_reserved[12];
+	};
 
 } __attribute__((aligned(16)));
 
@@ -351,10 +353,23 @@ struct i387_soft_struct {
 	u32			entry_eip;
 };
 
+struct xsave_hdr_struct {
+	u64 xstate_bv;
+	u64 reserved1[2];
+	u64 reserved2[5];
+} __attribute__((packed));
+
+struct xsave_struct {
+	struct i387_fxsave_struct i387;
+	struct xsave_hdr_struct xsave_hdr;
+	/* new processor state extensions will go here */
+} __attribute__ ((packed, aligned (64)));
+
 union thread_xstate {
 	struct i387_fsave_struct	fsave;
 	struct i387_fxsave_struct	fxsave;
 	struct i387_soft_struct		soft;
+	struct xsave_struct		xsave;
 };
 
 #ifdef CONFIG_X86_64
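
The xsave_struct introduced here is the 512-byte legacy fxsave image followed immediately by the packed xsave header. A small compile-time sketch (illustrative only, not from this commit) of that layout assumption, using the kernel's BUILD_BUG_ON:

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <asm/processor.h>

/* hypothetical sanity helper; all three checks follow from the structs above */
static inline void example_xsave_layout_checks(void)
{
	BUILD_BUG_ON(sizeof(struct i387_fxsave_struct) != 512);
	BUILD_BUG_ON(offsetof(struct xsave_struct, xsave_hdr) != 512);
	BUILD_BUG_ON(sizeof(struct xsave_hdr_struct) != 64);
}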
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h
index 24879c85b291..ee813f4fe5d5 100644
--- a/include/asm-x86/sigcontext.h
+++ b/include/asm-x86/sigcontext.h
@@ -4,6 +4,40 @@
 #include <linux/compiler.h>
 #include <asm/types.h>
 
+#define FP_XSTATE_MAGIC1	0x46505853U
+#define FP_XSTATE_MAGIC2	0x46505845U
+#define FP_XSTATE_MAGIC2_SIZE	sizeof(FP_XSTATE_MAGIC2)
+
+/*
+ * bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame
+ * are reserved for SW usage. On cpu's supporting xsave/xrstor, these bytes
+ * are used to extended the fpstate pointer in the sigcontext, which now
+ * includes the extended state information along with fpstate information.
+ *
+ * Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved
+ * area and FP_XSTATE_MAGIC2 at the end of memory layout
+ * (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the
+ * extended state information in the memory layout pointed by the fpstate
+ * pointer in sigcontext.
+ */
+struct _fpx_sw_bytes {
+	__u32 magic1;		/* FP_XSTATE_MAGIC1 */
+	__u32 extended_size;	/* total size of the layout referred by
+				 * fpstate pointer in the sigcontext.
+				 */
+	__u64 xstate_bv;
+				/* feature bit mask (including fp/sse/extended
+				 * state) that is present in the memory
+				 * layout.
+				 */
+	__u32 xstate_size;	/* actual xsave state size, based on the
+				 * features saved in the layout.
+				 * 'extended_size' will be greater than
+				 * 'xstate_size'.
+				 */
+	__u32 padding[7];	/* for future use. */
+};
+
 #ifdef __i386__
 /*
  * As documented in the iBCS2 standard..
@@ -53,7 +87,13 @@ struct _fpstate {
 	unsigned long	reserved;
 	struct _fpxreg	_fxsr_st[8];	/* FXSR FPU reg data is ignored */
 	struct _xmmreg	_xmm[8];
-	unsigned long	padding[56];
+	unsigned long	padding1[44];
+
+	union {
+		unsigned long	padding2[12];
+		struct _fpx_sw_bytes sw_reserved;	/* represents the extended
+							 * state info */
+	};
 };
 
 #define X86_FXSR_MAGIC		0x0000
@@ -79,7 +119,15 @@ struct sigcontext {
 	unsigned long flags;
 	unsigned long sp_at_signal;
 	unsigned short ss, __ssh;
-	struct _fpstate __user *fpstate;
+
+	/*
+	 * fpstate is really (struct _fpstate *) or (struct _xstate *)
+	 * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
+	 * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
+	 * of extended memory layout. See comments at the defintion of
+	 * (struct _fpx_sw_bytes)
+	 */
+	void __user *fpstate;		/* zero when no FPU/extended context */
 	unsigned long oldmask;
 	unsigned long cr2;
 };
@@ -130,7 +178,12 @@ struct _fpstate {
 	__u32	mxcsr_mask;
 	__u32	st_space[32];	/* 8*16 bytes for each FP-reg */
 	__u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg */
-	__u32	reserved2[24];
+	__u32	reserved2[12];
+	union {
+		__u32	reserved3[12];
+		struct _fpx_sw_bytes sw_reserved; /* represents the extended
+						   * state information */
+	};
 };
 
 #ifdef __KERNEL__
@@ -161,7 +214,15 @@ struct sigcontext {
 	unsigned long trapno;
 	unsigned long oldmask;
 	unsigned long cr2;
-	struct _fpstate __user *fpstate;	/* zero when no FPU context */
+
+	/*
+	 * fpstate is really (struct _fpstate *) or (struct _xstate *)
+	 * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
+	 * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
+	 * of extended memory layout. See comments at the defintion of
+	 * (struct _fpx_sw_bytes)
+	 */
+	void __user *fpstate;		/* zero when no FPU/extended context */
 	unsigned long reserved1[8];
 };
 #else /* __KERNEL__ */
@@ -202,4 +263,22 @@ struct sigcontext {
 
 #endif /* !__i386__ */
 
+struct _xsave_hdr {
+	__u64 xstate_bv;
+	__u64 reserved1[2];
+	__u64 reserved2[5];
+};
+
+/*
+ * Extended state pointed by the fpstate pointer in the sigcontext.
+ * In addition to the fpstate, information encoded in the xstate_hdr
+ * indicates the presence of other extended state information
+ * supported by the processor and OS.
+ */
+struct _xstate {
+	struct _fpstate fpstate;
+	struct _xsave_hdr xstate_hdr;
+	/* new processor state extensions go here */
+};
+
 #endif /* ASM_X86__SIGCONTEXT_H */
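
Per the comment block above, FP_XSTATE_MAGIC1 sits at the start of the sw_reserved bytes, i.e. bytes 464..511 of the 512-byte fxsave image. A hypothetical userspace check under that assumption (64-bit _fpstate layout; the 32-bit frame places sw_reserved at a different offset):

#define FP_XSTATE_MAGIC1	0x46505853U

/* returns nonzero if the signal frame's fpstate carries extended state */
static int example_frame_has_xstate(const void *fpstate)
{
	const unsigned int *sw;

	if (!fpstate)
		return 0;
	/* sw_reserved occupies bytes 464..511; magic1 is its first word */
	sw = (const unsigned int *)((const char *)fpstate + 464);
	return *sw == FP_XSTATE_MAGIC1;
}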
diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h
index 4e2ec732dd01..8c347032c2f2 100644
--- a/include/asm-x86/sigcontext32.h
+++ b/include/asm-x86/sigcontext32.h
@@ -40,7 +40,11 @@ struct _fpstate_ia32 {
 	__u32	reserved;
 	struct _fpxreg	_fxsr_st[8];
 	struct _xmmreg	_xmm[8];	/* It's actually 16 */
-	__u32	padding[56];
+	__u32	padding[44];
+	union {
+		__u32	padding2[12];
+		struct _fpx_sw_bytes sw_reserved;
+	};
 };
 
 struct sigcontext_ia32 {
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index 4db0066a3a35..3f4e52bb77f5 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -241,6 +241,7 @@ static inline struct thread_info *stack_thread_info(void)
 #define TS_POLLING		0x0004	/* true if in idle loop
 					   and not sleeping */
 #define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */
+#define TS_XSAVE		0x0010	/* Use xsave/xrstor */
 
 #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
diff --git a/include/asm-x86/ucontext.h b/include/asm-x86/ucontext.h
index 9948dd328084..89eaa5456a7e 100644
--- a/include/asm-x86/ucontext.h
+++ b/include/asm-x86/ucontext.h
@@ -1,6 +1,12 @@
 #ifndef ASM_X86__UCONTEXT_H
 #define ASM_X86__UCONTEXT_H
 
+#define UC_FP_XSTATE	0x1	/* indicates the presence of extended state
+				 * information in the memory layout pointed
+				 * by the fpstate pointer in the ucontext's
+				 * sigcontext struct (uc_mcontext).
+				 */
+
 struct ucontext {
 	unsigned long	  uc_flags;
 	struct ucontext  *uc_link;
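
A hypothetical signal-handler fragment (not from this commit) showing the intended use of the new UC_FP_XSTATE bit; uc_flags is the first field of struct ucontext above, and the glibc ucontext_t exposes it under the same name:

#include <signal.h>
#include <ucontext.h>

#define UC_FP_XSTATE	0x1

static void example_handler(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *uc = ctx;

	if (uc->uc_flags & UC_FP_XSTATE) {
		/* the fpstate pointer in uc_mcontext refers to a
		 * (struct _xstate) frame, not just (struct _fpstate) */
	}
	(void)sig;
	(void)info;
}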
diff --git a/include/asm-x86/xcr.h b/include/asm-x86/xcr.h
new file mode 100644
index 000000000000..f2cba4e79a23
--- /dev/null
+++ b/include/asm-x86/xcr.h
@@ -0,0 +1,49 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ *   Copyright 2008 rPath, Inc. - All Rights Reserved
+ *
+ *   This file is part of the Linux kernel, and is made available under
+ *   the terms of the GNU General Public License version 2 or (at your
+ *   option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * asm-x86/xcr.h
+ *
+ * Definitions for the eXtended Control Register instructions
+ */
+
+#ifndef _ASM_X86_XCR_H
+#define _ASM_X86_XCR_H
+
+#define XCR_XFEATURE_ENABLED_MASK	0x00000000
+
+#ifdef __KERNEL__
+# ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+static inline u64 xgetbv(u32 index)
+{
+	u32 eax, edx;
+
+	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
+		     : "=a" (eax), "=d" (edx)
+		     : "c" (index));
+	return eax + ((u64)edx << 32);
+}
+
+static inline void xsetbv(u32 index, u64 value)
+{
+	u32 eax = value;
+	u32 edx = value >> 32;
+
+	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
+		     : : "a" (eax), "d" (edx), "c" (index));
+}
+
+# endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_XCR_H */
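
A small sketch (illustrative, kernel context assumed, CR4.OSXSAVE already set) combining the new xgetbv() helper with the XSTATE_* bits defined in xsave.h below, to check what XCR0 currently enables:

#include <asm/xcr.h>
#include <asm/xsave.h>

/* hypothetical helper: are both x87 and SSE state enabled in XCR0? */
static inline int example_fp_sse_enabled_in_xcr0(void)
{
	u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);

	return (xcr0 & XSTATE_FPSSE) == XSTATE_FPSSE;
}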
diff --git a/include/asm-x86/xsave.h b/include/asm-x86/xsave.h
new file mode 100644
index 000000000000..08e9a1ac07a9
--- /dev/null
+++ b/include/asm-x86/xsave.h
@@ -0,0 +1,118 @@
+#ifndef __ASM_X86_XSAVE_H
+#define __ASM_X86_XSAVE_H
+
+#include <linux/types.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+
+#define XSTATE_FP	0x1
+#define XSTATE_SSE	0x2
+
+#define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)
+
+#define FXSAVE_SIZE	512
+
+/*
+ * These are the features that the OS can handle currently.
+ */
+#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE)
+
+#ifdef CONFIG_X86_64
+#define REX_PREFIX	"0x48, "
+#else
+#define REX_PREFIX
+#endif
+
+extern unsigned int xstate_size;
+extern u64 pcntxt_mask;
+extern struct xsave_struct *init_xstate_buf;
+
+extern void xsave_cntxt_init(void);
+extern void xsave_init(void);
+extern int init_fpu(struct task_struct *child);
+extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
+			    void __user *fpstate,
+			    struct _fpx_sw_bytes *sw);
+
+static inline int xrstor_checking(struct xsave_struct *fx)
+{
+	int err;
+
+	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
+		     "2:\n"
+		     ".section .fixup,\"ax\"\n"
+		     "3:  movl $-1,%[err]\n"
+		     "    jmp  2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [err] "=r" (err)
+		     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
+		     : "memory");
+
+	return err;
+}
+
+static inline int xsave_user(struct xsave_struct __user *buf)
+{
+	int err;
+	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
+			     "2:\n"
+			     ".section .fixup,\"ax\"\n"
+			     "3:  movl $-1,%[err]\n"
+			     "    jmp  2b\n"
+			     ".previous\n"
+			     ".section __ex_table,\"a\"\n"
+			     _ASM_ALIGN "\n"
+			     _ASM_PTR "1b,3b\n"
+			     ".previous"
+			     : [err] "=r" (err)
+			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
+			     : "memory");
+	if (unlikely(err) && __clear_user(buf, xstate_size))
+		err = -EFAULT;
+	/* No need to clear here because the caller clears USED_MATH */
+	return err;
+}
+
+static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
+{
+	int err;
+	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
+
+	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+			     "2:\n"
+			     ".section .fixup,\"ax\"\n"
+			     "3:  movl $-1,%[err]\n"
+			     "    jmp  2b\n"
+			     ".previous\n"
+			     ".section __ex_table,\"a\"\n"
+			     _ASM_ALIGN "\n"
+			     _ASM_PTR "1b,3b\n"
+			     ".previous"
+			     : [err] "=r" (err)
+			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
+			     : "memory");	/* memory required? */
+	return err;
+}
+
+static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
+{
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
+
+	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
+		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+		     :   "memory");
+}
+
+static inline void xsave(struct task_struct *tsk)
+{
+	/* This, however, we can work around by forcing the compiler to select
+	   an addressing mode that doesn't require extended registers. */
+	__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
+			     : : "D" (&(tsk->thread.xstate->xsave)),
+			         "a" (-1), "d"(-1) :  "memory");
+}
+#endif
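
Taken together, the pieces in this series imply the enabling sequence an OS performs: set CR4.OSXSAVE (processor-flags.h), then program XCR0 with the state bits it can handle (XCNTXT_MASK) via xsetbv(). A hedged sketch of that sequence, not the kernel's actual xsave_init():

#include <asm/processor-flags.h>
#include <asm/xcr.h>
#include <asm/xsave.h>

/* hypothetical helper; the caller is assumed to have already set
 * X86_CR4_OSXSAVE in CR4, otherwise xsetbv() faults */
static inline void example_enable_xsave(void)
{
	xsetbv(XCR_XFEATURE_ENABLED_MASK, XCNTXT_MASK);
}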