author     Arnd Bergmann <arnd@arndb.de>   2012-10-04 16:57:00 -0400
committer  Arnd Bergmann <arnd@arndb.de>   2012-10-04 16:57:51 -0400
commit     c37d6154c0b9163c27e53cc1d0be3867b4abd760 (patch)
tree       7a24522c56d1cb284dff1d3c225bbdaba0901bb5 /arch/x86/include
parent     e7a570ff7dff9af6e54ff5e580a61ec7652137a0 (diff)
parent     8a1ab3155c2ac7fbe5f2038d6e26efeb607a1498 (diff)
Merge branch 'disintegrate-asm-generic' of git://git.infradead.org/users/dhowells/linux-headers into asm-generic
Patches from David Howells <dhowells@redhat.com>:

This is to complete part of the UAPI disintegration for which the preparatory
patches were pulled recently.  Note that there are some fixup patches which are
at the base of the branch aimed at you, plus all arches get the asm-generic
branch merged in too.

* 'disintegrate-asm-generic' of git://git.infradead.org/users/dhowells/linux-headers:
  UAPI: (Scripted) Disintegrate include/asm-generic
  UAPI: Fix conditional header installation handling (notably kvm_para.h on m68k)
  c6x: remove c6x signal.h
  UAPI: Split compound conditionals containing __KERNEL__ in Arm64
  UAPI: Fix the guards on various asm/unistd.h files

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/alternative-asm.h   |    9
-rw-r--r--  arch/x86/include/asm/alternative.h       |   36
-rw-r--r--  arch/x86/include/asm/atomic.h            |    4
-rw-r--r--  arch/x86/include/asm/bitops.h            |   14
-rw-r--r--  arch/x86/include/asm/calling.h           |   50
-rw-r--r--  arch/x86/include/asm/checksum.h          |    4
-rw-r--r--  arch/x86/include/asm/cmpxchg.h           |    4
-rw-r--r--  arch/x86/include/asm/cpufeature.h        |    6
-rw-r--r--  arch/x86/include/asm/fpu-internal.h      |  422
-rw-r--r--  arch/x86/include/asm/ftrace.h            |   56
-rw-r--r--  arch/x86/include/asm/futex.h             |   19
-rw-r--r--  arch/x86/include/asm/hardirq.h           |    4
-rw-r--r--  arch/x86/include/asm/hpet.h              |    2
-rw-r--r--  arch/x86/include/asm/i387.h              |   29
-rw-r--r--  arch/x86/include/asm/iommu_table.h       |    6
-rw-r--r--  arch/x86/include/asm/kprobes.h           |    1
-rw-r--r--  arch/x86/include/asm/kvm.h               |   16
-rw-r--r--  arch/x86/include/asm/kvm_host.h          |   16
-rw-r--r--  arch/x86/include/asm/mce.h               |   13
-rw-r--r--  arch/x86/include/asm/microcode.h         |   10
-rw-r--r--  arch/x86/include/asm/mmzone.h            |    4
-rw-r--r--  arch/x86/include/asm/msr-index.h         |    3
-rw-r--r--  arch/x86/include/asm/mutex.h             |    4
-rw-r--r--  arch/x86/include/asm/numa.h              |    4
-rw-r--r--  arch/x86/include/asm/pci.h               |    2
-rw-r--r--  arch/x86/include/asm/perf_event.h        |    2
-rw-r--r--  arch/x86/include/asm/perf_regs.h         |   33
-rw-r--r--  arch/x86/include/asm/pgtable.h           |    4
-rw-r--r--  arch/x86/include/asm/pgtable_types.h     |   10
-rw-r--r--  arch/x86/include/asm/posix_types.h       |   10
-rw-r--r--  arch/x86/include/asm/processor-flags.h   |    1
-rw-r--r--  arch/x86/include/asm/processor.h         |    3
-rw-r--r--  arch/x86/include/asm/rcu.h               |   32
-rw-r--r--  arch/x86/include/asm/seccomp.h           |    4
-rw-r--r--  arch/x86/include/asm/signal.h            |    4
-rw-r--r--  arch/x86/include/asm/smap.h              |   91
-rw-r--r--  arch/x86/include/asm/string.h            |    4
-rw-r--r--  arch/x86/include/asm/suspend.h           |    4
-rw-r--r--  arch/x86/include/asm/svm.h               |  205
-rw-r--r--  arch/x86/include/asm/sys_ia32.h          |    2
-rw-r--r--  arch/x86/include/asm/thread_info.h       |   10
-rw-r--r--  arch/x86/include/asm/uaccess.h           |   32
-rw-r--r--  arch/x86/include/asm/uaccess_32.h        |    3
-rw-r--r--  arch/x86/include/asm/uaccess_64.h        |    3
-rw-r--r--  arch/x86/include/asm/uprobes.h           |    3
-rw-r--r--  arch/x86/include/asm/user.h              |    4
-rw-r--r--  arch/x86/include/asm/vdso.h              |    3
-rw-r--r--  arch/x86/include/asm/vmx.h               |  127
-rw-r--r--  arch/x86/include/asm/x86_init.h          |    9
-rw-r--r--  arch/x86/include/asm/xen/interface.h     |   11
-rw-r--r--  arch/x86/include/asm/xen/swiotlb-xen.h   |    2
-rw-r--r--  arch/x86/include/asm/xor.h               |    4
-rw-r--r--  arch/x86/include/asm/xor_32.h            |   58
-rw-r--r--  arch/x86/include/asm/xor_64.h            |   63
-rw-r--r--  arch/x86/include/asm/xor_avx.h           |   54
-rw-r--r--  arch/x86/include/asm/xsave.h             |   23
-rw-r--r--  arch/x86/include/uapi/asm/Kbuild         |    6
57 files changed, 934 insertions, 628 deletions
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 952bd0100c5c..372231c22a47 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -1,3 +1,6 @@
+#ifndef _ASM_X86_ALTERNATIVE_ASM_H
+#define _ASM_X86_ALTERNATIVE_ASM_H
+
 #ifdef __ASSEMBLY__
 
 #include <asm/asm.h>
@@ -5,10 +8,10 @@
 #ifdef CONFIG_SMP
 	.macro LOCK_PREFIX
 672:	lock
-	.section .smp_locks,"a"
+	.pushsection .smp_locks,"a"
 	.balign 4
 	.long 672b - .
-	.previous
+	.popsection
 	.endm
 #else
 	.macro LOCK_PREFIX
@@ -24,3 +27,5 @@
 .endm
 
 #endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_ALTERNATIVE_ASM_H */
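The .section/.previous to .pushsection/.popsection conversion in this and the following header matters because .previous only swaps the current section with the previously used one, while .pushsection/.popsection keep a real stack. Once section switches nest, the two directives diverge; a small GNU as sketch (illustrative only, not part of the patch):

	.text
	.section .data			# current: .data, previous: .text
	.section .smp_locks, "a"	# current: .smp_locks, previous: .data
	.previous			# back to .data
	.previous			# back to .smp_locks, *not* .text

	.text
	.pushsection .data
	.pushsection .smp_locks, "a"
	.popsection			# back to .data
	.popsection			# back to .text, as intended

Since these macros can expand inside code that has already switched sections, the push/pop form is the safe one.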
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 70780689599a..58ed6d96a6ac 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -29,10 +29,10 @@
 
 #ifdef CONFIG_SMP
 #define LOCK_PREFIX_HERE \
-		".section .smp_locks,\"a\"\n"	\
+		".pushsection .smp_locks,\"a\"\n"	\
 		".balign 4\n"			\
 		".long 671f - .\n" /* offset */	\
-		".previous\n"			\
+		".popsection\n"			\
 		"671:"
 
 #define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
@@ -60,7 +60,7 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
 					void *locks, void *locks_end,
 					void *text, void *text_end);
 extern void alternatives_smp_module_del(struct module *mod);
-extern void alternatives_smp_switch(int smp);
+extern void alternatives_enable_smp(void);
 extern int alternatives_text_reserved(void *start, void *end);
 extern bool skip_smp_alternatives;
 #else
@@ -68,7 +68,7 @@ static inline void alternatives_smp_module_add(struct module *mod, char *name,
 					void *locks, void *locks_end,
 					void *text, void *text_end) {}
 static inline void alternatives_smp_module_del(struct module *mod) {}
-static inline void alternatives_smp_switch(int smp) {}
+static inline void alternatives_enable_smp(void) {}
 static inline int alternatives_text_reserved(void *start, void *end)
 {
 	return 0;
@@ -99,30 +99,30 @@ static inline int alternatives_text_reserved(void *start, void *end)
 /* alternative assembly primitive: */
 #define ALTERNATIVE(oldinstr, newinstr, feature)			\
 	OLDINSTR(oldinstr)						\
-	".section .altinstructions,\"a\"\n"				\
+	".pushsection .altinstructions,\"a\"\n"				\
 	ALTINSTR_ENTRY(feature, 1)					\
-	".previous\n"							\
-	".section .discard,\"aw\",@progbits\n"				\
+	".popsection\n"							\
+	".pushsection .discard,\"aw\",@progbits\n"			\
 	DISCARD_ENTRY(1)						\
-	".previous\n"							\
-	".section .altinstr_replacement, \"ax\"\n"			\
+	".popsection\n"							\
+	".pushsection .altinstr_replacement, \"ax\"\n"			\
 	ALTINSTR_REPLACEMENT(newinstr, feature, 1)			\
-	".previous"
+	".popsection"
 
 #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
 	OLDINSTR(oldinstr)						\
-	".section .altinstructions,\"a\"\n"				\
+	".pushsection .altinstructions,\"a\"\n"				\
 	ALTINSTR_ENTRY(feature1, 1)					\
 	ALTINSTR_ENTRY(feature2, 2)					\
-	".previous\n"							\
-	".section .discard,\"aw\",@progbits\n"				\
+	".popsection\n"							\
+	".pushsection .discard,\"aw\",@progbits\n"			\
 	DISCARD_ENTRY(1)						\
 	DISCARD_ENTRY(2)						\
-	".previous\n"							\
-	".section .altinstr_replacement, \"ax\"\n"			\
+	".popsection\n"							\
+	".pushsection .altinstr_replacement, \"ax\"\n"			\
 	ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)			\
 	ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)			\
-	".previous"
+	".popsection"
 
 /*
  * This must be included *after* the definition of ALTERNATIVE due to
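For context, ALTERNATIVE() is consumed from inline asm in C code: the old instruction is emitted in place, while the replacement and a patch-site descriptor land in the pushed sections so the boot-time alternatives code can rewrite the call site when the feature bit is set. A minimal, illustrative caller (the particular instructions and feature flag here are only an example, not something added by this patch):

	/* Illustrative only: use MFENCE as a full barrier when SSE2 is
	 * present, otherwise fall back to a locked add; this mirrors how
	 * the 32-bit barrier macros use the facility. */
	static inline void barrier_sketch(void)
	{
		asm volatile(ALTERNATIVE("lock; addl $0,0(%%rsp)",
					 "mfence", X86_FEATURE_XMM2)
			     ::: "memory");
	}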
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 58cb6d4085f7..250b8774c158 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -309,9 +309,9 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
 #define smp_mb__after_atomic_inc()	barrier()
 
 #ifdef CONFIG_X86_32
-# include "atomic64_32.h"
+# include <asm/atomic64_32.h>
 #else
-# include "atomic64_64.h"
+# include <asm/atomic64_64.h>
 #endif
 
 #endif /* _ASM_X86_ATOMIC_H */
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 72f5009deb5a..6dfd0195bb55 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -355,7 +355,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
  */
 static inline unsigned long __ffs(unsigned long word)
 {
-	asm("bsf %1,%0"
+	asm("rep; bsf %1,%0"
 		: "=r" (word)
 		: "rm" (word));
 	return word;
@@ -369,7 +369,7 @@ static inline unsigned long __ffs(unsigned long word)
  */
 static inline unsigned long ffz(unsigned long word)
 {
-	asm("bsf %1,%0"
+	asm("rep; bsf %1,%0"
 		: "=r" (word)
 		: "r" (~word));
 	return word;
@@ -417,10 +417,9 @@ static inline int ffs(int x)
 	 * We cannot do this on 32 bits because at the very least some
 	 * 486 CPUs did not behave this way.
 	 */
-	long tmp = -1;
 	asm("bsfl %1,%0"
 	    : "=r" (r)
-	    : "rm" (x), "0" (tmp));
+	    : "rm" (x), "0" (-1));
 #elif defined(CONFIG_X86_CMOV)
 	asm("bsfl %1,%0\n\t"
 	    "cmovzl %2,%0"
@@ -459,10 +458,9 @@ static inline int fls(int x)
 	 * We cannot do this on 32 bits because at the very least some
 	 * 486 CPUs did not behave this way.
 	 */
-	long tmp = -1;
 	asm("bsrl %1,%0"
 	    : "=r" (r)
-	    : "rm" (x), "0" (tmp));
+	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
 	asm("bsrl %1,%0\n\t"
 	    "cmovzl %2,%0"
@@ -490,13 +488,13 @@ static inline int fls(int x)
 #ifdef CONFIG_X86_64
 static __always_inline int fls64(__u64 x)
 {
-	long bitpos = -1;
+	int bitpos = -1;
 	/*
 	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
 	 * dest reg is undefined if x==0, but their CPU architect says its
 	 * value is written to set it to the same as before.
 	 */
-	asm("bsrq %1,%0"
+	asm("bsrq %1,%q0"
 	    : "+r" (bitpos)
 	    : "rm" (x));
 	return bitpos + 1;
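Two related changes appear above: the "0" (-1) input constraint replaces the tmp variable while keeping the trick that BSF/BSR leave the destination register untouched when the source is zero (so the final +1 turns the miss into a return of 0), and the added rep prefix is a harmless no-op on CPUs without BMI but decodes as TZCNT where that instruction exists. A stand-alone sketch of the constraint idiom in ffs()/fls() (user-space C, x86-only, not kernel code):

	/* "0" (-1) preloads the asm's output register with -1, so if bsfl
	 * writes nothing because the input was zero, the function returns 0. */
	static inline int ffs_sketch(int x)
	{
		int r;
		asm("bsfl %1,%0"
		    : "=r" (r)
		    : "rm" (x), "0" (-1));
		return r + 1;
	}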
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index a9e3a740f697..0fa675033912 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -46,41 +46,39 @@ For 32-bit we have the following conventions - kernel is built with
 
 */
 
-#include "dwarf2.h"
+#include <asm/dwarf2.h>
 
 /*
- * 64-bit system call stack frame layout defines and helpers, for
- * assembly code (note that the seemingly unnecessary parentheses
- * are to prevent cpp from inserting spaces in expressions that get
- * passed to macros):
+ * 64-bit system call stack frame layout defines and helpers,
+ * for assembly code:
  */
 
-#define R15		  (0)
-#define R14		  (8)
-#define R13		 (16)
-#define R12		 (24)
-#define RBP		 (32)
-#define RBX		 (40)
+#define R15		  0
+#define R14		  8
+#define R13		 16
+#define R12		 24
+#define RBP		 32
+#define RBX		 40
 
 /* arguments: interrupts/non tracing syscalls only save up to here: */
-#define R11		 (48)
-#define R10		 (56)
-#define R9		 (64)
-#define R8		 (72)
-#define RAX		 (80)
-#define RCX		 (88)
-#define RDX		 (96)
-#define RSI		(104)
-#define RDI		(112)
-#define ORIG_RAX	(120)       /* + error_code */
+#define R11		 48
+#define R10		 56
+#define R9		 64
+#define R8		 72
+#define RAX		 80
+#define RCX		 88
+#define RDX		 96
+#define RSI		104
+#define RDI		112
+#define ORIG_RAX	120       /* + error_code */
 /* end of arguments */
 
 /* cpu exception frame or undefined in case of fast syscall: */
-#define RIP		(128)
-#define CS		(136)
-#define EFLAGS		(144)
-#define RSP		(152)
-#define SS		(160)
+#define RIP		128
+#define CS		136
+#define EFLAGS		144
+#define RSP		152
+#define SS		160
 
 #define ARGOFFSET	R11
 #define SWFRAME		ORIG_RAX
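These are now bare numeric offsets into the 64-bit saved-register frame, usable directly as displacements from %rsp in assembly; the reworked ftrace macros later in this series reference the same names (RAX(%rsp), SS+8) instead of hard-coded byte offsets. Illustrative use only, not taken from the patch:

	movq RDI(%rsp), %rdi		# first argument slot
	movq ORIG_RAX(%rsp), %rax	# syscall number / error code slot
	movq RIP(%rsp), %rcx		# saved RIP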
diff --git a/arch/x86/include/asm/checksum.h b/arch/x86/include/asm/checksum.h
index 848850fd7d62..5f5bb0f97361 100644
--- a/arch/x86/include/asm/checksum.h
+++ b/arch/x86/include/asm/checksum.h
@@ -1,5 +1,5 @@
 #ifdef CONFIG_X86_32
-# include "checksum_32.h"
+# include <asm/checksum_32.h>
 #else
-# include "checksum_64.h"
+# include <asm/checksum_64.h>
 #endif
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 99480e55973d..8d871eaddb66 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -138,9 +138,9 @@ extern void __add_wrong_size(void)
 	__raw_cmpxchg((ptr), (old), (new), (size), "")
 
 #ifdef CONFIG_X86_32
-# include "cmpxchg_32.h"
+# include <asm/cmpxchg_32.h>
 #else
-# include "cmpxchg_64.h"
+# include <asm/cmpxchg_64.h>
 #endif
 
 #ifdef __HAVE_ARCH_CMPXCHG
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 6b7ee5ff6820..8c297aa53eef 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -4,7 +4,9 @@
 #ifndef _ASM_X86_CPUFEATURE_H
 #define _ASM_X86_CPUFEATURE_H
 
+#ifndef _ASM_X86_REQUIRED_FEATURES_H
 #include <asm/required-features.h>
+#endif
 
 #define NCAPINTS	10	/* N 32-bit words worth of info */
 
@@ -97,6 +99,7 @@
 #define X86_FEATURE_EXTD_APICID	(3*32+26) /* has extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM	(3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF	(3*32+28) /* APERFMPERF */
+#define X86_FEATURE_EAGER_FPU	(3*32+29) /* "eagerfpu" Non lazy FPU restore */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
@@ -209,6 +212,7 @@
 #define X86_FEATURE_RTM		(9*32+11) /* Restricted Transactional Memory */
 #define X86_FEATURE_RDSEED	(9*32+18) /* The RDSEED instruction */
 #define X86_FEATURE_ADX		(9*32+19) /* The ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP	(9*32+20) /* Supervisor Mode Access Prevention */
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
@@ -299,12 +303,14 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xmm4_2		boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_xsaveopt	boot_cpu_has(X86_FEATURE_XSAVEOPT)
 #define cpu_has_osxsave		boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
 #define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
 #define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
 #define cpu_has_cx8		boot_cpu_has(X86_FEATURE_CX8)
 #define cpu_has_cx16		boot_cpu_has(X86_FEATURE_CX16)
+#define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg		1
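Each X86_FEATURE_* value encodes a word index and a bit position as (word*32 + bit) across the NCAPINTS capability words, and boot_cpu_has()/static_cpu_has() test that bit for the boot CPU. A simplified model of the lookup (hedged sketch, not the kernel's actual cpu_has() machinery):

	/* (word*32 + bit) encoding; the real code tests the bit in the
	 * per-CPU x86_capability[] words, and static_cpu_has() additionally
	 * patches the branch via the alternatives mechanism. */
	#define SKETCH_FEATURE_EAGER_FPU  (3*32 + 29)

	static inline int sketch_cpu_has(const unsigned int caps[], unsigned int feature)
	{
		return (caps[feature / 32] >> (feature % 32)) & 1;
	}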
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 75f4c6d6a331..831dbb9c6c02 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -12,6 +12,7 @@
 
 #include <linux/kernel_stat.h>
 #include <linux/regset.h>
+#include <linux/compat.h>
 #include <linux/slab.h>
 #include <asm/asm.h>
 #include <asm/cpufeature.h>
@@ -20,43 +21,76 @@
20#include <asm/user.h> 21#include <asm/user.h>
21#include <asm/uaccess.h> 22#include <asm/uaccess.h>
22#include <asm/xsave.h> 23#include <asm/xsave.h>
24#include <asm/smap.h>
23 25
24extern unsigned int sig_xstate_size; 26#ifdef CONFIG_X86_64
27# include <asm/sigcontext32.h>
28# include <asm/user32.h>
29int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
30 compat_sigset_t *set, struct pt_regs *regs);
31int ia32_setup_frame(int sig, struct k_sigaction *ka,
32 compat_sigset_t *set, struct pt_regs *regs);
33#else
34# define user_i387_ia32_struct user_i387_struct
35# define user32_fxsr_struct user_fxsr_struct
36# define ia32_setup_frame __setup_frame
37# define ia32_setup_rt_frame __setup_rt_frame
38#endif
39
40extern unsigned int mxcsr_feature_mask;
25extern void fpu_init(void); 41extern void fpu_init(void);
42extern void eager_fpu_init(void);
26 43
27DECLARE_PER_CPU(struct task_struct *, fpu_owner_task); 44DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
28 45
46extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
47 struct task_struct *tsk);
48extern void convert_to_fxsr(struct task_struct *tsk,
49 const struct user_i387_ia32_struct *env);
50
29extern user_regset_active_fn fpregs_active, xfpregs_active; 51extern user_regset_active_fn fpregs_active, xfpregs_active;
30extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get, 52extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
31 xstateregs_get; 53 xstateregs_get;
32extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set, 54extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
33 xstateregs_set; 55 xstateregs_set;
34 56
35
36/* 57/*
37 * xstateregs_active == fpregs_active. Please refer to the comment 58 * xstateregs_active == fpregs_active. Please refer to the comment
38 * at the definition of fpregs_active. 59 * at the definition of fpregs_active.
39 */ 60 */
40#define xstateregs_active fpregs_active 61#define xstateregs_active fpregs_active
41 62
42extern struct _fpx_sw_bytes fx_sw_reserved;
43#ifdef CONFIG_IA32_EMULATION
44extern unsigned int sig_xstate_ia32_size;
45extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
46struct _fpstate_ia32;
47struct _xstate_ia32;
48extern int save_i387_xstate_ia32(void __user *buf);
49extern int restore_i387_xstate_ia32(void __user *buf);
50#endif
51
52#ifdef CONFIG_MATH_EMULATION 63#ifdef CONFIG_MATH_EMULATION
64# define HAVE_HWFP (boot_cpu_data.hard_math)
53extern void finit_soft_fpu(struct i387_soft_struct *soft); 65extern void finit_soft_fpu(struct i387_soft_struct *soft);
54#else 66#else
67# define HAVE_HWFP 1
55static inline void finit_soft_fpu(struct i387_soft_struct *soft) {} 68static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
56#endif 69#endif
57 70
71static inline int is_ia32_compat_frame(void)
72{
73 return config_enabled(CONFIG_IA32_EMULATION) &&
74 test_thread_flag(TIF_IA32);
75}
76
77static inline int is_ia32_frame(void)
78{
79 return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
80}
81
82static inline int is_x32_frame(void)
83{
84 return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
85}
86
58#define X87_FSW_ES (1 << 7) /* Exception Summary */ 87#define X87_FSW_ES (1 << 7) /* Exception Summary */
59 88
89static __always_inline __pure bool use_eager_fpu(void)
90{
91 return static_cpu_has(X86_FEATURE_EAGER_FPU);
92}
93
60static __always_inline __pure bool use_xsaveopt(void) 94static __always_inline __pure bool use_xsaveopt(void)
61{ 95{
62 return static_cpu_has(X86_FEATURE_XSAVEOPT); 96 return static_cpu_has(X86_FEATURE_XSAVEOPT);
@@ -72,6 +106,13 @@ static __always_inline __pure bool use_fxsr(void)
72 return static_cpu_has(X86_FEATURE_FXSR); 106 return static_cpu_has(X86_FEATURE_FXSR);
73} 107}
74 108
109static inline void fx_finit(struct i387_fxsave_struct *fx)
110{
111 memset(fx, 0, xstate_size);
112 fx->cwd = 0x37f;
113 fx->mxcsr = MXCSR_DEFAULT;
114}
115
75extern void __sanitize_i387_state(struct task_struct *); 116extern void __sanitize_i387_state(struct task_struct *);
76 117
77static inline void sanitize_i387_state(struct task_struct *tsk) 118static inline void sanitize_i387_state(struct task_struct *tsk)
@@ -81,131 +122,121 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
81 __sanitize_i387_state(tsk); 122 __sanitize_i387_state(tsk);
82} 123}
83 124
84#ifdef CONFIG_X86_64 125#define user_insn(insn, output, input...) \
85static inline int fxrstor_checking(struct i387_fxsave_struct *fx) 126({ \
86{ 127 int err; \
87 int err; 128 asm volatile(ASM_STAC "\n" \
88 129 "1:" #insn "\n\t" \
89 /* See comment in fxsave() below. */ 130 "2: " ASM_CLAC "\n" \
90#ifdef CONFIG_AS_FXSAVEQ 131 ".section .fixup,\"ax\"\n" \
91 asm volatile("1: fxrstorq %[fx]\n\t" 132 "3: movl $-1,%[err]\n" \
92 "2:\n" 133 " jmp 2b\n" \
93 ".section .fixup,\"ax\"\n" 134 ".previous\n" \
94 "3: movl $-1,%[err]\n" 135 _ASM_EXTABLE(1b, 3b) \
95 " jmp 2b\n" 136 : [err] "=r" (err), output \
96 ".previous\n" 137 : "0"(0), input); \
97 _ASM_EXTABLE(1b, 3b) 138 err; \
98 : [err] "=r" (err) 139})
99 : [fx] "m" (*fx), "0" (0)); 140
100#else 141#define check_insn(insn, output, input...) \
101 asm volatile("1: rex64/fxrstor (%[fx])\n\t" 142({ \
102 "2:\n" 143 int err; \
103 ".section .fixup,\"ax\"\n" 144 asm volatile("1:" #insn "\n\t" \
104 "3: movl $-1,%[err]\n" 145 "2:\n" \
105 " jmp 2b\n" 146 ".section .fixup,\"ax\"\n" \
106 ".previous\n" 147 "3: movl $-1,%[err]\n" \
107 _ASM_EXTABLE(1b, 3b) 148 " jmp 2b\n" \
108 : [err] "=r" (err) 149 ".previous\n" \
109 : [fx] "R" (fx), "m" (*fx), "0" (0)); 150 _ASM_EXTABLE(1b, 3b) \
110#endif 151 : [err] "=r" (err), output \
111 return err; 152 : "0"(0), input); \
153 err; \
154})
155
156static inline int fsave_user(struct i387_fsave_struct __user *fx)
157{
158 return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
112} 159}
113 160
114static inline int fxsave_user(struct i387_fxsave_struct __user *fx) 161static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
115{ 162{
116 int err; 163 if (config_enabled(CONFIG_X86_32))
164 return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
165 else if (config_enabled(CONFIG_AS_FXSAVEQ))
166 return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
117 167
118 /* 168 /* See comment in fpu_fxsave() below. */
119 * Clear the bytes not touched by the fxsave and reserved 169 return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
120 * for the SW usage.
121 */
122 err = __clear_user(&fx->sw_reserved,
123 sizeof(struct _fpx_sw_bytes));
124 if (unlikely(err))
125 return -EFAULT;
126
127 /* See comment in fxsave() below. */
128#ifdef CONFIG_AS_FXSAVEQ
129 asm volatile("1: fxsaveq %[fx]\n\t"
130 "2:\n"
131 ".section .fixup,\"ax\"\n"
132 "3: movl $-1,%[err]\n"
133 " jmp 2b\n"
134 ".previous\n"
135 _ASM_EXTABLE(1b, 3b)
136 : [err] "=r" (err), [fx] "=m" (*fx)
137 : "0" (0));
138#else
139 asm volatile("1: rex64/fxsave (%[fx])\n\t"
140 "2:\n"
141 ".section .fixup,\"ax\"\n"
142 "3: movl $-1,%[err]\n"
143 " jmp 2b\n"
144 ".previous\n"
145 _ASM_EXTABLE(1b, 3b)
146 : [err] "=r" (err), "=m" (*fx)
147 : [fx] "R" (fx), "0" (0));
148#endif
149 if (unlikely(err) &&
150 __clear_user(fx, sizeof(struct i387_fxsave_struct)))
151 err = -EFAULT;
152 /* No need to clear here because the caller clears USED_MATH */
153 return err;
154} 170}
155 171
156static inline void fpu_fxsave(struct fpu *fpu) 172static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
157{ 173{
158 /* Using "rex64; fxsave %0" is broken because, if the memory operand 174 if (config_enabled(CONFIG_X86_32))
159 uses any extended registers for addressing, a second REX prefix 175 return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
160 will be generated (to the assembler, rex64 followed by semicolon 176 else if (config_enabled(CONFIG_AS_FXSAVEQ))
161 is a separate instruction), and hence the 64-bitness is lost. */ 177 return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
162 178
163#ifdef CONFIG_AS_FXSAVEQ 179 /* See comment in fpu_fxsave() below. */
164 /* Using "fxsaveq %0" would be the ideal choice, but is only supported 180 return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
165 starting with gas 2.16. */ 181 "m" (*fx));
166 __asm__ __volatile__("fxsaveq %0"
167 : "=m" (fpu->state->fxsave));
168#else
169 /* Using, as a workaround, the properly prefixed form below isn't
170 accepted by any binutils version so far released, complaining that
171 the same type of prefix is used twice if an extended register is
172 needed for addressing (fix submitted to mainline 2005-11-21).
173 asm volatile("rex64/fxsave %0"
174 : "=m" (fpu->state->fxsave));
175 This, however, we can work around by forcing the compiler to select
176 an addressing mode that doesn't require extended registers. */
177 asm volatile("rex64/fxsave (%[fx])"
178 : "=m" (fpu->state->fxsave)
179 : [fx] "R" (&fpu->state->fxsave));
180#endif
181} 182}
182 183
183#else /* CONFIG_X86_32 */ 184static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
185{
186 if (config_enabled(CONFIG_X86_32))
187 return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
188 else if (config_enabled(CONFIG_AS_FXSAVEQ))
189 return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
184 190
185/* perform fxrstor iff the processor has extended states, otherwise frstor */ 191 /* See comment in fpu_fxsave() below. */
186static inline int fxrstor_checking(struct i387_fxsave_struct *fx) 192 return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
193 "m" (*fx));
194}
195
196static inline int frstor_checking(struct i387_fsave_struct *fx)
187{ 197{
188 /* 198 return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
189 * The "nop" is needed to make the instructions the same 199}
190 * length.
191 */
192 alternative_input(
193 "nop ; frstor %1",
194 "fxrstor %1",
195 X86_FEATURE_FXSR,
196 "m" (*fx));
197 200
198 return 0; 201static inline int frstor_user(struct i387_fsave_struct __user *fx)
202{
203 return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
199} 204}
200 205
201static inline void fpu_fxsave(struct fpu *fpu) 206static inline void fpu_fxsave(struct fpu *fpu)
202{ 207{
203 asm volatile("fxsave %[fx]" 208 if (config_enabled(CONFIG_X86_32))
204 : [fx] "=m" (fpu->state->fxsave)); 209 asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
210 else if (config_enabled(CONFIG_AS_FXSAVEQ))
211 asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
212 else {
213 /* Using "rex64; fxsave %0" is broken because, if the memory
214 * operand uses any extended registers for addressing, a second
215 * REX prefix will be generated (to the assembler, rex64
216 * followed by semicolon is a separate instruction), and hence
217 * the 64-bitness is lost.
218 *
219 * Using "fxsaveq %0" would be the ideal choice, but is only
220 * supported starting with gas 2.16.
221 *
222 * Using, as a workaround, the properly prefixed form below
223 * isn't accepted by any binutils version so far released,
224 * complaining that the same type of prefix is used twice if
225 * an extended register is needed for addressing (fix submitted
226 * to mainline 2005-11-21).
227 *
228 * asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
229 *
230 * This, however, we can work around by forcing the compiler to
231 * select an addressing mode that doesn't require extended
232 * registers.
233 */
234 asm volatile( "rex64/fxsave (%[fx])"
235 : "=m" (fpu->state->fxsave)
236 : [fx] "R" (&fpu->state->fxsave));
237 }
205} 238}
206 239
207#endif /* CONFIG_X86_64 */
208
209/* 240/*
210 * These must be called with preempt disabled. Returns 241 * These must be called with preempt disabled. Returns
211 * 'true' if the FPU state is still intact. 242 * 'true' if the FPU state is still intact.
@@ -248,17 +279,14 @@ static inline int __save_init_fpu(struct task_struct *tsk)
 	return fpu_save_init(&tsk->thread.fpu);
 }
 
-static inline int fpu_fxrstor_checking(struct fpu *fpu)
-{
-	return fxrstor_checking(&fpu->state->fxsave);
-}
-
 static inline int fpu_restore_checking(struct fpu *fpu)
 {
 	if (use_xsave())
-		return fpu_xrstor_checking(fpu);
+		return fpu_xrstor_checking(&fpu->state->xsave);
+	else if (use_fxsr())
+		return fxrstor_checking(&fpu->state->fxsave);
 	else
-		return fpu_fxrstor_checking(fpu);
+		return frstor_checking(&fpu->state->fsave);
 }
 
 static inline int restore_fpu_checking(struct task_struct *tsk)
@@ -310,15 +338,52 @@ static inline void __thread_set_has_fpu(struct task_struct *tsk)
310static inline void __thread_fpu_end(struct task_struct *tsk) 338static inline void __thread_fpu_end(struct task_struct *tsk)
311{ 339{
312 __thread_clear_has_fpu(tsk); 340 __thread_clear_has_fpu(tsk);
313 stts(); 341 if (!use_eager_fpu())
342 stts();
314} 343}
315 344
316static inline void __thread_fpu_begin(struct task_struct *tsk) 345static inline void __thread_fpu_begin(struct task_struct *tsk)
317{ 346{
318 clts(); 347 if (!use_eager_fpu())
348 clts();
319 __thread_set_has_fpu(tsk); 349 __thread_set_has_fpu(tsk);
320} 350}
321 351
352static inline void __drop_fpu(struct task_struct *tsk)
353{
354 if (__thread_has_fpu(tsk)) {
355 /* Ignore delayed exceptions from user space */
356 asm volatile("1: fwait\n"
357 "2:\n"
358 _ASM_EXTABLE(1b, 2b));
359 __thread_fpu_end(tsk);
360 }
361}
362
363static inline void drop_fpu(struct task_struct *tsk)
364{
365 /*
366 * Forget coprocessor state..
367 */
368 preempt_disable();
369 tsk->fpu_counter = 0;
370 __drop_fpu(tsk);
371 clear_used_math();
372 preempt_enable();
373}
374
375static inline void drop_init_fpu(struct task_struct *tsk)
376{
377 if (!use_eager_fpu())
378 drop_fpu(tsk);
379 else {
380 if (use_xsave())
381 xrstor_state(init_xstate_buf, -1);
382 else
383 fxrstor_checking(&init_xstate_buf->i387);
384 }
385}
386
322/* 387/*
323 * FPU state switching for scheduling. 388 * FPU state switching for scheduling.
324 * 389 *
@@ -352,7 +417,12 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 {
 	fpu_switch_t fpu;
 
-	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
+	/*
+	 * If the task has used the math, pre-load the FPU on xsave processors
+	 * or if the past 5 consecutive context-switches used math.
+	 */
+	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
+					     new->fpu_counter > 5);
 	if (__thread_has_fpu(old)) {
 		if (!__save_init_fpu(old))
 			cpu = ~0;
@@ -364,14 +434,14 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 			new->fpu_counter++;
 			__thread_set_has_fpu(new);
 			prefetch(new->thread.fpu.state);
-		} else
+		} else if (!use_eager_fpu())
 			stts();
 	} else {
 		old->fpu_counter = 0;
 		old->thread.fpu.last_cpu = ~0;
 		if (fpu.preload) {
 			new->fpu_counter++;
-			if (fpu_lazy_restore(new, cpu))
+			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
 				fpu.preload = 0;
 			else
 				prefetch(new->thread.fpu.state);
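The switch_fpu_prepare() hunks above select between two models: with X86_FEATURE_EAGER_FPU the next task's state is restored eagerly at every switch and the CR0.TS games (stts()/clts() plus the device-not-available trap) are skipped, while without it the old lazy behaviour is kept and preloading only happens after five consecutive FPU-using switches. A condensed restatement of that decision, with placeholder names (illustrative, not kernel code):

	enum fpu_plan { FPU_PRELOAD_NOW, FPU_TRAP_ON_FIRST_USE, FPU_NOTHING };

	static enum fpu_plan plan_for_next_task(int next_used_math,
						int eager_fpu, int fpu_counter)
	{
		if (next_used_math && (eager_fpu || fpu_counter > 5))
			return FPU_PRELOAD_NOW;		/* restore registers right away */
		if (!eager_fpu)
			return FPU_TRAP_ON_FIRST_USE;	/* stts(): fault state in lazily */
		return FPU_NOTHING;
	}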
@@ -391,44 +461,40 @@ static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
391{ 461{
392 if (fpu.preload) { 462 if (fpu.preload) {
393 if (unlikely(restore_fpu_checking(new))) 463 if (unlikely(restore_fpu_checking(new)))
394 __thread_fpu_end(new); 464 drop_init_fpu(new);
395 } 465 }
396} 466}
397 467
398/* 468/*
399 * Signal frame handlers... 469 * Signal frame handlers...
400 */ 470 */
401extern int save_i387_xstate(void __user *buf); 471extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
402extern int restore_i387_xstate(void __user *buf); 472extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);
403 473
404static inline void __clear_fpu(struct task_struct *tsk) 474static inline int xstate_sigframe_size(void)
405{ 475{
406 if (__thread_has_fpu(tsk)) { 476 return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
407 /* Ignore delayed exceptions from user space */ 477}
408 asm volatile("1: fwait\n" 478
409 "2:\n" 479static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
410 _ASM_EXTABLE(1b, 2b)); 480{
411 __thread_fpu_end(tsk); 481 void __user *buf_fx = buf;
482 int size = xstate_sigframe_size();
483
484 if (ia32_frame && use_fxsr()) {
485 buf_fx = buf + sizeof(struct i387_fsave_struct);
486 size += sizeof(struct i387_fsave_struct);
412 } 487 }
488
489 return __restore_xstate_sig(buf, buf_fx, size);
413} 490}
414 491
415/* 492/*
416 * The actual user_fpu_begin/end() functions 493 * Need to be preemption-safe.
417 * need to be preemption-safe.
418 * 494 *
419 * NOTE! user_fpu_end() must be used only after you 495 * NOTE! user_fpu_begin() must be used only immediately before restoring
420 * have saved the FP state, and user_fpu_begin() must 496 * it. This function does not do any save/restore on their own.
421 * be used only immediately before restoring it.
422 * These functions do not do any save/restore on
423 * their own.
424 */ 497 */
425static inline void user_fpu_end(void)
426{
427 preempt_disable();
428 __thread_fpu_end(current);
429 preempt_enable();
430}
431
432static inline void user_fpu_begin(void) 498static inline void user_fpu_begin(void)
433{ 499{
434 preempt_disable(); 500 preempt_disable();
@@ -437,25 +503,32 @@ static inline void user_fpu_begin(void)
437 preempt_enable(); 503 preempt_enable();
438} 504}
439 505
506static inline void __save_fpu(struct task_struct *tsk)
507{
508 if (use_xsave())
509 xsave_state(&tsk->thread.fpu.state->xsave, -1);
510 else
511 fpu_fxsave(&tsk->thread.fpu);
512}
513
440/* 514/*
441 * These disable preemption on their own and are safe 515 * These disable preemption on their own and are safe
442 */ 516 */
443static inline void save_init_fpu(struct task_struct *tsk) 517static inline void save_init_fpu(struct task_struct *tsk)
444{ 518{
445 WARN_ON_ONCE(!__thread_has_fpu(tsk)); 519 WARN_ON_ONCE(!__thread_has_fpu(tsk));
520
521 if (use_eager_fpu()) {
522 __save_fpu(tsk);
523 return;
524 }
525
446 preempt_disable(); 526 preempt_disable();
447 __save_init_fpu(tsk); 527 __save_init_fpu(tsk);
448 __thread_fpu_end(tsk); 528 __thread_fpu_end(tsk);
449 preempt_enable(); 529 preempt_enable();
450} 530}
451 531
452static inline void clear_fpu(struct task_struct *tsk)
453{
454 preempt_disable();
455 __clear_fpu(tsk);
456 preempt_enable();
457}
458
459/* 532/*
460 * i387 state interaction 533 * i387 state interaction
461 */ 534 */
@@ -510,11 +583,34 @@ static inline void fpu_free(struct fpu *fpu)
510 } 583 }
511} 584}
512 585
513static inline void fpu_copy(struct fpu *dst, struct fpu *src) 586static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
514{ 587{
515 memcpy(dst->state, src->state, xstate_size); 588 if (use_eager_fpu()) {
589 memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
590 __save_fpu(dst);
591 } else {
592 struct fpu *dfpu = &dst->thread.fpu;
593 struct fpu *sfpu = &src->thread.fpu;
594
595 unlazy_fpu(src);
596 memcpy(dfpu->state, sfpu->state, xstate_size);
597 }
516} 598}
517 599
518extern void fpu_finit(struct fpu *fpu); 600static inline unsigned long
601alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
602 unsigned long *size)
603{
604 unsigned long frame_size = xstate_sigframe_size();
605
606 *buf_fx = sp = round_down(sp - frame_size, 64);
607 if (ia32_frame && use_fxsr()) {
608 frame_size += sizeof(struct i387_fsave_struct);
609 sp -= sizeof(struct i387_fsave_struct);
610 }
611
612 *size = frame_size;
613 return sp;
614}
519 615
520#endif 616#endif
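Nearly all of the new fpu-internal.h save/restore helpers go through the user_insn()/check_insn() macros introduced above: the instruction gets a numbered label, an exception-table entry (_ASM_EXTABLE) redirects a fault on that instruction to a fixup stub that stores -1 in err, and user_insn() additionally brackets the access with ASM_STAC/ASM_CLAC for SMAP. A self-contained sketch of the same fixup pattern (hypothetical helper name; the real macros are in the hunk above):

	static inline int try_fxsave_user_sketch(struct i387_fxsave_struct __user *fx)
	{
		int err;
		asm volatile("1: fxsave %[fx]\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3: movl $-1,%[err]\n"
			     "   jmp 2b\n"
			     ".previous\n"
			     _ASM_EXTABLE(1b, 3b)
			     : [err] "=r" (err), [fx] "=m" (*fx)
			     : "0" (0));
		return err;
	}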
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index b0767bc08740..9a25b522d377 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -3,38 +3,54 @@
 
 #ifdef __ASSEMBLY__
 
-	.macro MCOUNT_SAVE_FRAME
-	/* taken from glibc */
-	subq $0x38, %rsp
-	movq %rax, (%rsp)
-	movq %rcx, 8(%rsp)
-	movq %rdx, 16(%rsp)
-	movq %rsi, 24(%rsp)
-	movq %rdi, 32(%rsp)
-	movq %r8, 40(%rsp)
-	movq %r9, 48(%rsp)
+	/* skip is set if the stack was already partially adjusted */
+	.macro MCOUNT_SAVE_FRAME skip=0
+	/*
+	 * We add enough stack to save all regs.
+	 */
+	subq $(SS+8-\skip), %rsp
+	movq %rax, RAX(%rsp)
+	movq %rcx, RCX(%rsp)
+	movq %rdx, RDX(%rsp)
+	movq %rsi, RSI(%rsp)
+	movq %rdi, RDI(%rsp)
+	movq %r8, R8(%rsp)
+	movq %r9, R9(%rsp)
+	/* Move RIP to its proper location */
+	movq SS+8(%rsp), %rdx
+	movq %rdx, RIP(%rsp)
 	.endm
 
-	.macro MCOUNT_RESTORE_FRAME
-	movq 48(%rsp), %r9
-	movq 40(%rsp), %r8
-	movq 32(%rsp), %rdi
-	movq 24(%rsp), %rsi
-	movq 16(%rsp), %rdx
-	movq 8(%rsp), %rcx
-	movq (%rsp), %rax
-	addq $0x38, %rsp
+	.macro MCOUNT_RESTORE_FRAME skip=0
+	movq R9(%rsp), %r9
+	movq R8(%rsp), %r8
+	movq RDI(%rsp), %rdi
+	movq RSI(%rsp), %rsi
+	movq RDX(%rsp), %rdx
+	movq RCX(%rsp), %rcx
+	movq RAX(%rsp), %rax
+	addq $(SS+8-\skip), %rsp
 	.endm
 
 #endif
 
 #ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR		((long)(mcount))
+#ifdef CC_USING_FENTRY
+# define MCOUNT_ADDR		((long)(__fentry__))
+#else
+# define MCOUNT_ADDR		((long)(mcount))
+#endif
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#endif
+
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 extern atomic_t modifying_ftrace_code;
+extern void __fentry__(void);
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
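CC_USING_FENTRY is defined when the compiler supports -mfentry: instead of calling mcount after the prologue, gcc then emits a call to __fentry__ as the very first instruction of each traced function, which is why MCOUNT_ADDR must point dynamic ftrace at whichever symbol was actually used. A sketch of the two call-site shapes (illustrative disassembly, not from this patch):

	# gcc -pg (classic mcount):
	func:
		push   %rbp
		mov    %rsp,%rbp
		call   mcount		# after the prologue
		...

	# gcc -pg -mfentry (CC_USING_FENTRY):
	func:
		call   __fentry__	# first instruction, before any prologue
		push   %rbp
		...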
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index 71ecbcba1a4e..f373046e63ec 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -9,10 +9,13 @@
 #include <asm/asm.h>
 #include <asm/errno.h>
 #include <asm/processor.h>
+#include <asm/smap.h>
 
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)	\
-	asm volatile("1:\t" insn "\n"				\
-		     "2:\t.section .fixup,\"ax\"\n"		\
+	asm volatile("\t" ASM_STAC "\n"				\
+		     "1:\t" insn "\n"				\
+		     "2:\t" ASM_CLAC "\n"			\
+		     "\t.section .fixup,\"ax\"\n"		\
 		     "3:\tmov\t%3, %1\n"			\
 		     "\tjmp\t2b\n"				\
 		     "\t.previous\n"				\
@@ -21,12 +24,14 @@
 		     : "i" (-EFAULT), "0" (oparg), "1" (0))
 
 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg)	\
-	asm volatile("1:\tmovl	%2, %0\n"			\
+	asm volatile("\t" ASM_STAC "\n"				\
+		     "1:\tmovl	%2, %0\n"			\
 		     "\tmovl\t%0, %3\n"				\
 		     "\t" insn "\n"				\
 		     "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"	\
 		     "\tjnz\t1b\n"				\
-		     "3:\t.section .fixup,\"ax\"\n"		\
+		     "3:\t" ASM_CLAC "\n"			\
+		     "\t.section .fixup,\"ax\"\n"		\
 		     "4:\tmov\t%5, %1\n"			\
 		     "\tjmp\t3b\n"				\
 		     "\t.previous\n"				\
@@ -122,8 +127,10 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
-		     "2:\t.section .fixup, \"ax\"\n"
+	asm volatile("\t" ASM_STAC "\n"
+		     "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+		     "2:\t" ASM_CLAC "\n"
+		     "\t.section .fixup, \"ax\"\n"
 		     "3:\tmov %3, %0\n"
 		     "\tjmp 2b\n"
 		     "\t.previous\n"
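These futex hunks belong to the SMAP enabling work: once CR4.SMAP is set, kernel code faults on any access to user pages unless EFLAGS.AC is raised, so each deliberate user access is bracketed with STAC (via ASM_STAC from the new <asm/smap.h>) before the access and CLAC on both the success and the fixup path. A hedged sketch of the bracketing alone (no exception-table entry is set up here, so this is not fault-safe as written):

	static inline u32 load_user_u32_sketch(const u32 __user *uaddr)
	{
		u32 val;

		asm volatile("\t" ASM_STAC "\n"		/* allow user access */
			     "1:\tmovl %1, %0\n"
			     "2:\t" ASM_CLAC "\n"	/* forbid it again */
			     : "=r" (val)
			     : "m" (*uaddr));
		return val;
	}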
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index d3895dbf4ddb..81f04cee5f74 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -18,6 +18,10 @@ typedef struct {
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
 	unsigned int irq_call_count;
+	/*
+	 * irq_tlb_count is double-counted in irq_call_count, so it must be
+	 * subtracted from irq_call_count when displaying irq_call_count
+	 */
 	unsigned int irq_tlb_count;
 #endif
 #ifdef CONFIG_X86_THERMAL_VECTOR
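The new comment records that each TLB-shootdown IPI bumps both irq_call_count and irq_tlb_count, so any code that reports the function-call IPI column has to subtract the TLB share to avoid double counting; roughly (hedged sketch, not the actual /proc/interrupts code, and assuming the usual irq_cpustat_t typedef this struct builds):

	/* function-call IPIs minus the TLB shootdowns already counted within them */
	static unsigned int displayed_call_count(const irq_cpustat_t *stat)
	{
		return stat->irq_call_count - stat->irq_tlb_count;
	}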
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 2c392d663dce..434e2106cc87 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -35,8 +35,6 @@
 #define HPET_ID_NUMBER_SHIFT	8
 #define HPET_ID_VENDOR_SHIFT	16
 
-#define HPET_ID_VENDOR_8086	0x8086
-
 #define HPET_CFG_ENABLE		0x001
 #define HPET_CFG_LEGACY		0x002
 #define HPET_LEGACY_8254	2
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 257d9cca214f..ed8089d69094 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -19,12 +19,37 @@ struct pt_regs;
 struct user_i387_struct;
 
 extern int init_fpu(struct task_struct *child);
+extern void fpu_finit(struct fpu *fpu);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 extern void math_state_restore(void);
 
 extern bool irq_fpu_usable(void);
-extern void kernel_fpu_begin(void);
-extern void kernel_fpu_end(void);
+
+/*
+ * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
+ * and they don't touch the preempt state on their own.
+ * If you enable preemption after __kernel_fpu_begin(), preempt notifier
+ * should call the __kernel_fpu_end() to prevent the kernel/user FPU
+ * state from getting corrupted. KVM for example uses this model.
+ *
+ * All other cases use kernel_fpu_begin/end() which disable preemption
+ * during kernel FPU usage.
+ */
+extern void __kernel_fpu_begin(void);
+extern void __kernel_fpu_end(void);
+
+static inline void kernel_fpu_begin(void)
+{
+	WARN_ON_ONCE(!irq_fpu_usable());
+	preempt_disable();
+	__kernel_fpu_begin();
+}
+
+static inline void kernel_fpu_end(void)
+{
+	__kernel_fpu_end();
+	preempt_enable();
+}
 
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
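kernel_fpu_begin()/kernel_fpu_end() remain the general-purpose interface, now built on the __-prefixed primitives for callers (such as KVM) that manage preemption themselves. Typical usage for ordinary kernel code that wants to touch SSE/AVX registers, as a hedged sketch:

	static void xor_block_sketch(u8 *dst, const u8 *src, size_t len)
	{
		if (!irq_fpu_usable()) {
			while (len--)		/* integer fallback */
				*dst++ ^= *src++;
			return;
		}

		kernel_fpu_begin();
		/* ... SIMD loop over dst/src would go here ... */
		kernel_fpu_end();
	}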
diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h
index f229b13a5f30..f42a04735a0a 100644
--- a/arch/x86/include/asm/iommu_table.h
+++ b/arch/x86/include/asm/iommu_table.h
@@ -48,7 +48,7 @@ struct iommu_table_entry {
 
 
 #define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\
-	static const struct iommu_table_entry const			\
+	static const struct iommu_table_entry				\
 		__iommu_entry_##_detect __used				\
 	__attribute__ ((unused, __section__(".iommu_table"),		\
 			aligned((sizeof(void *)))))			\
@@ -63,10 +63,10 @@ struct iommu_table_entry {
  * to stop detecting the other IOMMUs after yours has been detected.
  */
 #define IOMMU_INIT_POST(_detect)					\
-	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb,  0, 0, 0)
+	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb,  NULL, NULL, 0)
 
 #define IOMMU_INIT_POST_FINISH(detect)					\
-	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb,  0, 0, 1)
+	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb,  NULL, NULL, 1)
 
 /*
  * A more sophisticated version of IOMMU_INIT. This variant requires:
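__IOMMU_INIT relies on the common "table in a linker section" pattern: each entry is dropped into .iommu_table and boot code walks the section between its start/end symbols, running detect routines in dependency order. A self-contained sketch of the pattern (field and symbol names here are illustrative, not the real struct iommu_table_entry layout or walker):

	struct detect_entry {
		int  (*detect)(void);
		void (*early_init)(void);
		int  finish;		/* stop after this entry detects */
	};

	/* __start_/__stop_ symbols are generated by the linker for any
	 * section whose name is a valid C identifier. */
	extern struct detect_entry __start_detect_table[], __stop_detect_table[];

	static void run_detect_table(void)
	{
		struct detect_entry *p;

		for (p = __start_detect_table; p < __stop_detect_table; p++) {
			if (p->detect && p->detect() > 0) {
				if (p->early_init)
					p->early_init();
				if (p->finish)
					break;
			}
		}
	}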
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 547882539157..d3ddd17405d0 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -27,6 +27,7 @@
 #include <asm/insn.h>
 
 #define  __ARCH_WANT_KPROBES_INSN_SLOT
+#define  ARCH_SUPPORTS_KPROBES_ON_FTRACE
 
 struct pt_regs;
 struct kprobe;
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 246617efd67f..41e08cb6a092 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -9,6 +9,22 @@
 #include <linux/types.h>
 #include <linux/ioctl.h>
 
+#define DE_VECTOR 0
+#define DB_VECTOR 1
+#define BP_VECTOR 3
+#define OF_VECTOR 4
+#define BR_VECTOR 5
+#define UD_VECTOR 6
+#define NM_VECTOR 7
+#define DF_VECTOR 8
+#define TS_VECTOR 10
+#define NP_VECTOR 11
+#define SS_VECTOR 12
+#define GP_VECTOR 13
+#define PF_VECTOR 14
+#define MF_VECTOR 16
+#define MC_VECTOR 18
+
 /* Select x86 specific features in <linux/kvm.h> */
 #define __KVM_HAVE_PIT
 #define __KVM_HAVE_IOAPIC
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 09155d64cf7e..1eaa6b056670 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -75,22 +75,6 @@
 #define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
-#define DE_VECTOR 0
-#define DB_VECTOR 1
-#define BP_VECTOR 3
-#define OF_VECTOR 4
-#define BR_VECTOR 5
-#define UD_VECTOR 6
-#define NM_VECTOR 7
-#define DF_VECTOR 8
-#define TS_VECTOR 10
-#define NP_VECTOR 11
-#define SS_VECTOR 12
-#define GP_VECTOR 13
-#define PF_VECTOR 14
-#define MF_VECTOR 16
-#define MC_VECTOR 18
-
 #define SELECTOR_TI_MASK (1 << 2)
 #define SELECTOR_RPL_MASK 0x03
 
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index a3ac52b29cbf..54d73b1f00a0 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -116,19 +116,9 @@ struct mce_log {
 /* Software defined banks */
 #define MCE_EXTENDED_BANK	128
 #define MCE_THERMAL_BANK	MCE_EXTENDED_BANK + 0
-
-#define K8_MCE_THRESHOLD_BASE      (MCE_EXTENDED_BANK + 1)      /* MCE_AMD */
-#define K8_MCE_THRESHOLD_BANK_0    (MCE_THRESHOLD_BASE + 0 * 9)
-#define K8_MCE_THRESHOLD_BANK_1    (MCE_THRESHOLD_BASE + 1 * 9)
-#define K8_MCE_THRESHOLD_BANK_2    (MCE_THRESHOLD_BASE + 2 * 9)
-#define K8_MCE_THRESHOLD_BANK_3    (MCE_THRESHOLD_BASE + 3 * 9)
-#define K8_MCE_THRESHOLD_BANK_4    (MCE_THRESHOLD_BASE + 4 * 9)
-#define K8_MCE_THRESHOLD_BANK_5    (MCE_THRESHOLD_BASE + 5 * 9)
-#define K8_MCE_THRESHOLD_DRAM_ECC  (MCE_THRESHOLD_BANK_4 + 0)
-
+#define K8_MCE_THRESHOLD_BASE      (MCE_EXTENDED_BANK + 1)
 
 #ifdef __KERNEL__
-
 extern void mce_register_decode_chain(struct notifier_block *nb);
 extern void mce_unregister_decode_chain(struct notifier_block *nb);
 
@@ -171,6 +161,7 @@ DECLARE_PER_CPU(struct device *, mce_device);
 #ifdef CONFIG_X86_MCE_INTEL
 extern int mce_cmci_disabled;
 extern int mce_ignore_ce;
+extern int mce_bios_cmci_threshold;
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
 void cmci_clear(void);
 void cmci_reenable(void);
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 4ebe157bf73d..43d921b4752c 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -15,8 +15,8 @@ struct microcode_ops {
 	enum ucode_state (*request_microcode_user) (int cpu,
 				const void __user *buf, size_t size);
 
-	enum ucode_state (*request_microcode_fw) (int cpu,
-				struct device *device);
+	enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
+						  bool refresh_fw);
 
 	void (*microcode_fini_cpu) (int cpu);
 
@@ -49,12 +49,6 @@ static inline struct microcode_ops * __init init_intel_microcode(void)
 #ifdef CONFIG_MICROCODE_AMD
 extern struct microcode_ops * __init init_amd_microcode(void);
 extern void __exit exit_amd_microcode(void);
-
-static inline void get_ucode_data(void *to, const u8 *from, size_t n)
-{
-	memcpy(to, from, n);
-}
-
 #else
 static inline struct microcode_ops * __init init_amd_microcode(void)
 {
diff --git a/arch/x86/include/asm/mmzone.h b/arch/x86/include/asm/mmzone.h
index 64217ea16a36..d497bc425cae 100644
--- a/arch/x86/include/asm/mmzone.h
+++ b/arch/x86/include/asm/mmzone.h
@@ -1,5 +1,5 @@
 #ifdef CONFIG_X86_32
-# include "mmzone_32.h"
+# include <asm/mmzone_32.h>
 #else
-# include "mmzone_64.h"
+# include <asm/mmzone_64.h>
 #endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 957ec87385af..fbee9714d9ab 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -248,6 +248,9 @@
 
 #define MSR_IA32_PERF_STATUS		0x00000198
 #define MSR_IA32_PERF_CTL		0x00000199
+#define MSR_AMD_PSTATE_DEF_BASE		0xc0010064
+#define MSR_AMD_PERF_STATUS		0xc0010063
+#define MSR_AMD_PERF_CTL		0xc0010062
 
 #define MSR_IA32_MPERF			0x000000e7
 #define MSR_IA32_APERF			0x000000e8
diff --git a/arch/x86/include/asm/mutex.h b/arch/x86/include/asm/mutex.h
index a731b9c573a6..7d3a48275394 100644
--- a/arch/x86/include/asm/mutex.h
+++ b/arch/x86/include/asm/mutex.h
@@ -1,5 +1,5 @@
 #ifdef CONFIG_X86_32
-# include "mutex_32.h"
+# include <asm/mutex_32.h>
 #else
-# include "mutex_64.h"
+# include <asm/mutex_64.h>
 #endif
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index bfacd2ccf651..49119fcea2dc 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -53,9 +53,9 @@ static inline int numa_cpu_node(int cpu)
 #endif	/* CONFIG_NUMA */
 
 #ifdef CONFIG_X86_32
-# include "numa_32.h"
+# include <asm/numa_32.h>
 #else
-# include "numa_64.h"
+# include <asm/numa_64.h>
 #endif
 
 #ifdef CONFIG_NUMA
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index df75d07571ce..6e41b9343928 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -141,7 +141,7 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq);
 #endif  /* __KERNEL__ */
 
 #ifdef CONFIG_X86_64
-#include "pci_64.h"
+#include <asm/pci_64.h>
 #endif
 
 /* implement the pci_ DMA API in terms of the generic device dma_ one */
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index cb4e43bce98a..4fabcdf1cfa7 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -262,4 +262,6 @@ static inline void perf_check_microcode(void) { }
262 static inline void amd_pmu_disable_virt(void) { } 262 static inline void amd_pmu_disable_virt(void) { }
263#endif 263#endif
264 264
265#define arch_perf_out_copy_user copy_from_user_nmi
266
265#endif /* _ASM_X86_PERF_EVENT_H */ 267#endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/arch/x86/include/asm/perf_regs.h b/arch/x86/include/asm/perf_regs.h
new file mode 100644
index 000000000000..3f2207bfd17b
--- /dev/null
+++ b/arch/x86/include/asm/perf_regs.h
@@ -0,0 +1,33 @@
1#ifndef _ASM_X86_PERF_REGS_H
2#define _ASM_X86_PERF_REGS_H
3
4enum perf_event_x86_regs {
5 PERF_REG_X86_AX,
6 PERF_REG_X86_BX,
7 PERF_REG_X86_CX,
8 PERF_REG_X86_DX,
9 PERF_REG_X86_SI,
10 PERF_REG_X86_DI,
11 PERF_REG_X86_BP,
12 PERF_REG_X86_SP,
13 PERF_REG_X86_IP,
14 PERF_REG_X86_FLAGS,
15 PERF_REG_X86_CS,
16 PERF_REG_X86_SS,
17 PERF_REG_X86_DS,
18 PERF_REG_X86_ES,
19 PERF_REG_X86_FS,
20 PERF_REG_X86_GS,
21 PERF_REG_X86_R8,
22 PERF_REG_X86_R9,
23 PERF_REG_X86_R10,
24 PERF_REG_X86_R11,
25 PERF_REG_X86_R12,
26 PERF_REG_X86_R13,
27 PERF_REG_X86_R14,
28 PERF_REG_X86_R15,
29
30 PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1,
31 PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1,
32};
33#endif /* _ASM_X86_PERF_REGS_H */
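The new perf_regs.h enumerates the register indices used when a sample carries a user-register dump. A hedged sketch of requesting a few of these registers through perf_event_attr; the PERF_SAMPLE_REGS_USER flag and the sample_regs_user field come from the matching perf core changes in this merge window, not from this header, and the function name is illustrative:

#include <linux/perf_event.h>
#include <asm/perf_regs.h>

/* Ask for IP, SP and BP to be dumped with every sample. */
static void example_request_user_regs(struct perf_event_attr *attr)
{
	attr->sample_type     |= PERF_SAMPLE_REGS_USER;
	attr->sample_regs_user = (1ULL << PERF_REG_X86_IP) |
				 (1ULL << PERF_REG_X86_SP) |
				 (1ULL << PERF_REG_X86_BP);
}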
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 49afb3f41eb6..fc9948465293 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -384,9 +384,9 @@ pte_t *populate_extra_pte(unsigned long vaddr);
384#endif /* __ASSEMBLY__ */ 384#endif /* __ASSEMBLY__ */
385 385
386#ifdef CONFIG_X86_32 386#ifdef CONFIG_X86_32
387# include "pgtable_32.h" 387# include <asm/pgtable_32.h>
388#else 388#else
389# include "pgtable_64.h" 389# include <asm/pgtable_64.h>
390#endif 390#endif
391 391
392#ifndef __ASSEMBLY__ 392#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 013286a10c2c..ec8a1fc9505d 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -174,9 +174,9 @@
174#endif 174#endif
175 175
176#ifdef CONFIG_X86_32 176#ifdef CONFIG_X86_32
177# include "pgtable_32_types.h" 177# include <asm/pgtable_32_types.h>
178#else 178#else
179# include "pgtable_64_types.h" 179# include <asm/pgtable_64_types.h>
180#endif 180#endif
181 181
182#ifndef __ASSEMBLY__ 182#ifndef __ASSEMBLY__
@@ -303,11 +303,9 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pte);
303 303
304extern void native_pagetable_reserve(u64 start, u64 end); 304extern void native_pagetable_reserve(u64 start, u64 end);
305#ifdef CONFIG_X86_32 305#ifdef CONFIG_X86_32
306extern void native_pagetable_setup_start(pgd_t *base); 306extern void native_pagetable_init(void);
307extern void native_pagetable_setup_done(pgd_t *base);
308#else 307#else
309#define native_pagetable_setup_start x86_init_pgd_noop 308#define native_pagetable_init paging_init
310#define native_pagetable_setup_done x86_init_pgd_noop
311#endif 309#endif
312 310
313struct seq_file; 311struct seq_file;
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
index 7ef7c3020e5c..bad3665c25fc 100644
--- a/arch/x86/include/asm/posix_types.h
+++ b/arch/x86/include/asm/posix_types.h
@@ -1,15 +1,15 @@
1#ifdef __KERNEL__ 1#ifdef __KERNEL__
2# ifdef CONFIG_X86_32 2# ifdef CONFIG_X86_32
3# include "posix_types_32.h" 3# include <asm/posix_types_32.h>
4# else 4# else
5# include "posix_types_64.h" 5# include <asm/posix_types_64.h>
6# endif 6# endif
7#else 7#else
8# ifdef __i386__ 8# ifdef __i386__
9# include "posix_types_32.h" 9# include <asm/posix_types_32.h>
10# elif defined(__ILP32__) 10# elif defined(__ILP32__)
11# include "posix_types_x32.h" 11# include <asm/posix_types_x32.h>
12# else 12# else
13# include "posix_types_64.h" 13# include <asm/posix_types_64.h>
14# endif 14# endif
15#endif 15#endif
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index aea1d1d848c7..680cf09ed100 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -65,6 +65,7 @@
65#define X86_CR4_PCIDE 0x00020000 /* enable PCID support */ 65#define X86_CR4_PCIDE 0x00020000 /* enable PCID support */
66#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */ 66#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
67#define X86_CR4_SMEP 0x00100000 /* enable SMEP support */ 67#define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
68#define X86_CR4_SMAP 0x00200000 /* enable SMAP support */
68 69
69/* 70/*
70 * x86-64 Task Priority Register, CR8 71 * x86-64 Task Priority Register, CR8
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index d048cad9bcad..b98c0d958ebb 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -423,7 +423,6 @@ DECLARE_INIT_PER_CPU(irq_stack_union);
423 423
424DECLARE_PER_CPU(char *, irq_stack_ptr); 424DECLARE_PER_CPU(char *, irq_stack_ptr);
425DECLARE_PER_CPU(unsigned int, irq_count); 425DECLARE_PER_CPU(unsigned int, irq_count);
426extern unsigned long kernel_eflags;
427extern asmlinkage void ignore_sysret(void); 426extern asmlinkage void ignore_sysret(void);
428#else /* X86_64 */ 427#else /* X86_64 */
429#ifdef CONFIG_CC_STACKPROTECTOR 428#ifdef CONFIG_CC_STACKPROTECTOR
@@ -759,6 +758,8 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr)
759 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); 758 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
760} 759}
761 760
761extern void set_task_blockstep(struct task_struct *task, bool on);
762
762/* 763/*
763 * from system description table in BIOS. Mostly for MCA use, but 764 * from system description table in BIOS. Mostly for MCA use, but
764 * others may find it useful: 765 * others may find it useful:
diff --git a/arch/x86/include/asm/rcu.h b/arch/x86/include/asm/rcu.h
new file mode 100644
index 000000000000..d1ac07a23979
--- /dev/null
+++ b/arch/x86/include/asm/rcu.h
@@ -0,0 +1,32 @@
1#ifndef _ASM_X86_RCU_H
2#define _ASM_X86_RCU_H
3
4#ifndef __ASSEMBLY__
5
6#include <linux/rcupdate.h>
7#include <asm/ptrace.h>
8
9static inline void exception_enter(struct pt_regs *regs)
10{
11 rcu_user_exit();
12}
13
14static inline void exception_exit(struct pt_regs *regs)
15{
16#ifdef CONFIG_RCU_USER_QS
17 if (user_mode(regs))
18 rcu_user_enter();
19#endif
20}
21
22#else /* __ASSEMBLY__ */
23
24#ifdef CONFIG_RCU_USER_QS
25# define SCHEDULE_USER call schedule_user
26#else
27# define SCHEDULE_USER call schedule
28#endif
29
30#endif /* !__ASSEMBLY__ */
31
32#endif
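The helpers in the new asm/rcu.h are meant to bracket kernel entry points that may have interrupted userspace, so the RCU user-mode extended quiescent state is left on entry and re-entered on exit. A hedged sketch of the intended call pattern; the handler itself is illustrative:

#include <asm/rcu.h>

static void example_trap_handler(struct pt_regs *regs)
{
	exception_enter(regs);	/* rcu_user_exit(): RCU read sides are legal again */

	/* ... the actual exception handling would go here ... */

	exception_exit(regs);	/* back to RCU user mode if we interrupted userspace */
}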
diff --git a/arch/x86/include/asm/seccomp.h b/arch/x86/include/asm/seccomp.h
index c62e58a5a90d..0f3d7f099224 100644
--- a/arch/x86/include/asm/seccomp.h
+++ b/arch/x86/include/asm/seccomp.h
@@ -1,5 +1,5 @@
1#ifdef CONFIG_X86_32 1#ifdef CONFIG_X86_32
2# include "seccomp_32.h" 2# include <asm/seccomp_32.h>
3#else 3#else
4# include "seccomp_64.h" 4# include <asm/seccomp_64.h>
5#endif 5#endif
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index 598457cbd0f8..323973f4abf1 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -31,6 +31,10 @@ typedef struct {
31 unsigned long sig[_NSIG_WORDS]; 31 unsigned long sig[_NSIG_WORDS];
32} sigset_t; 32} sigset_t;
33 33
34#ifndef CONFIG_COMPAT
35typedef sigset_t compat_sigset_t;
36#endif
37
34#else 38#else
35/* Here we must cater to libcs that poke about in kernel headers. */ 39/* Here we must cater to libcs that poke about in kernel headers. */
36 40
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
new file mode 100644
index 000000000000..8d3120f4e270
--- /dev/null
+++ b/arch/x86/include/asm/smap.h
@@ -0,0 +1,91 @@
1/*
2 * Supervisor Mode Access Prevention support
3 *
4 * Copyright (C) 2012 Intel Corporation
5 * Author: H. Peter Anvin <hpa@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12
13#ifndef _ASM_X86_SMAP_H
14#define _ASM_X86_SMAP_H
15
16#include <linux/stringify.h>
17#include <asm/nops.h>
18#include <asm/cpufeature.h>
19
20/* "Raw" instruction opcodes */
21#define __ASM_CLAC .byte 0x0f,0x01,0xca
22#define __ASM_STAC .byte 0x0f,0x01,0xcb
23
24#ifdef __ASSEMBLY__
25
26#include <asm/alternative-asm.h>
27
28#ifdef CONFIG_X86_SMAP
29
30#define ASM_CLAC \
31 661: ASM_NOP3 ; \
32 .pushsection .altinstr_replacement, "ax" ; \
33 662: __ASM_CLAC ; \
34 .popsection ; \
35 .pushsection .altinstructions, "a" ; \
36 altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
37 .popsection
38
39#define ASM_STAC \
40 661: ASM_NOP3 ; \
41 .pushsection .altinstr_replacement, "ax" ; \
42 662: __ASM_STAC ; \
43 .popsection ; \
44 .pushsection .altinstructions, "a" ; \
45 altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
46 .popsection
47
48#else /* CONFIG_X86_SMAP */
49
50#define ASM_CLAC
51#define ASM_STAC
52
53#endif /* CONFIG_X86_SMAP */
54
55#else /* __ASSEMBLY__ */
56
57#include <asm/alternative.h>
58
59#ifdef CONFIG_X86_SMAP
60
61static __always_inline void clac(void)
62{
63 /* Note: a barrier is implicit in alternative() */
64 alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
65}
66
67static __always_inline void stac(void)
68{
69 /* Note: a barrier is implicit in alternative() */
70 alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP);
71}
72
73/* These macros can be used in asm() statements */
74#define ASM_CLAC \
75 ALTERNATIVE(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
76#define ASM_STAC \
77 ALTERNATIVE(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP)
78
79#else /* CONFIG_X86_SMAP */
80
81static inline void clac(void) { }
82static inline void stac(void) { }
83
84#define ASM_CLAC
85#define ASM_STAC
86
87#endif /* CONFIG_X86_SMAP */
88
89#endif /* __ASSEMBLY__ */
90
91#endif /* _ASM_X86_SMAP_H */
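On the C side of the new header, clac()/stac() are intended to open and close the narrowest possible window in which supervisor code may touch user pages, the same pattern the uaccess.h changes further down apply around their mov instructions. A minimal sketch, with the actual user access left as a placeholder:

#include <asm/smap.h>

static void example_user_access_window(void)
{
	stac();		/* EFLAGS.AC = 1: SMAP permits supervisor access to user pages */

	/* ... a carefully bounded access to __user memory would go here ... */

	clac();		/* EFLAGS.AC = 0: SMAP protection is back in force */
}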
diff --git a/arch/x86/include/asm/string.h b/arch/x86/include/asm/string.h
index 6dfd6d9373a0..09224d7a5862 100644
--- a/arch/x86/include/asm/string.h
+++ b/arch/x86/include/asm/string.h
@@ -1,5 +1,5 @@
1#ifdef CONFIG_X86_32 1#ifdef CONFIG_X86_32
2# include "string_32.h" 2# include <asm/string_32.h>
3#else 3#else
4# include "string_64.h" 4# include <asm/string_64.h>
5#endif 5#endif
diff --git a/arch/x86/include/asm/suspend.h b/arch/x86/include/asm/suspend.h
index 9bd521fe4570..2fab6c2c3575 100644
--- a/arch/x86/include/asm/suspend.h
+++ b/arch/x86/include/asm/suspend.h
@@ -1,5 +1,5 @@
1#ifdef CONFIG_X86_32 1#ifdef CONFIG_X86_32
2# include "suspend_32.h" 2# include <asm/suspend_32.h>
3#else 3#else
4# include "suspend_64.h" 4# include <asm/suspend_64.h>
5#endif 5#endif
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index f2b83bc7d784..cdf5674dd23a 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -1,6 +1,135 @@
1#ifndef __SVM_H 1#ifndef __SVM_H
2#define __SVM_H 2#define __SVM_H
3 3
4#define SVM_EXIT_READ_CR0 0x000
5#define SVM_EXIT_READ_CR3 0x003
6#define SVM_EXIT_READ_CR4 0x004
7#define SVM_EXIT_READ_CR8 0x008
8#define SVM_EXIT_WRITE_CR0 0x010
9#define SVM_EXIT_WRITE_CR3 0x013
10#define SVM_EXIT_WRITE_CR4 0x014
11#define SVM_EXIT_WRITE_CR8 0x018
12#define SVM_EXIT_READ_DR0 0x020
13#define SVM_EXIT_READ_DR1 0x021
14#define SVM_EXIT_READ_DR2 0x022
15#define SVM_EXIT_READ_DR3 0x023
16#define SVM_EXIT_READ_DR4 0x024
17#define SVM_EXIT_READ_DR5 0x025
18#define SVM_EXIT_READ_DR6 0x026
19#define SVM_EXIT_READ_DR7 0x027
20#define SVM_EXIT_WRITE_DR0 0x030
21#define SVM_EXIT_WRITE_DR1 0x031
22#define SVM_EXIT_WRITE_DR2 0x032
23#define SVM_EXIT_WRITE_DR3 0x033
24#define SVM_EXIT_WRITE_DR4 0x034
25#define SVM_EXIT_WRITE_DR5 0x035
26#define SVM_EXIT_WRITE_DR6 0x036
27#define SVM_EXIT_WRITE_DR7 0x037
28#define SVM_EXIT_EXCP_BASE 0x040
29#define SVM_EXIT_INTR 0x060
30#define SVM_EXIT_NMI 0x061
31#define SVM_EXIT_SMI 0x062
32#define SVM_EXIT_INIT 0x063
33#define SVM_EXIT_VINTR 0x064
34#define SVM_EXIT_CR0_SEL_WRITE 0x065
35#define SVM_EXIT_IDTR_READ 0x066
36#define SVM_EXIT_GDTR_READ 0x067
37#define SVM_EXIT_LDTR_READ 0x068
38#define SVM_EXIT_TR_READ 0x069
39#define SVM_EXIT_IDTR_WRITE 0x06a
40#define SVM_EXIT_GDTR_WRITE 0x06b
41#define SVM_EXIT_LDTR_WRITE 0x06c
42#define SVM_EXIT_TR_WRITE 0x06d
43#define SVM_EXIT_RDTSC 0x06e
44#define SVM_EXIT_RDPMC 0x06f
45#define SVM_EXIT_PUSHF 0x070
46#define SVM_EXIT_POPF 0x071
47#define SVM_EXIT_CPUID 0x072
48#define SVM_EXIT_RSM 0x073
49#define SVM_EXIT_IRET 0x074
50#define SVM_EXIT_SWINT 0x075
51#define SVM_EXIT_INVD 0x076
52#define SVM_EXIT_PAUSE 0x077
53#define SVM_EXIT_HLT 0x078
54#define SVM_EXIT_INVLPG 0x079
55#define SVM_EXIT_INVLPGA 0x07a
56#define SVM_EXIT_IOIO 0x07b
57#define SVM_EXIT_MSR 0x07c
58#define SVM_EXIT_TASK_SWITCH 0x07d
59#define SVM_EXIT_FERR_FREEZE 0x07e
60#define SVM_EXIT_SHUTDOWN 0x07f
61#define SVM_EXIT_VMRUN 0x080
62#define SVM_EXIT_VMMCALL 0x081
63#define SVM_EXIT_VMLOAD 0x082
64#define SVM_EXIT_VMSAVE 0x083
65#define SVM_EXIT_STGI 0x084
66#define SVM_EXIT_CLGI 0x085
67#define SVM_EXIT_SKINIT 0x086
68#define SVM_EXIT_RDTSCP 0x087
69#define SVM_EXIT_ICEBP 0x088
70#define SVM_EXIT_WBINVD 0x089
71#define SVM_EXIT_MONITOR 0x08a
72#define SVM_EXIT_MWAIT 0x08b
73#define SVM_EXIT_MWAIT_COND 0x08c
74#define SVM_EXIT_XSETBV 0x08d
75#define SVM_EXIT_NPF 0x400
76
77#define SVM_EXIT_ERR -1
78
79#define SVM_EXIT_REASONS \
80 { SVM_EXIT_READ_CR0, "read_cr0" }, \
81 { SVM_EXIT_READ_CR3, "read_cr3" }, \
82 { SVM_EXIT_READ_CR4, "read_cr4" }, \
83 { SVM_EXIT_READ_CR8, "read_cr8" }, \
84 { SVM_EXIT_WRITE_CR0, "write_cr0" }, \
85 { SVM_EXIT_WRITE_CR3, "write_cr3" }, \
86 { SVM_EXIT_WRITE_CR4, "write_cr4" }, \
87 { SVM_EXIT_WRITE_CR8, "write_cr8" }, \
88 { SVM_EXIT_READ_DR0, "read_dr0" }, \
89 { SVM_EXIT_READ_DR1, "read_dr1" }, \
90 { SVM_EXIT_READ_DR2, "read_dr2" }, \
91 { SVM_EXIT_READ_DR3, "read_dr3" }, \
92 { SVM_EXIT_WRITE_DR0, "write_dr0" }, \
93 { SVM_EXIT_WRITE_DR1, "write_dr1" }, \
94 { SVM_EXIT_WRITE_DR2, "write_dr2" }, \
95 { SVM_EXIT_WRITE_DR3, "write_dr3" }, \
96 { SVM_EXIT_WRITE_DR5, "write_dr5" }, \
97 { SVM_EXIT_WRITE_DR7, "write_dr7" }, \
98 { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \
99 { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \
100 { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
101 { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
102 { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
103 { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
104 { SVM_EXIT_INTR, "interrupt" }, \
105 { SVM_EXIT_NMI, "nmi" }, \
106 { SVM_EXIT_SMI, "smi" }, \
107 { SVM_EXIT_INIT, "init" }, \
108 { SVM_EXIT_VINTR, "vintr" }, \
109 { SVM_EXIT_CPUID, "cpuid" }, \
110 { SVM_EXIT_INVD, "invd" }, \
111 { SVM_EXIT_HLT, "hlt" }, \
112 { SVM_EXIT_INVLPG, "invlpg" }, \
113 { SVM_EXIT_INVLPGA, "invlpga" }, \
114 { SVM_EXIT_IOIO, "io" }, \
115 { SVM_EXIT_MSR, "msr" }, \
116 { SVM_EXIT_TASK_SWITCH, "task_switch" }, \
117 { SVM_EXIT_SHUTDOWN, "shutdown" }, \
118 { SVM_EXIT_VMRUN, "vmrun" }, \
119 { SVM_EXIT_VMMCALL, "hypercall" }, \
120 { SVM_EXIT_VMLOAD, "vmload" }, \
121 { SVM_EXIT_VMSAVE, "vmsave" }, \
122 { SVM_EXIT_STGI, "stgi" }, \
123 { SVM_EXIT_CLGI, "clgi" }, \
124 { SVM_EXIT_SKINIT, "skinit" }, \
125 { SVM_EXIT_WBINVD, "wbinvd" }, \
126 { SVM_EXIT_MONITOR, "monitor" }, \
127 { SVM_EXIT_MWAIT, "mwait" }, \
128 { SVM_EXIT_XSETBV, "xsetbv" }, \
129 { SVM_EXIT_NPF, "npf" }
130
131#ifdef __KERNEL__
132
4enum { 133enum {
5 INTERCEPT_INTR, 134 INTERCEPT_INTR,
6 INTERCEPT_NMI, 135 INTERCEPT_NMI,
@@ -264,81 +393,6 @@ struct __attribute__ ((__packed__)) vmcb {
264 393
265#define SVM_EXITINFO_REG_MASK 0x0F 394#define SVM_EXITINFO_REG_MASK 0x0F
266 395
267#define SVM_EXIT_READ_CR0 0x000
268#define SVM_EXIT_READ_CR3 0x003
269#define SVM_EXIT_READ_CR4 0x004
270#define SVM_EXIT_READ_CR8 0x008
271#define SVM_EXIT_WRITE_CR0 0x010
272#define SVM_EXIT_WRITE_CR3 0x013
273#define SVM_EXIT_WRITE_CR4 0x014
274#define SVM_EXIT_WRITE_CR8 0x018
275#define SVM_EXIT_READ_DR0 0x020
276#define SVM_EXIT_READ_DR1 0x021
277#define SVM_EXIT_READ_DR2 0x022
278#define SVM_EXIT_READ_DR3 0x023
279#define SVM_EXIT_READ_DR4 0x024
280#define SVM_EXIT_READ_DR5 0x025
281#define SVM_EXIT_READ_DR6 0x026
282#define SVM_EXIT_READ_DR7 0x027
283#define SVM_EXIT_WRITE_DR0 0x030
284#define SVM_EXIT_WRITE_DR1 0x031
285#define SVM_EXIT_WRITE_DR2 0x032
286#define SVM_EXIT_WRITE_DR3 0x033
287#define SVM_EXIT_WRITE_DR4 0x034
288#define SVM_EXIT_WRITE_DR5 0x035
289#define SVM_EXIT_WRITE_DR6 0x036
290#define SVM_EXIT_WRITE_DR7 0x037
291#define SVM_EXIT_EXCP_BASE 0x040
292#define SVM_EXIT_INTR 0x060
293#define SVM_EXIT_NMI 0x061
294#define SVM_EXIT_SMI 0x062
295#define SVM_EXIT_INIT 0x063
296#define SVM_EXIT_VINTR 0x064
297#define SVM_EXIT_CR0_SEL_WRITE 0x065
298#define SVM_EXIT_IDTR_READ 0x066
299#define SVM_EXIT_GDTR_READ 0x067
300#define SVM_EXIT_LDTR_READ 0x068
301#define SVM_EXIT_TR_READ 0x069
302#define SVM_EXIT_IDTR_WRITE 0x06a
303#define SVM_EXIT_GDTR_WRITE 0x06b
304#define SVM_EXIT_LDTR_WRITE 0x06c
305#define SVM_EXIT_TR_WRITE 0x06d
306#define SVM_EXIT_RDTSC 0x06e
307#define SVM_EXIT_RDPMC 0x06f
308#define SVM_EXIT_PUSHF 0x070
309#define SVM_EXIT_POPF 0x071
310#define SVM_EXIT_CPUID 0x072
311#define SVM_EXIT_RSM 0x073
312#define SVM_EXIT_IRET 0x074
313#define SVM_EXIT_SWINT 0x075
314#define SVM_EXIT_INVD 0x076
315#define SVM_EXIT_PAUSE 0x077
316#define SVM_EXIT_HLT 0x078
317#define SVM_EXIT_INVLPG 0x079
318#define SVM_EXIT_INVLPGA 0x07a
319#define SVM_EXIT_IOIO 0x07b
320#define SVM_EXIT_MSR 0x07c
321#define SVM_EXIT_TASK_SWITCH 0x07d
322#define SVM_EXIT_FERR_FREEZE 0x07e
323#define SVM_EXIT_SHUTDOWN 0x07f
324#define SVM_EXIT_VMRUN 0x080
325#define SVM_EXIT_VMMCALL 0x081
326#define SVM_EXIT_VMLOAD 0x082
327#define SVM_EXIT_VMSAVE 0x083
328#define SVM_EXIT_STGI 0x084
329#define SVM_EXIT_CLGI 0x085
330#define SVM_EXIT_SKINIT 0x086
331#define SVM_EXIT_RDTSCP 0x087
332#define SVM_EXIT_ICEBP 0x088
333#define SVM_EXIT_WBINVD 0x089
334#define SVM_EXIT_MONITOR 0x08a
335#define SVM_EXIT_MWAIT 0x08b
336#define SVM_EXIT_MWAIT_COND 0x08c
337#define SVM_EXIT_XSETBV 0x08d
338#define SVM_EXIT_NPF 0x400
339
340#define SVM_EXIT_ERR -1
341
342#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP) 396#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
343 397
344#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" 398#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
@@ -350,3 +404,4 @@ struct __attribute__ ((__packed__)) vmcb {
350 404
351#endif 405#endif
352 406
407#endif
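The exit-code table moved above the __KERNEL__ guard is laid out as { value, "name" } pairs, the format __print_symbolic() consumes in a tracepoint's TP_printk(), which is also why userspace tooling now needs to see it. A hedged stand-alone decoder built from the same macro; the struct and function names are illustrative:

#include <linux/kernel.h>
#include <asm/kvm_host.h>	/* assumed source of the *_VECTOR numbers used by the macro */
#include <asm/svm.h>

struct example_exit_name {
	unsigned long code;
	const char *name;
};

static const struct example_exit_name example_svm_exits[] = { SVM_EXIT_REASONS };

static const char *example_svm_exit_str(unsigned long code)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(example_svm_exits); i++)
		if (example_svm_exits[i].code == code)
			return example_svm_exits[i].name;
	return "unknown";
}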
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 3fda9db48819..4ca1c611b552 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
40 struct old_sigaction32 __user *); 40 struct old_sigaction32 __user *);
41asmlinkage long sys32_alarm(unsigned int); 41asmlinkage long sys32_alarm(unsigned int);
42 42
43asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int); 43asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
44asmlinkage long sys32_sysfs(int, u32, u32); 44asmlinkage long sys32_sysfs(int, u32, u32);
45 45
46asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, 46asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 89f794f007ec..c535d847e3b5 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -89,6 +89,7 @@ struct thread_info {
89#define TIF_NOTSC 16 /* TSC is not accessible in userland */ 89#define TIF_NOTSC 16 /* TSC is not accessible in userland */
90#define TIF_IA32 17 /* IA32 compatibility process */ 90#define TIF_IA32 17 /* IA32 compatibility process */
91#define TIF_FORK 18 /* ret_from_fork */ 91#define TIF_FORK 18 /* ret_from_fork */
92#define TIF_NOHZ 19 /* in adaptive nohz mode */
92#define TIF_MEMDIE 20 /* is terminating due to OOM killer */ 93#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
93#define TIF_DEBUG 21 /* uses debug registers */ 94#define TIF_DEBUG 21 /* uses debug registers */
94#define TIF_IO_BITMAP 22 /* uses I/O bitmap */ 95#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
@@ -114,6 +115,7 @@ struct thread_info {
114#define _TIF_NOTSC (1 << TIF_NOTSC) 115#define _TIF_NOTSC (1 << TIF_NOTSC)
115#define _TIF_IA32 (1 << TIF_IA32) 116#define _TIF_IA32 (1 << TIF_IA32)
116#define _TIF_FORK (1 << TIF_FORK) 117#define _TIF_FORK (1 << TIF_FORK)
118#define _TIF_NOHZ (1 << TIF_NOHZ)
117#define _TIF_DEBUG (1 << TIF_DEBUG) 119#define _TIF_DEBUG (1 << TIF_DEBUG)
118#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) 120#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
119#define _TIF_FORCED_TF (1 << TIF_FORCED_TF) 121#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
@@ -126,12 +128,13 @@ struct thread_info {
126/* work to do in syscall_trace_enter() */ 128/* work to do in syscall_trace_enter() */
127#define _TIF_WORK_SYSCALL_ENTRY \ 129#define _TIF_WORK_SYSCALL_ENTRY \
128 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \ 130 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
129 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT) 131 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
132 _TIF_NOHZ)
130 133
131/* work to do in syscall_trace_leave() */ 134/* work to do in syscall_trace_leave() */
132#define _TIF_WORK_SYSCALL_EXIT \ 135#define _TIF_WORK_SYSCALL_EXIT \
133 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ 136 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
134 _TIF_SYSCALL_TRACEPOINT) 137 _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
135 138
136/* work to do on interrupt/exception return */ 139/* work to do on interrupt/exception return */
137#define _TIF_WORK_MASK \ 140#define _TIF_WORK_MASK \
@@ -141,7 +144,8 @@ struct thread_info {
141 144
142/* work to do on any return to user space */ 145/* work to do on any return to user space */
143#define _TIF_ALLWORK_MASK \ 146#define _TIF_ALLWORK_MASK \
144 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT) 147 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
148 _TIF_NOHZ)
145 149
146/* Only used for 64 bit */ 150/* Only used for 64 bit */
147#define _TIF_DO_NOTIFY_MASK \ 151#define _TIF_DO_NOTIFY_MASK \
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index e1f3a17034fc..7ccf8d131535 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -9,6 +9,7 @@
9#include <linux/string.h> 9#include <linux/string.h>
10#include <asm/asm.h> 10#include <asm/asm.h>
11#include <asm/page.h> 11#include <asm/page.h>
12#include <asm/smap.h>
12 13
13#define VERIFY_READ 0 14#define VERIFY_READ 0
14#define VERIFY_WRITE 1 15#define VERIFY_WRITE 1
@@ -192,9 +193,10 @@ extern int __get_user_bad(void);
192 193
193#ifdef CONFIG_X86_32 194#ifdef CONFIG_X86_32
194#define __put_user_asm_u64(x, addr, err, errret) \ 195#define __put_user_asm_u64(x, addr, err, errret) \
195 asm volatile("1: movl %%eax,0(%2)\n" \ 196 asm volatile(ASM_STAC "\n" \
197 "1: movl %%eax,0(%2)\n" \
196 "2: movl %%edx,4(%2)\n" \ 198 "2: movl %%edx,4(%2)\n" \
197 "3:\n" \ 199 "3: " ASM_CLAC "\n" \
198 ".section .fixup,\"ax\"\n" \ 200 ".section .fixup,\"ax\"\n" \
199 "4: movl %3,%0\n" \ 201 "4: movl %3,%0\n" \
200 " jmp 3b\n" \ 202 " jmp 3b\n" \
@@ -205,9 +207,10 @@ extern int __get_user_bad(void);
205 : "A" (x), "r" (addr), "i" (errret), "0" (err)) 207 : "A" (x), "r" (addr), "i" (errret), "0" (err))
206 208
207#define __put_user_asm_ex_u64(x, addr) \ 209#define __put_user_asm_ex_u64(x, addr) \
208 asm volatile("1: movl %%eax,0(%1)\n" \ 210 asm volatile(ASM_STAC "\n" \
211 "1: movl %%eax,0(%1)\n" \
209 "2: movl %%edx,4(%1)\n" \ 212 "2: movl %%edx,4(%1)\n" \
210 "3:\n" \ 213 "3: " ASM_CLAC "\n" \
211 _ASM_EXTABLE_EX(1b, 2b) \ 214 _ASM_EXTABLE_EX(1b, 2b) \
212 _ASM_EXTABLE_EX(2b, 3b) \ 215 _ASM_EXTABLE_EX(2b, 3b) \
213 : : "A" (x), "r" (addr)) 216 : : "A" (x), "r" (addr))
@@ -379,8 +382,9 @@ do { \
379} while (0) 382} while (0)
380 383
381#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ 384#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
382 asm volatile("1: mov"itype" %2,%"rtype"1\n" \ 385 asm volatile(ASM_STAC "\n" \
383 "2:\n" \ 386 "1: mov"itype" %2,%"rtype"1\n" \
387 "2: " ASM_CLAC "\n" \
384 ".section .fixup,\"ax\"\n" \ 388 ".section .fixup,\"ax\"\n" \
385 "3: mov %3,%0\n" \ 389 "3: mov %3,%0\n" \
386 " xor"itype" %"rtype"1,%"rtype"1\n" \ 390 " xor"itype" %"rtype"1,%"rtype"1\n" \
@@ -443,8 +447,9 @@ struct __large_struct { unsigned long buf[100]; };
443 * aliasing issues. 447 * aliasing issues.
444 */ 448 */
445#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ 449#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
446 asm volatile("1: mov"itype" %"rtype"1,%2\n" \ 450 asm volatile(ASM_STAC "\n" \
447 "2:\n" \ 451 "1: mov"itype" %"rtype"1,%2\n" \
452 "2: " ASM_CLAC "\n" \
448 ".section .fixup,\"ax\"\n" \ 453 ".section .fixup,\"ax\"\n" \
449 "3: mov %3,%0\n" \ 454 "3: mov %3,%0\n" \
450 " jmp 2b\n" \ 455 " jmp 2b\n" \
@@ -463,13 +468,13 @@ struct __large_struct { unsigned long buf[100]; };
463 * uaccess_try and catch 468 * uaccess_try and catch
464 */ 469 */
465#define uaccess_try do { \ 470#define uaccess_try do { \
466 int prev_err = current_thread_info()->uaccess_err; \
467 current_thread_info()->uaccess_err = 0; \ 471 current_thread_info()->uaccess_err = 0; \
472 stac(); \
468 barrier(); 473 barrier();
469 474
470#define uaccess_catch(err) \ 475#define uaccess_catch(err) \
476 clac(); \
471 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \ 477 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
472 current_thread_info()->uaccess_err = prev_err; \
473} while (0) 478} while (0)
474 479
475/** 480/**
@@ -569,6 +574,9 @@ strncpy_from_user(char *dst, const char __user *src, long count);
569extern __must_check long strlen_user(const char __user *str); 574extern __must_check long strlen_user(const char __user *str);
570extern __must_check long strnlen_user(const char __user *str, long n); 575extern __must_check long strnlen_user(const char __user *str, long n);
571 576
577unsigned long __must_check clear_user(void __user *mem, unsigned long len);
578unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
579
572/* 580/*
573 * movsl can be slow when source and dest are not both 8-byte aligned 581 * movsl can be slow when source and dest are not both 8-byte aligned
574 */ 582 */
@@ -581,9 +589,9 @@ extern struct movsl_mask {
581#define ARCH_HAS_NOCACHE_UACCESS 1 589#define ARCH_HAS_NOCACHE_UACCESS 1
582 590
583#ifdef CONFIG_X86_32 591#ifdef CONFIG_X86_32
584# include "uaccess_32.h" 592# include <asm/uaccess_32.h>
585#else 593#else
586# include "uaccess_64.h" 594# include <asm/uaccess_64.h>
587#endif 595#endif
588 596
589#endif /* _ASM_X86_UACCESS_H */ 597#endif /* _ASM_X86_UACCESS_H */
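With the prev_err save/restore dropped, uaccess_try now just clears uaccess_err and does stac(), and uaccess_catch does clac() before collecting -EFAULT. A hedged sketch of how the pair is normally reached through the existing *_try/*_ex/*_catch wrappers from this header; the function and its layout are illustrative:

#include <asm/uaccess.h>

static int example_read_two_words(const u32 __user *src, u32 *a, u32 *b)
{
	int err = 0;

	get_user_try {			/* uaccess_try: uaccess_err = 0, stac() */
		get_user_ex(*a, &src[0]);
		get_user_ex(*b, &src[1]);
	} get_user_catch(err);		/* uaccess_catch: clac(), err |= -EFAULT on fault */

	return err;
}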
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 576e39bca6ad..7f760a9f1f61 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -213,7 +213,4 @@ static inline unsigned long __must_check copy_from_user(void *to,
213 return n; 213 return n;
214} 214}
215 215
216unsigned long __must_check clear_user(void __user *mem, unsigned long len);
217unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
218
219#endif /* _ASM_X86_UACCESS_32_H */ 216#endif /* _ASM_X86_UACCESS_32_H */
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index d8def8b3dba0..142810c457dc 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -217,9 +217,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
217 } 217 }
218} 218}
219 219
220__must_check unsigned long clear_user(void __user *mem, unsigned long len);
221__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
222
223static __must_check __always_inline int 220static __must_check __always_inline int
224__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) 221__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
225{ 222{
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index f3971bbcd1de..8ff8be7835ab 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -42,10 +42,11 @@ struct arch_uprobe {
42}; 42};
43 43
44struct arch_uprobe_task { 44struct arch_uprobe_task {
45 unsigned long saved_trap_nr;
46#ifdef CONFIG_X86_64 45#ifdef CONFIG_X86_64
47 unsigned long saved_scratch_register; 46 unsigned long saved_scratch_register;
48#endif 47#endif
48 unsigned int saved_trap_nr;
49 unsigned int saved_tf;
49}; 50};
50 51
51extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr); 52extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
diff --git a/arch/x86/include/asm/user.h b/arch/x86/include/asm/user.h
index 24532c7da3d6..ccab4af1646d 100644
--- a/arch/x86/include/asm/user.h
+++ b/arch/x86/include/asm/user.h
@@ -2,9 +2,9 @@
2#define _ASM_X86_USER_H 2#define _ASM_X86_USER_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5# include "user_32.h" 5# include <asm/user_32.h>
6#else 6#else
7# include "user_64.h" 7# include <asm/user_64.h>
8#endif 8#endif
9 9
10#include <asm/types.h> 10#include <asm/types.h>
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index bb0522850b74..fddb53d63915 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -11,7 +11,8 @@ extern const char VDSO32_PRELINK[];
11#define VDSO32_SYMBOL(base, name) \ 11#define VDSO32_SYMBOL(base, name) \
12({ \ 12({ \
13 extern const char VDSO32_##name[]; \ 13 extern const char VDSO32_##name[]; \
14 (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \ 14 (void __user *)(VDSO32_##name - VDSO32_PRELINK + \
15 (unsigned long)(base)); \
15}) 16})
16#endif 17#endif
17 18
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 74fcb963595b..36ec21c36d68 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -25,6 +25,88 @@
25 * 25 *
26 */ 26 */
27 27
28#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
29
30#define EXIT_REASON_EXCEPTION_NMI 0
31#define EXIT_REASON_EXTERNAL_INTERRUPT 1
32#define EXIT_REASON_TRIPLE_FAULT 2
33
34#define EXIT_REASON_PENDING_INTERRUPT 7
35#define EXIT_REASON_NMI_WINDOW 8
36#define EXIT_REASON_TASK_SWITCH 9
37#define EXIT_REASON_CPUID 10
38#define EXIT_REASON_HLT 12
39#define EXIT_REASON_INVD 13
40#define EXIT_REASON_INVLPG 14
41#define EXIT_REASON_RDPMC 15
42#define EXIT_REASON_RDTSC 16
43#define EXIT_REASON_VMCALL 18
44#define EXIT_REASON_VMCLEAR 19
45#define EXIT_REASON_VMLAUNCH 20
46#define EXIT_REASON_VMPTRLD 21
47#define EXIT_REASON_VMPTRST 22
48#define EXIT_REASON_VMREAD 23
49#define EXIT_REASON_VMRESUME 24
50#define EXIT_REASON_VMWRITE 25
51#define EXIT_REASON_VMOFF 26
52#define EXIT_REASON_VMON 27
53#define EXIT_REASON_CR_ACCESS 28
54#define EXIT_REASON_DR_ACCESS 29
55#define EXIT_REASON_IO_INSTRUCTION 30
56#define EXIT_REASON_MSR_READ 31
57#define EXIT_REASON_MSR_WRITE 32
58#define EXIT_REASON_INVALID_STATE 33
59#define EXIT_REASON_MWAIT_INSTRUCTION 36
60#define EXIT_REASON_MONITOR_INSTRUCTION 39
61#define EXIT_REASON_PAUSE_INSTRUCTION 40
62#define EXIT_REASON_MCE_DURING_VMENTRY 41
63#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
64#define EXIT_REASON_APIC_ACCESS 44
65#define EXIT_REASON_EPT_VIOLATION 48
66#define EXIT_REASON_EPT_MISCONFIG 49
67#define EXIT_REASON_WBINVD 54
68#define EXIT_REASON_XSETBV 55
69#define EXIT_REASON_INVPCID 58
70
71#define VMX_EXIT_REASONS \
72 { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
73 { EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
74 { EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
75 { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
76 { EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
77 { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
78 { EXIT_REASON_CPUID, "CPUID" }, \
79 { EXIT_REASON_HLT, "HLT" }, \
80 { EXIT_REASON_INVLPG, "INVLPG" }, \
81 { EXIT_REASON_RDPMC, "RDPMC" }, \
82 { EXIT_REASON_RDTSC, "RDTSC" }, \
83 { EXIT_REASON_VMCALL, "VMCALL" }, \
84 { EXIT_REASON_VMCLEAR, "VMCLEAR" }, \
85 { EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \
86 { EXIT_REASON_VMPTRLD, "VMPTRLD" }, \
87 { EXIT_REASON_VMPTRST, "VMPTRST" }, \
88 { EXIT_REASON_VMREAD, "VMREAD" }, \
89 { EXIT_REASON_VMRESUME, "VMRESUME" }, \
90 { EXIT_REASON_VMWRITE, "VMWRITE" }, \
91 { EXIT_REASON_VMOFF, "VMOFF" }, \
92 { EXIT_REASON_VMON, "VMON" }, \
93 { EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \
94 { EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \
95 { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \
96 { EXIT_REASON_MSR_READ, "MSR_READ" }, \
97 { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \
98 { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \
99 { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \
100 { EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \
101 { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
102 { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
103 { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
104 { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
105 { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
106 { EXIT_REASON_WBINVD, "WBINVD" }
107
108#ifdef __KERNEL__
109
28#include <linux/types.h> 110#include <linux/types.h>
29 111
30/* 112/*
@@ -241,49 +323,6 @@ enum vmcs_field {
241 HOST_RIP = 0x00006c16, 323 HOST_RIP = 0x00006c16,
242}; 324};
243 325
244#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
245
246#define EXIT_REASON_EXCEPTION_NMI 0
247#define EXIT_REASON_EXTERNAL_INTERRUPT 1
248#define EXIT_REASON_TRIPLE_FAULT 2
249
250#define EXIT_REASON_PENDING_INTERRUPT 7
251#define EXIT_REASON_NMI_WINDOW 8
252#define EXIT_REASON_TASK_SWITCH 9
253#define EXIT_REASON_CPUID 10
254#define EXIT_REASON_HLT 12
255#define EXIT_REASON_INVD 13
256#define EXIT_REASON_INVLPG 14
257#define EXIT_REASON_RDPMC 15
258#define EXIT_REASON_RDTSC 16
259#define EXIT_REASON_VMCALL 18
260#define EXIT_REASON_VMCLEAR 19
261#define EXIT_REASON_VMLAUNCH 20
262#define EXIT_REASON_VMPTRLD 21
263#define EXIT_REASON_VMPTRST 22
264#define EXIT_REASON_VMREAD 23
265#define EXIT_REASON_VMRESUME 24
266#define EXIT_REASON_VMWRITE 25
267#define EXIT_REASON_VMOFF 26
268#define EXIT_REASON_VMON 27
269#define EXIT_REASON_CR_ACCESS 28
270#define EXIT_REASON_DR_ACCESS 29
271#define EXIT_REASON_IO_INSTRUCTION 30
272#define EXIT_REASON_MSR_READ 31
273#define EXIT_REASON_MSR_WRITE 32
274#define EXIT_REASON_INVALID_STATE 33
275#define EXIT_REASON_MWAIT_INSTRUCTION 36
276#define EXIT_REASON_MONITOR_INSTRUCTION 39
277#define EXIT_REASON_PAUSE_INSTRUCTION 40
278#define EXIT_REASON_MCE_DURING_VMENTRY 41
279#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
280#define EXIT_REASON_APIC_ACCESS 44
281#define EXIT_REASON_EPT_VIOLATION 48
282#define EXIT_REASON_EPT_MISCONFIG 49
283#define EXIT_REASON_WBINVD 54
284#define EXIT_REASON_XSETBV 55
285#define EXIT_REASON_INVPCID 58
286
287/* 326/*
288 * Interruption-information format 327 * Interruption-information format
289 */ 328 */
@@ -488,3 +527,5 @@ enum vm_instruction_error_number {
488}; 527};
489 528
490#endif 529#endif
530
531#endif
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 38155f667144..57693498519c 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -81,12 +81,13 @@ struct x86_init_mapping {
81 81
82/** 82/**
83 * struct x86_init_paging - platform specific paging functions 83 * struct x86_init_paging - platform specific paging functions
84 * @pagetable_setup_start: platform specific pre paging_init() call 84 * @pagetable_init: platform specific paging initialization call to setup
85 * @pagetable_setup_done: platform specific post paging_init() call 85 * the kernel pagetables and prepare accessors functions.
86 * Callback must call paging_init(). Called once after the
87 * direct mapping for phys memory is available.
86 */ 88 */
87struct x86_init_paging { 89struct x86_init_paging {
88 void (*pagetable_setup_start)(pgd_t *base); 90 void (*pagetable_init)(void);
89 void (*pagetable_setup_done)(pgd_t *base);
90}; 91};
91 92
92/** 93/**
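The two-step setup_start/setup_done pair collapses into a single pagetable_init hook that is responsible for ending up in paging_init() itself once the direct mapping is available. A hedged sketch of what a platform override now looks like; the function names are illustrative, not taken from this series:

#include <linux/init.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>

static void __init example_platform_pagetable_init(void)
{
	/* ... platform-specific page table fixups would go here ... */

	paging_init();		/* the callback must call paging_init() */
}

static void __init example_platform_setup(void)
{
	x86_init.paging.pagetable_init = example_platform_pagetable_init;
}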
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index cbf0c9d50b92..1707cfa928fb 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -47,6 +47,10 @@
47#endif 47#endif
48 48
49#ifndef __ASSEMBLY__ 49#ifndef __ASSEMBLY__
50/* Explicitly size integers that represent pfns in the public interface
51 * with Xen so that on ARM we can have one ABI that works for 32 and 64
52 * bit guests. */
53typedef unsigned long xen_pfn_t;
50/* Guest handles for primitive C types. */ 54/* Guest handles for primitive C types. */
51__DEFINE_GUEST_HANDLE(uchar, unsigned char); 55__DEFINE_GUEST_HANDLE(uchar, unsigned char);
52__DEFINE_GUEST_HANDLE(uint, unsigned int); 56__DEFINE_GUEST_HANDLE(uint, unsigned int);
@@ -57,6 +61,7 @@ DEFINE_GUEST_HANDLE(long);
57DEFINE_GUEST_HANDLE(void); 61DEFINE_GUEST_HANDLE(void);
58DEFINE_GUEST_HANDLE(uint64_t); 62DEFINE_GUEST_HANDLE(uint64_t);
59DEFINE_GUEST_HANDLE(uint32_t); 63DEFINE_GUEST_HANDLE(uint32_t);
64DEFINE_GUEST_HANDLE(xen_pfn_t);
60#endif 65#endif
61 66
62#ifndef HYPERVISOR_VIRT_START 67#ifndef HYPERVISOR_VIRT_START
@@ -116,11 +121,13 @@ struct arch_shared_info {
116#endif /* !__ASSEMBLY__ */ 121#endif /* !__ASSEMBLY__ */
117 122
118#ifdef CONFIG_X86_32 123#ifdef CONFIG_X86_32
119#include "interface_32.h" 124#include <asm/xen/interface_32.h>
120#else 125#else
121#include "interface_64.h" 126#include <asm/xen/interface_64.h>
122#endif 127#endif
123 128
129#include <asm/pvclock-abi.h>
130
124#ifndef __ASSEMBLY__ 131#ifndef __ASSEMBLY__
125/* 132/*
126 * The following is all CPU context. Note that the fpu_ctxt block is filled 133 * The following is all CPU context. Note that the fpu_ctxt block is filled
diff --git a/arch/x86/include/asm/xen/swiotlb-xen.h b/arch/x86/include/asm/xen/swiotlb-xen.h
index 1be1ab7d6a41..ee52fcac6f72 100644
--- a/arch/x86/include/asm/xen/swiotlb-xen.h
+++ b/arch/x86/include/asm/xen/swiotlb-xen.h
@@ -5,10 +5,12 @@
5extern int xen_swiotlb; 5extern int xen_swiotlb;
6extern int __init pci_xen_swiotlb_detect(void); 6extern int __init pci_xen_swiotlb_detect(void);
7extern void __init pci_xen_swiotlb_init(void); 7extern void __init pci_xen_swiotlb_init(void);
8extern int pci_xen_swiotlb_init_late(void);
8#else 9#else
9#define xen_swiotlb (0) 10#define xen_swiotlb (0)
10static inline int __init pci_xen_swiotlb_detect(void) { return 0; } 11static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
11static inline void __init pci_xen_swiotlb_init(void) { } 12static inline void __init pci_xen_swiotlb_init(void) { }
13static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
12#endif 14#endif
13 15
14#endif /* _ASM_X86_SWIOTLB_XEN_H */ 16#endif /* _ASM_X86_SWIOTLB_XEN_H */
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h
index 7fcf6f3dbcc3..f8fde90bc45e 100644
--- a/arch/x86/include/asm/xor.h
+++ b/arch/x86/include/asm/xor.h
@@ -3,8 +3,8 @@
3# include <asm-generic/xor.h> 3# include <asm-generic/xor.h>
4#else 4#else
5#ifdef CONFIG_X86_32 5#ifdef CONFIG_X86_32
6# include "xor_32.h" 6# include <asm/xor_32.h>
7#else 7#else
8# include "xor_64.h" 8# include <asm/xor_64.h>
9#endif 9#endif
10#endif 10#endif
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index 454570891bdc..f79cb7ec0e06 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -534,38 +534,6 @@ static struct xor_block_template xor_block_p5_mmx = {
534 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) 534 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
535 */ 535 */
536 536
537#define XMMS_SAVE \
538do { \
539 preempt_disable(); \
540 cr0 = read_cr0(); \
541 clts(); \
542 asm volatile( \
543 "movups %%xmm0,(%0) ;\n\t" \
544 "movups %%xmm1,0x10(%0) ;\n\t" \
545 "movups %%xmm2,0x20(%0) ;\n\t" \
546 "movups %%xmm3,0x30(%0) ;\n\t" \
547 : \
548 : "r" (xmm_save) \
549 : "memory"); \
550} while (0)
551
552#define XMMS_RESTORE \
553do { \
554 asm volatile( \
555 "sfence ;\n\t" \
556 "movups (%0),%%xmm0 ;\n\t" \
557 "movups 0x10(%0),%%xmm1 ;\n\t" \
558 "movups 0x20(%0),%%xmm2 ;\n\t" \
559 "movups 0x30(%0),%%xmm3 ;\n\t" \
560 : \
561 : "r" (xmm_save) \
562 : "memory"); \
563 write_cr0(cr0); \
564 preempt_enable(); \
565} while (0)
566
567#define ALIGN16 __attribute__((aligned(16)))
568
569#define OFFS(x) "16*("#x")" 537#define OFFS(x) "16*("#x")"
570#define PF_OFFS(x) "256+16*("#x")" 538#define PF_OFFS(x) "256+16*("#x")"
571#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" 539#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
@@ -587,10 +555,8 @@ static void
587xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) 555xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
588{ 556{
589 unsigned long lines = bytes >> 8; 557 unsigned long lines = bytes >> 8;
590 char xmm_save[16*4] ALIGN16;
591 int cr0;
592 558
593 XMMS_SAVE; 559 kernel_fpu_begin();
594 560
595 asm volatile( 561 asm volatile(
596#undef BLOCK 562#undef BLOCK
@@ -633,7 +599,7 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
633 : 599 :
634 : "memory"); 600 : "memory");
635 601
636 XMMS_RESTORE; 602 kernel_fpu_end();
637} 603}
638 604
639static void 605static void
@@ -641,10 +607,8 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
641 unsigned long *p3) 607 unsigned long *p3)
642{ 608{
643 unsigned long lines = bytes >> 8; 609 unsigned long lines = bytes >> 8;
644 char xmm_save[16*4] ALIGN16;
645 int cr0;
646 610
647 XMMS_SAVE; 611 kernel_fpu_begin();
648 612
649 asm volatile( 613 asm volatile(
650#undef BLOCK 614#undef BLOCK
@@ -694,7 +658,7 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
694 : 658 :
695 : "memory" ); 659 : "memory" );
696 660
697 XMMS_RESTORE; 661 kernel_fpu_end();
698} 662}
699 663
700static void 664static void
@@ -702,10 +666,8 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
702 unsigned long *p3, unsigned long *p4) 666 unsigned long *p3, unsigned long *p4)
703{ 667{
704 unsigned long lines = bytes >> 8; 668 unsigned long lines = bytes >> 8;
705 char xmm_save[16*4] ALIGN16;
706 int cr0;
707 669
708 XMMS_SAVE; 670 kernel_fpu_begin();
709 671
710 asm volatile( 672 asm volatile(
711#undef BLOCK 673#undef BLOCK
@@ -762,7 +724,7 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
762 : 724 :
763 : "memory" ); 725 : "memory" );
764 726
765 XMMS_RESTORE; 727 kernel_fpu_end();
766} 728}
767 729
768static void 730static void
@@ -770,10 +732,8 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
770 unsigned long *p3, unsigned long *p4, unsigned long *p5) 732 unsigned long *p3, unsigned long *p4, unsigned long *p5)
771{ 733{
772 unsigned long lines = bytes >> 8; 734 unsigned long lines = bytes >> 8;
773 char xmm_save[16*4] ALIGN16;
774 int cr0;
775 735
776 XMMS_SAVE; 736 kernel_fpu_begin();
777 737
778 /* Make sure GCC forgets anything it knows about p4 or p5, 738 /* Make sure GCC forgets anything it knows about p4 or p5,
779 such that it won't pass to the asm volatile below a 739 such that it won't pass to the asm volatile below a
@@ -850,7 +810,7 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
850 like assuming they have some legal value. */ 810 like assuming they have some legal value. */
851 asm("" : "=r" (p4), "=r" (p5)); 811 asm("" : "=r" (p4), "=r" (p5));
852 812
853 XMMS_RESTORE; 813 kernel_fpu_end();
854} 814}
855 815
856static struct xor_block_template xor_block_pIII_sse = { 816static struct xor_block_template xor_block_pIII_sse = {
@@ -862,7 +822,7 @@ static struct xor_block_template xor_block_pIII_sse = {
862}; 822};
863 823
864/* Also try the AVX routines */ 824/* Also try the AVX routines */
865#include "xor_avx.h" 825#include <asm/xor_avx.h>
866 826
867/* Also try the generic routines. */ 827/* Also try the generic routines. */
868#include <asm-generic/xor.h> 828#include <asm-generic/xor.h>
diff --git a/arch/x86/include/asm/xor_64.h b/arch/x86/include/asm/xor_64.h
index b9b2323e90fe..87ac522c4af5 100644
--- a/arch/x86/include/asm/xor_64.h
+++ b/arch/x86/include/asm/xor_64.h
@@ -34,41 +34,7 @@
34 * no advantages to be gotten from x86-64 here anyways. 34 * no advantages to be gotten from x86-64 here anyways.
35 */ 35 */
36 36
37typedef struct { 37#include <asm/i387.h>
38 unsigned long a, b;
39} __attribute__((aligned(16))) xmm_store_t;
40
41/* Doesn't use gcc to save the XMM registers, because there is no easy way to
42 tell it to do a clts before the register saving. */
43#define XMMS_SAVE \
44do { \
45 preempt_disable(); \
46 asm volatile( \
47 "movq %%cr0,%0 ;\n\t" \
48 "clts ;\n\t" \
49 "movups %%xmm0,(%1) ;\n\t" \
50 "movups %%xmm1,0x10(%1) ;\n\t" \
51 "movups %%xmm2,0x20(%1) ;\n\t" \
52 "movups %%xmm3,0x30(%1) ;\n\t" \
53 : "=&r" (cr0) \
54 : "r" (xmm_save) \
55 : "memory"); \
56} while (0)
57
58#define XMMS_RESTORE \
59do { \
60 asm volatile( \
61 "sfence ;\n\t" \
62 "movups (%1),%%xmm0 ;\n\t" \
63 "movups 0x10(%1),%%xmm1 ;\n\t" \
64 "movups 0x20(%1),%%xmm2 ;\n\t" \
65 "movups 0x30(%1),%%xmm3 ;\n\t" \
66 "movq %0,%%cr0 ;\n\t" \
67 : \
68 : "r" (cr0), "r" (xmm_save) \
69 : "memory"); \
70 preempt_enable(); \
71} while (0)
72 38
73#define OFFS(x) "16*("#x")" 39#define OFFS(x) "16*("#x")"
74#define PF_OFFS(x) "256+16*("#x")" 40#define PF_OFFS(x) "256+16*("#x")"
@@ -91,10 +57,8 @@ static void
91xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) 57xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
92{ 58{
93 unsigned int lines = bytes >> 8; 59 unsigned int lines = bytes >> 8;
94 unsigned long cr0;
95 xmm_store_t xmm_save[4];
96 60
97 XMMS_SAVE; 61 kernel_fpu_begin();
98 62
99 asm volatile( 63 asm volatile(
100#undef BLOCK 64#undef BLOCK
@@ -135,7 +99,7 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
135 : [inc] "r" (256UL) 99 : [inc] "r" (256UL)
136 : "memory"); 100 : "memory");
137 101
138 XMMS_RESTORE; 102 kernel_fpu_end();
139} 103}
140 104
141static void 105static void
@@ -143,11 +107,8 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
143 unsigned long *p3) 107 unsigned long *p3)
144{ 108{
145 unsigned int lines = bytes >> 8; 109 unsigned int lines = bytes >> 8;
146 xmm_store_t xmm_save[4];
147 unsigned long cr0;
148
149 XMMS_SAVE;
150 110
111 kernel_fpu_begin();
151 asm volatile( 112 asm volatile(
152#undef BLOCK 113#undef BLOCK
153#define BLOCK(i) \ 114#define BLOCK(i) \
@@ -194,7 +155,7 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
194 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) 155 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
195 : [inc] "r" (256UL) 156 : [inc] "r" (256UL)
196 : "memory"); 157 : "memory");
197 XMMS_RESTORE; 158 kernel_fpu_end();
198} 159}
199 160
200static void 161static void
@@ -202,10 +163,8 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
202 unsigned long *p3, unsigned long *p4) 163 unsigned long *p3, unsigned long *p4)
203{ 164{
204 unsigned int lines = bytes >> 8; 165 unsigned int lines = bytes >> 8;
205 xmm_store_t xmm_save[4];
206 unsigned long cr0;
207 166
208 XMMS_SAVE; 167 kernel_fpu_begin();
209 168
210 asm volatile( 169 asm volatile(
211#undef BLOCK 170#undef BLOCK
@@ -261,7 +220,7 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
261 : [inc] "r" (256UL) 220 : [inc] "r" (256UL)
262 : "memory" ); 221 : "memory" );
263 222
264 XMMS_RESTORE; 223 kernel_fpu_end();
265} 224}
266 225
267static void 226static void
@@ -269,10 +228,8 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
269 unsigned long *p3, unsigned long *p4, unsigned long *p5) 228 unsigned long *p3, unsigned long *p4, unsigned long *p5)
270{ 229{
271 unsigned int lines = bytes >> 8; 230 unsigned int lines = bytes >> 8;
272 xmm_store_t xmm_save[4];
273 unsigned long cr0;
274 231
275 XMMS_SAVE; 232 kernel_fpu_begin();
276 233
277 asm volatile( 234 asm volatile(
278#undef BLOCK 235#undef BLOCK
@@ -336,7 +293,7 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
336 : [inc] "r" (256UL) 293 : [inc] "r" (256UL)
337 : "memory"); 294 : "memory");
338 295
339 XMMS_RESTORE; 296 kernel_fpu_end();
340} 297}
341 298
342static struct xor_block_template xor_block_sse = { 299static struct xor_block_template xor_block_sse = {
@@ -349,7 +306,7 @@ static struct xor_block_template xor_block_sse = {
349 306
350 307
351/* Also try the AVX routines */ 308/* Also try the AVX routines */
352#include "xor_avx.h" 309#include <asm/xor_avx.h>
353 310
354#undef XOR_TRY_TEMPLATES 311#undef XOR_TRY_TEMPLATES
355#define XOR_TRY_TEMPLATES \ 312#define XOR_TRY_TEMPLATES \
diff --git a/arch/x86/include/asm/xor_avx.h b/arch/x86/include/asm/xor_avx.h
index 2510d35f480e..7ea79c5fa1f2 100644
--- a/arch/x86/include/asm/xor_avx.h
+++ b/arch/x86/include/asm/xor_avx.h
@@ -20,32 +20,6 @@
20#include <linux/compiler.h> 20#include <linux/compiler.h>
21#include <asm/i387.h> 21#include <asm/i387.h>
22 22
23#define ALIGN32 __aligned(32)
24
25#define YMM_SAVED_REGS 4
26
27#define YMMS_SAVE \
28do { \
29 preempt_disable(); \
30 cr0 = read_cr0(); \
31 clts(); \
32 asm volatile("vmovaps %%ymm0, %0" : "=m" (ymm_save[0]) : : "memory"); \
33 asm volatile("vmovaps %%ymm1, %0" : "=m" (ymm_save[32]) : : "memory"); \
34 asm volatile("vmovaps %%ymm2, %0" : "=m" (ymm_save[64]) : : "memory"); \
35 asm volatile("vmovaps %%ymm3, %0" : "=m" (ymm_save[96]) : : "memory"); \
36} while (0);
37
38#define YMMS_RESTORE \
39do { \
40 asm volatile("sfence" : : : "memory"); \
41 asm volatile("vmovaps %0, %%ymm3" : : "m" (ymm_save[96])); \
42 asm volatile("vmovaps %0, %%ymm2" : : "m" (ymm_save[64])); \
43 asm volatile("vmovaps %0, %%ymm1" : : "m" (ymm_save[32])); \
44 asm volatile("vmovaps %0, %%ymm0" : : "m" (ymm_save[0])); \
45 write_cr0(cr0); \
46 preempt_enable(); \
47} while (0);
48
49#define BLOCK4(i) \ 23#define BLOCK4(i) \
50 BLOCK(32 * i, 0) \ 24 BLOCK(32 * i, 0) \
51 BLOCK(32 * (i + 1), 1) \ 25 BLOCK(32 * (i + 1), 1) \
@@ -60,10 +34,9 @@ do { \
60 34
61static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1) 35static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1)
62{ 36{
63 unsigned long cr0, lines = bytes >> 9; 37 unsigned long lines = bytes >> 9;
64 char ymm_save[32 * YMM_SAVED_REGS] ALIGN32;
65 38
66 YMMS_SAVE 39 kernel_fpu_begin();
67 40
68 while (lines--) { 41 while (lines--) {
69#undef BLOCK 42#undef BLOCK
@@ -82,16 +55,15 @@ do { \
82 p1 = (unsigned long *)((uintptr_t)p1 + 512); 55 p1 = (unsigned long *)((uintptr_t)p1 + 512);
83 } 56 }
84 57
85 YMMS_RESTORE 58 kernel_fpu_end();
86} 59}
87 60
88static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1, 61static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1,
89 unsigned long *p2) 62 unsigned long *p2)
90{ 63{
91 unsigned long cr0, lines = bytes >> 9; 64 unsigned long lines = bytes >> 9;
92 char ymm_save[32 * YMM_SAVED_REGS] ALIGN32;
93 65
94 YMMS_SAVE 66 kernel_fpu_begin();
95 67
96 while (lines--) { 68 while (lines--) {
97#undef BLOCK 69#undef BLOCK
@@ -113,16 +85,15 @@ do { \
113 p2 = (unsigned long *)((uintptr_t)p2 + 512); 85 p2 = (unsigned long *)((uintptr_t)p2 + 512);
114 } 86 }
115 87
116 YMMS_RESTORE 88 kernel_fpu_end();
117} 89}
118 90
119static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1, 91static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1,
120 unsigned long *p2, unsigned long *p3) 92 unsigned long *p2, unsigned long *p3)
121{ 93{
122 unsigned long cr0, lines = bytes >> 9; 94 unsigned long lines = bytes >> 9;
123 char ymm_save[32 * YMM_SAVED_REGS] ALIGN32;
124 95
125 YMMS_SAVE 96 kernel_fpu_begin();
126 97
127 while (lines--) { 98 while (lines--) {
128#undef BLOCK 99#undef BLOCK
@@ -147,16 +118,15 @@ do { \
147 p3 = (unsigned long *)((uintptr_t)p3 + 512); 118 p3 = (unsigned long *)((uintptr_t)p3 + 512);
148 } 119 }
149 120
150 YMMS_RESTORE 121 kernel_fpu_end();
151} 122}
152 123
153static void xor_avx_5(unsigned long bytes, unsigned long *p0, unsigned long *p1, 124static void xor_avx_5(unsigned long bytes, unsigned long *p0, unsigned long *p1,
154 unsigned long *p2, unsigned long *p3, unsigned long *p4) 125 unsigned long *p2, unsigned long *p3, unsigned long *p4)
155{ 126{
156 unsigned long cr0, lines = bytes >> 9; 127 unsigned long lines = bytes >> 9;
157 char ymm_save[32 * YMM_SAVED_REGS] ALIGN32;
158 128
159 YMMS_SAVE 129 kernel_fpu_begin();
160 130
161 while (lines--) { 131 while (lines--) {
162#undef BLOCK 132#undef BLOCK
@@ -184,7 +154,7 @@ do { \
184 p4 = (unsigned long *)((uintptr_t)p4 + 512); 154 p4 = (unsigned long *)((uintptr_t)p4 + 512);
185 } 155 }
186 156
187 YMMS_RESTORE 157 kernel_fpu_end();
188} 158}
189 159
190static struct xor_block_template xor_block_avx = { 160static struct xor_block_template xor_block_avx = {
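The xor_32/xor_64/xor_avx changes are all the same substitution: the hand-rolled clts plus movups/vmovaps register-spill macros are replaced by the generic FPU section helpers. A minimal sketch of the bracket that kernel-mode SIMD code now uses, with the vector work left as a placeholder:

#include <asm/i387.h>

static void example_simd_section(void)
{
	kernel_fpu_begin();	/* preemption off, in-use FPU/SIMD state saved as needed */

	/* ... SSE/AVX work via inline asm would go here ... */

	kernel_fpu_end();	/* state restored, preemption back on */
}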
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 8a1b6f9b594a..0415cdabb5a6 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -34,17 +34,14 @@
34extern unsigned int xstate_size; 34extern unsigned int xstate_size;
35extern u64 pcntxt_mask; 35extern u64 pcntxt_mask;
36extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS]; 36extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
37extern struct xsave_struct *init_xstate_buf;
37 38
38extern void xsave_init(void); 39extern void xsave_init(void);
39extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask); 40extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
40extern int init_fpu(struct task_struct *child); 41extern int init_fpu(struct task_struct *child);
41extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
42 void __user *fpstate,
43 struct _fpx_sw_bytes *sw);
44 42
45static inline int fpu_xrstor_checking(struct fpu *fpu) 43static inline int fpu_xrstor_checking(struct xsave_struct *fx)
46{ 44{
47 struct xsave_struct *fx = &fpu->state->xsave;
48 int err; 45 int err;
49 46
50 asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" 47 asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
@@ -69,13 +66,13 @@ static inline int xsave_user(struct xsave_struct __user *buf)
69 * Clear the xsave header first, so that reserved fields are 66 * Clear the xsave header first, so that reserved fields are
70 * initialized to zero. 67 * initialized to zero.
71 */ 68 */
72 err = __clear_user(&buf->xsave_hdr, 69 err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr));
73 sizeof(struct xsave_hdr_struct));
74 if (unlikely(err)) 70 if (unlikely(err))
75 return -EFAULT; 71 return -EFAULT;
76 72
77 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" 73 __asm__ __volatile__(ASM_STAC "\n"
78 "2:\n" 74 "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
75 "2: " ASM_CLAC "\n"
79 ".section .fixup,\"ax\"\n" 76 ".section .fixup,\"ax\"\n"
80 "3: movl $-1,%[err]\n" 77 "3: movl $-1,%[err]\n"
81 " jmp 2b\n" 78 " jmp 2b\n"
@@ -84,9 +81,6 @@ static inline int xsave_user(struct xsave_struct __user *buf)
84 : [err] "=r" (err) 81 : [err] "=r" (err)
85 : "D" (buf), "a" (-1), "d" (-1), "0" (0) 82 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
86 : "memory"); 83 : "memory");
87 if (unlikely(err) && __clear_user(buf, xstate_size))
88 err = -EFAULT;
89 /* No need to clear here because the caller clears USED_MATH */
90 return err; 84 return err;
91} 85}
92 86
@@ -97,8 +91,9 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
97 u32 lmask = mask; 91 u32 lmask = mask;
98 u32 hmask = mask >> 32; 92 u32 hmask = mask >> 32;
99 93
100 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" 94 __asm__ __volatile__(ASM_STAC "\n"
101 "2:\n" 95 "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
96 "2: " ASM_CLAC "\n"
102 ".section .fixup,\"ax\"\n" 97 ".section .fixup,\"ax\"\n"
103 "3: movl $-1,%[err]\n" 98 "3: movl $-1,%[err]\n"
104 " jmp 2b\n" 99 " jmp 2b\n"
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
new file mode 100644
index 000000000000..83b6e9a0dce4
--- /dev/null
+++ b/arch/x86/include/uapi/asm/Kbuild
@@ -0,0 +1,6 @@
1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm
3
4genhdr-y += unistd_32.h
5genhdr-y += unistd_64.h
6genhdr-y += unistd_x32.h