author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>   2012-10-19 15:19:19 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>   2012-10-19 15:19:19 -0400
commit     e05dacd71db0a5da7c1a44bcaab2a8a240b9c233 (patch)
tree       31382cf1c7d62c03126448affb2fc86e8c4aaa8b /arch/x86/include
parent     3ab0b83bf6a1e834f4b884150d8012990c75d25d (diff)
parent     ddffeb8c4d0331609ef2581d84de4d763607bd37 (diff)
Merge commit 'v3.7-rc1' into stable/for-linus-3.7
* commit 'v3.7-rc1': (10892 commits)
Linux 3.7-rc1
x86, boot: Explicitly include autoconf.h for hostprogs
perf: Fix UAPI fallout
ARM: config: make sure that platforms are ordered by option string
ARM: config: sort select statements alphanumerically
UAPI: (Scripted) Disintegrate include/linux/byteorder
UAPI: (Scripted) Disintegrate include/linux
UAPI: Unexport linux/blk_types.h
UAPI: Unexport part of linux/ppp-comp.h
perf: Handle new rbtree implementation
procfs: don't need a PATH_MAX allocation to hold a string representation of an int
vfs: embed struct filename inside of names_cache allocation if possible
audit: make audit_inode take struct filename
vfs: make path_openat take a struct filename pointer
vfs: turn do_path_lookup into wrapper around struct filename variant
audit: allow audit code to satisfy getname requests from its names_list
vfs: define struct filename and have getname() return it
btrfs: Fix compilation with user namespace support enabled
userns: Fix posix_acl_file_xattr_userns gid conversion
userns: Properly print bluetooth socket uids
...
Diffstat (limited to 'arch/x86/include')
68 files changed, 1089 insertions, 750 deletions
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index f9c0d3ba9e84..66e5f0ef0523 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -26,3 +26,5 @@ header-y += vsyscall.h
 genhdr-y += unistd_32.h
 genhdr-y += unistd_64.h
 genhdr-y += unistd_x32.h
+
+generic-y += clkdev.h
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 952bd0100c5c..372231c22a47 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -1,3 +1,6 @@
+#ifndef _ASM_X86_ALTERNATIVE_ASM_H
+#define _ASM_X86_ALTERNATIVE_ASM_H
+
 #ifdef __ASSEMBLY__
 
 #include <asm/asm.h>
@@ -5,10 +8,10 @@
 #ifdef CONFIG_SMP
 .macro LOCK_PREFIX
 672:	lock
-	.section .smp_locks,"a"
+	.pushsection .smp_locks,"a"
 	.balign 4
 	.long 672b - .
-	.previous
+	.popsection
 .endm
 #else
 .macro LOCK_PREFIX
@@ -24,3 +27,5 @@
 .endm
 
 #endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_ALTERNATIVE_ASM_H */
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 70780689599a..58ed6d96a6ac 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -29,10 +29,10 @@
 
 #ifdef CONFIG_SMP
 #define LOCK_PREFIX_HERE \
-		".section .smp_locks,\"a\"\n"	\
+		".pushsection .smp_locks,\"a\"\n"	\
		".balign 4\n"			\
		".long 671f - .\n" /* offset */	\
-		".previous\n"			\
+		".popsection\n"			\
		"671:"
 
 #define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
@@ -60,7 +60,7 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
					void *locks, void *locks_end,
					void *text, void *text_end);
 extern void alternatives_smp_module_del(struct module *mod);
-extern void alternatives_smp_switch(int smp);
+extern void alternatives_enable_smp(void);
 extern int alternatives_text_reserved(void *start, void *end);
 extern bool skip_smp_alternatives;
 #else
@@ -68,7 +68,7 @@ static inline void alternatives_smp_module_add(struct module *mod, char *name,
					       void *locks, void *locks_end,
					       void *text, void *text_end) {}
 static inline void alternatives_smp_module_del(struct module *mod) {}
-static inline void alternatives_smp_switch(int smp) {}
+static inline void alternatives_enable_smp(void) {}
 static inline int alternatives_text_reserved(void *start, void *end)
 {
	return 0;
@@ -99,30 +99,30 @@ static inline int alternatives_text_reserved(void *start, void *end)
 /* alternative assembly primitive: */
 #define ALTERNATIVE(oldinstr, newinstr, feature)			\
	OLDINSTR(oldinstr)						\
-	".section .altinstructions,\"a\"\n"				\
+	".pushsection .altinstructions,\"a\"\n"				\
	ALTINSTR_ENTRY(feature, 1)					\
-	".previous\n"							\
-	".section .discard,\"aw\",@progbits\n"				\
+	".popsection\n"							\
+	".pushsection .discard,\"aw\",@progbits\n"			\
	DISCARD_ENTRY(1)						\
-	".previous\n"							\
-	".section .altinstr_replacement, \"ax\"\n"			\
+	".popsection\n"							\
+	".pushsection .altinstr_replacement, \"ax\"\n"			\
	ALTINSTR_REPLACEMENT(newinstr, feature, 1)			\
-	".previous"
+	".popsection"
 
 #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
	OLDINSTR(oldinstr)						\
-	".section .altinstructions,\"a\"\n"				\
+	".pushsection .altinstructions,\"a\"\n"				\
	ALTINSTR_ENTRY(feature1, 1)					\
	ALTINSTR_ENTRY(feature2, 2)					\
-	".previous\n"							\
-	".section .discard,\"aw\",@progbits\n"				\
+	".popsection\n"							\
+	".pushsection .discard,\"aw\",@progbits\n"			\
	DISCARD_ENTRY(1)						\
	DISCARD_ENTRY(2)						\
-	".previous\n"							\
-	".section .altinstr_replacement, \"ax\"\n"			\
+	".popsection\n"							\
+	".pushsection .altinstr_replacement, \"ax\"\n"			\
	ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)			\
	ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)			\
-	".previous"
+	".popsection"
 
 /*
  * This must be included *after* the definition of ALTERNATIVE due to
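The .section/.previous to .pushsection/.popsection conversion in the two files above is a nesting fix: .previous only swaps back to the single previously selected section, while .pushsection/.popsection maintain a proper stack, so LOCK_PREFIX and the ALTERNATIVE() macros remain correct even when expanded from code that is itself assembled into a non-default section. A minimal userspace sketch of the same pattern (my illustration, GCC on x86-64 Linux; the section name "myptrs" is made up):

    #include <stdio.h>

    /* For allocatable sections whose names are valid C identifiers, the
     * linker provides __start_<sec>/__stop_<sec> boundary symbols. */
    extern const int __start_myptrs[], __stop_myptrs[];

    static void probe(void)
    {
        /* Record a PC-relative offset of label 661 in section "myptrs",
         * then pop back to the enclosing section -- the same shape as
         * the .smp_locks bookkeeping emitted by LOCK_PREFIX above. */
        asm volatile("661:\n\t"
                     ".pushsection myptrs,\"a\"\n\t"
                     ".balign 4\n\t"
                     ".long 661b - .\n\t"
                     ".popsection");
    }

    int main(void)
    {
        probe();
        printf("%td entries recorded\n", __stop_myptrs - __start_myptrs);
        return 0;
    }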
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index f34261296ffb..338803422239 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -409,7 +409,7 @@ extern struct apic *apic;
  * to enforce the order with in them.
  */
 #define apic_driver(sym)					\
-	static struct apic *__apicdrivers_##sym __used		\
+	static const struct apic *__apicdrivers_##sym __used	\
	__aligned(sizeof(struct apic *))			\
	__section(.apicdrivers) = { &sym }
 
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 58cb6d4085f7..b6c3b821acf6 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -240,30 +240,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
	return c;
 }
 
-
-/*
- * atomic_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-static inline int atomic_dec_if_positive(atomic_t *v)
-{
-	int c, old, dec;
-	c = atomic_read(v);
-	for (;;) {
-		dec = c - 1;
-		if (unlikely(dec < 0))
-			break;
-		old = atomic_cmpxchg((v), c, dec);
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return dec;
-}
-
 /**
  * atomic_inc_short - increment of a short integer
  * @v: pointer to type int
@@ -309,9 +285,9 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
 #define smp_mb__after_atomic_inc()	barrier()
 
 #ifdef CONFIG_X86_32
-# include "atomic64_32.h"
+# include <asm/atomic64_32.h>
 #else
-# include "atomic64_64.h"
+# include <asm/atomic64_64.h>
 #endif
 
 #endif /* _ASM_X86_ATOMIC_H */
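The x86-private atomic_dec_if_positive() is dropped above; v3.7 gained a generic version of the same cmpxchg loop in <linux/atomic.h>, which made the arch copy redundant. A userspace re-sketch of the loop with GCC's __atomic builtins standing in for atomic_cmpxchg() (illustrative only, not kernel code):

    #include <stdio.h>

    /* Decrement *v only if the result stays non-negative; return the
     * old value minus 1 either way, like the removed helper. */
    static int dec_if_positive(int *v)
    {
        int c = __atomic_load_n(v, __ATOMIC_RELAXED);
        int dec;

        for (;;) {
            dec = c - 1;
            if (dec < 0)
                break;
            if (__atomic_compare_exchange_n(v, &c, dec, 0,
                                            __ATOMIC_SEQ_CST,
                                            __ATOMIC_SEQ_CST))
                break;
            /* CAS failed: c was refreshed with the current value; retry */
        }
        return dec;
    }

    int main(void)
    {
        int v = 1;
        int a = dec_if_positive(&v);  /* 1 -> 0, returns 0 */
        int b = dec_if_positive(&v);  /* stays 0, returns -1 */
        printf("a=%d b=%d v=%d\n", a, b, v);
        return 0;
    }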
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 72f5009deb5a..6dfd0195bb55 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -355,7 +355,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
  */
 static inline unsigned long __ffs(unsigned long word)
 {
-	asm("bsf %1,%0"
+	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
@@ -369,7 +369,7 @@ static inline unsigned long __ffs(unsigned long word)
  */
 static inline unsigned long ffz(unsigned long word)
 {
-	asm("bsf %1,%0"
+	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
@@ -417,10 +417,9 @@ static inline int ffs(int x)
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
-	long tmp = -1;
	asm("bsfl %1,%0"
	    : "=r" (r)
-	    : "rm" (x), "0" (tmp));
+	    : "rm" (x), "0" (-1));
 #elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
@@ -459,10 +458,9 @@ static inline int fls(int x)
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
-	long tmp = -1;
	asm("bsrl %1,%0"
	    : "=r" (r)
-	    : "rm" (x), "0" (tmp));
+	    : "rm" (x), "0" (-1));
 #elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
@@ -490,13 +488,13 @@ static inline int fls(int x)
 #ifdef CONFIG_X86_64
 static __always_inline int fls64(__u64 x)
 {
-	long bitpos = -1;
+	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before.
	 */
-	asm("bsrq %1,%0"
+	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
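The bsf to "rep; bsf" change in __ffs()/ffz() is the TZCNT encoding: the F3 (REP) prefix plus the BSF opcode is TZCNT on CPUs with BMI, which is well defined for a zero input and generally faster, while older CPUs simply ignore the redundant REP prefix and execute plain BSF. For the nonzero inputs these helpers are specified for, both instructions yield the index of the lowest set bit. A compilable check of that contract (my example, assumes GCC on x86-64):

    #include <assert.h>
    #include <stdio.h>

    static unsigned long my_ffs(unsigned long word)
    {
        /* same asm as the patched __ffs() */
        asm("rep; bsf %1,%0" : "=r" (word) : "rm" (word));
        return word;
    }

    int main(void)
    {
        assert(my_ffs(0x1) == 0);
        assert(my_ffs(0x8) == 3);          /* lowest set bit is bit 3 */
        assert(my_ffs(1UL << 63) == 63);
        printf("ok\n");
        return 0;
    }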
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index a9e3a740f697..0fa675033912 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -46,41 +46,39 @@ For 32-bit we have the following conventions - kernel is built with
 
 */
 
-#include "dwarf2.h"
+#include <asm/dwarf2.h>
 
 /*
- * 64-bit system call stack frame layout defines and helpers, for
- * assembly code (note that the seemingly unnecessary parentheses
- * are to prevent cpp from inserting spaces in expressions that get
- * passed to macros):
+ * 64-bit system call stack frame layout defines and helpers,
+ * for assembly code:
  */
 
-#define R15		  (0)
-#define R14		  (8)
-#define R13		  (16)
-#define R12		  (24)
-#define RBP		  (32)
-#define RBX		  (40)
+#define R15		  0
+#define R14		  8
+#define R13		  16
+#define R12		  24
+#define RBP		  32
+#define RBX		  40
 
 /* arguments: interrupts/non tracing syscalls only save up to here: */
-#define R11		  (48)
-#define R10		  (56)
-#define R9		  (64)
-#define R8		  (72)
-#define RAX		  (80)
-#define RCX		  (88)
-#define RDX		  (96)
-#define RSI		  (104)
-#define RDI		  (112)
-#define ORIG_RAX	  (120)       /* + error_code */
+#define R11		  48
+#define R10		  56
+#define R9		  64
+#define R8		  72
+#define RAX		  80
+#define RCX		  88
+#define RDX		  96
+#define RSI		  104
+#define RDI		  112
+#define ORIG_RAX	  120       /* + error_code */
 /* end of arguments */
 
 /* cpu exception frame or undefined in case of fast syscall: */
-#define RIP		  (128)
-#define CS		  (136)
-#define EFLAGS		  (144)
-#define RSP		  (152)
-#define SS		  (160)
+#define RIP		  128
+#define CS		  136
+#define EFLAGS		  144
+#define RSP		  152
+#define SS		  160
 
 #define ARGOFFSET	R11
 #define SWFRAME		ORIG_RAX
diff --git a/arch/x86/include/asm/checksum.h b/arch/x86/include/asm/checksum.h
index 848850fd7d62..5f5bb0f97361 100644
--- a/arch/x86/include/asm/checksum.h
+++ b/arch/x86/include/asm/checksum.h
@@ -1,5 +1,5 @@
 #ifdef CONFIG_X86_32
-# include "checksum_32.h"
+# include <asm/checksum_32.h>
 #else
-# include "checksum_64.h"
+# include <asm/checksum_64.h>
 #endif
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 99480e55973d..8d871eaddb66 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -138,9 +138,9 @@ extern void __add_wrong_size(void)
	__raw_cmpxchg((ptr), (old), (new), (size), "")
 
 #ifdef CONFIG_X86_32
-# include "cmpxchg_32.h"
+# include <asm/cmpxchg_32.h>
 #else
-# include "cmpxchg_64.h"
+# include <asm/cmpxchg_64.h>
 #endif
 
 #ifdef __HAVE_ARCH_CMPXCHG
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index fedf32b73e65..59c6c401f79f 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -41,6 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
 typedef u32		compat_uint_t;
 typedef u32		compat_ulong_t;
 typedef u64 __attribute__((aligned(4))) compat_u64;
+typedef u32		compat_uptr_t;
 
 struct compat_timespec {
	compat_time_t	tv_sec;
@@ -124,6 +125,78 @@ typedef u32 compat_old_sigset_t;	/* at least 32 bits */
 
 typedef u32		compat_sigset_word;
 
+typedef union compat_sigval {
+	compat_int_t	sival_int;
+	compat_uptr_t	sival_ptr;
+} compat_sigval_t;
+
+typedef struct compat_siginfo {
+	int si_signo;
+	int si_errno;
+	int si_code;
+
+	union {
+		int _pad[128/sizeof(int) - 3];
+
+		/* kill() */
+		struct {
+			unsigned int _pid;	/* sender's pid */
+			unsigned int _uid;	/* sender's uid */
+		} _kill;
+
+		/* POSIX.1b timers */
+		struct {
+			compat_timer_t _tid;	/* timer id */
+			int _overrun;		/* overrun count */
+			compat_sigval_t _sigval;	/* same as below */
+			int _sys_private;	/* not to be passed to user */
+			int _overrun_incr;	/* amount to add to overrun */
+		} _timer;
+
+		/* POSIX.1b signals */
+		struct {
+			unsigned int _pid;	/* sender's pid */
+			unsigned int _uid;	/* sender's uid */
+			compat_sigval_t _sigval;
+		} _rt;
+
+		/* SIGCHLD */
+		struct {
+			unsigned int _pid;	/* which child */
+			unsigned int _uid;	/* sender's uid */
+			int _status;		/* exit code */
+			compat_clock_t _utime;
+			compat_clock_t _stime;
+		} _sigchld;
+
+		/* SIGCHLD (x32 version) */
+		struct {
+			unsigned int _pid;	/* which child */
+			unsigned int _uid;	/* sender's uid */
+			int _status;		/* exit code */
+			compat_s64 _utime;
+			compat_s64 _stime;
+		} _sigchld_x32;
+
+		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+		struct {
+			unsigned int _addr;	/* faulting insn/memory ref. */
+		} _sigfault;
+
+		/* SIGPOLL */
+		struct {
+			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
+			int _fd;
+		} _sigpoll;
+
+		struct {
+			unsigned int _call_addr; /* calling insn */
+			int _syscall;	/* triggering system call number */
+			unsigned int _arch;	/* AUDIT_ARCH_* of syscall */
+		} _sigsys;
+	} _sifields;
+} compat_siginfo_t;
+
 #define COMPAT_OFF_T_MAX	0x7fffffff
 #define COMPAT_LOFF_T_MAX	0x7fffffffffffffffL
 
@@ -209,7 +282,6 @@ typedef struct user_regs_struct32 compat_elf_gregset_t;
  * as pointers because the syscall entry code will have
  * appropriately converted them already.
  */
-typedef u32		compat_uptr_t;
 
 static inline void __user *compat_ptr(compat_uptr_t uptr)
 {
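compat_siginfo moves into compat.h from asm/ia32.h (removed in the ia32.h hunk below) so that it is visible outside the ia32-emulation code, and compat_uptr_t moves up because compat_sigval_t now needs it before its old definition point. The _pad sizing keeps the structure at 128 bytes, the 32-bit siginfo_t ABI size; the arithmetic as a standalone check (my illustration):

    #include <stdio.h>

    int main(void)
    {
        /* 3 ints of header (si_signo, si_errno, si_code) plus a union
         * whose pad member is 128/sizeof(int) - 3 ints wide:
         * 3*4 + (32 - 3)*4 = 128 bytes on an ILP32/LP64 ABI. */
        unsigned int header = 3 * sizeof(int);
        unsigned int pad = (128 / sizeof(int) - 3) * sizeof(int);

        printf("total = %u bytes\n", header + pad);
        return 0;
    }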
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 6b7ee5ff6820..8c297aa53eef 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -4,7 +4,9 @@
 #ifndef _ASM_X86_CPUFEATURE_H
 #define _ASM_X86_CPUFEATURE_H
 
+#ifndef _ASM_X86_REQUIRED_FEATURES_H
 #include <asm/required-features.h>
+#endif
 
 #define NCAPINTS	10	/* N 32-bit words worth of info */
 
@@ -97,6 +99,7 @@
 #define X86_FEATURE_EXTD_APICID	(3*32+26) /* has extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM	(3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF	(3*32+28) /* APERFMPERF */
+#define X86_FEATURE_EAGER_FPU	(3*32+29) /* "eagerfpu" Non lazy FPU restore */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
@@ -209,6 +212,7 @@
 #define X86_FEATURE_RTM		(9*32+11) /* Restricted Transactional Memory */
 #define X86_FEATURE_RDSEED	(9*32+18) /* The RDSEED instruction */
 #define X86_FEATURE_ADX		(9*32+19) /* The ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP	(9*32+20) /* Supervisor Mode Access Prevention */
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
@@ -299,12 +303,14 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xmm4_2		boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_xsaveopt	boot_cpu_has(X86_FEATURE_XSAVEOPT)
 #define cpu_has_osxsave		boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
 #define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
 #define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
 #define cpu_has_cx8		boot_cpu_has(X86_FEATURE_CX8)
 #define cpu_has_cx16		boot_cpu_has(X86_FEATURE_CX16)
+#define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg		1
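Both new bits follow the word*32+bit packing used throughout this header. X86_FEATURE_EAGER_FPU sits in word 3, the Linux-defined word, i.e. it is a synthetic flag set by kernel code (and toggled by the "eagerfpu" boot parameter) rather than read from CPUID; X86_FEATURE_SMAP in word 9 mirrors a real CPUID bit (leaf 7 EBX, if I read the word layout right). Decoding the packing (standalone sketch):

    #include <stdio.h>

    #define X86_FEATURE_EAGER_FPU (3*32 + 29)  /* Linux-defined word */
    #define X86_FEATURE_SMAP      (9*32 + 20)  /* CPUID leaf 7 word */

    static void show(const char *name, unsigned int f)
    {
        printf("%-22s -> word %u, bit %2u\n", name, f / 32, f % 32);
    }

    int main(void)
    {
        show("X86_FEATURE_EAGER_FPU", X86_FEATURE_EAGER_FPU);
        show("X86_FEATURE_SMAP", X86_FEATURE_SMAP);
        return 0;
    }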
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 75f4c6d6a331..831dbb9c6c02 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -12,6 +12,7 @@
 
 #include <linux/kernel_stat.h>
 #include <linux/regset.h>
+#include <linux/compat.h>
 #include <linux/slab.h>
 #include <asm/asm.h>
 #include <asm/cpufeature.h>
@@ -20,43 +21,76 @@
 #include <asm/user.h>
 #include <asm/uaccess.h>
 #include <asm/xsave.h>
+#include <asm/smap.h>
 
-extern unsigned int sig_xstate_size;
+#ifdef CONFIG_X86_64
+# include <asm/sigcontext32.h>
+# include <asm/user32.h>
+int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+			compat_sigset_t *set, struct pt_regs *regs);
+int ia32_setup_frame(int sig, struct k_sigaction *ka,
+		     compat_sigset_t *set, struct pt_regs *regs);
+#else
+# define user_i387_ia32_struct	user_i387_struct
+# define user32_fxsr_struct	user_fxsr_struct
+# define ia32_setup_frame	__setup_frame
+# define ia32_setup_rt_frame	__setup_rt_frame
+#endif
+
+extern unsigned int mxcsr_feature_mask;
 extern void fpu_init(void);
+extern void eager_fpu_init(void);
 
 DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
 
+extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
+			      struct task_struct *tsk);
+extern void convert_to_fxsr(struct task_struct *tsk,
+			    const struct user_i387_ia32_struct *env);
+
 extern user_regset_active_fn fpregs_active, xfpregs_active;
 extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
 extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				xstateregs_set;
 
-
 /*
  * xstateregs_active == fpregs_active. Please refer to the comment
  * at the definition of fpregs_active.
  */
 #define xstateregs_active	fpregs_active
 
-extern struct _fpx_sw_bytes fx_sw_reserved;
-#ifdef CONFIG_IA32_EMULATION
-extern unsigned int sig_xstate_ia32_size;
-extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
-struct _fpstate_ia32;
-struct _xstate_ia32;
-extern int save_i387_xstate_ia32(void __user *buf);
-extern int restore_i387_xstate_ia32(void __user *buf);
-#endif
-
 #ifdef CONFIG_MATH_EMULATION
+# define HAVE_HWFP		(boot_cpu_data.hard_math)
 extern void finit_soft_fpu(struct i387_soft_struct *soft);
 #else
+# define HAVE_HWFP		1
 static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
 #endif
 
+static inline int is_ia32_compat_frame(void)
+{
+	return config_enabled(CONFIG_IA32_EMULATION) &&
+	       test_thread_flag(TIF_IA32);
+}
+
+static inline int is_ia32_frame(void)
+{
+	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
+}
+
+static inline int is_x32_frame(void)
+{
+	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
+}
+
 #define X87_FSW_ES (1 << 7)	/* Exception Summary */
 
+static __always_inline __pure bool use_eager_fpu(void)
+{
+	return static_cpu_has(X86_FEATURE_EAGER_FPU);
+}
+
 static __always_inline __pure bool use_xsaveopt(void)
 {
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
@@ -72,6 +106,13 @@ static __always_inline __pure bool use_fxsr(void)
	return static_cpu_has(X86_FEATURE_FXSR);
 }
 
+static inline void fx_finit(struct i387_fxsave_struct *fx)
+{
+	memset(fx, 0, xstate_size);
+	fx->cwd = 0x37f;
+	fx->mxcsr = MXCSR_DEFAULT;
+}
+
 extern void __sanitize_i387_state(struct task_struct *);
 
 static inline void sanitize_i387_state(struct task_struct *tsk)
@@ -81,131 +122,121 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
	__sanitize_i387_state(tsk);
 }
 
-#ifdef CONFIG_X86_64
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-{
-	int err;
-
-	/* See comment in fxsave() below. */
-#ifdef CONFIG_AS_FXSAVEQ
-	asm volatile("1:  fxrstorq %[fx]\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err)
-		     : [fx] "m" (*fx), "0" (0));
-#else
-	asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err)
-		     : [fx] "R" (fx), "m" (*fx), "0" (0));
-#endif
-	return err;
+#define user_insn(insn, output, input...)				\
+({									\
+	int err;							\
+	asm volatile(ASM_STAC "\n"					\
+		     "1:" #insn "\n\t"					\
+		     "2: " ASM_CLAC "\n"				\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:  movl $-1,%[err]\n"				\
+		     "    jmp  2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err), output				\
+		     : "0"(0), input);					\
+	err;								\
+})
+
+#define check_insn(insn, output, input...)				\
+({									\
+	int err;							\
+	asm volatile("1:" #insn "\n\t"					\
+		     "2:\n"						\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:  movl $-1,%[err]\n"				\
+		     "    jmp  2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err), output				\
+		     : "0"(0), input);					\
+	err;								\
+})
+
+static inline int fsave_user(struct i387_fsave_struct __user *fx)
+{
+	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
 }
 
 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
-	int err;
+	if (config_enabled(CONFIG_X86_32))
+		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
 
-	/*
-	 * Clear the bytes not touched by the fxsave and reserved
-	 * for the SW usage.
-	 */
-	err = __clear_user(&fx->sw_reserved,
-			   sizeof(struct _fpx_sw_bytes));
-	if (unlikely(err))
-		return -EFAULT;
-
-	/* See comment in fxsave() below. */
-#ifdef CONFIG_AS_FXSAVEQ
-	asm volatile("1:  fxsaveq %[fx]\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err), [fx] "=m" (*fx)
-		     : "0" (0));
-#else
-	asm volatile("1:  rex64/fxsave (%[fx])\n\t"
-		     "2:\n"
-		     ".section .fixup,\"ax\"\n"
-		     "3:  movl $-1,%[err]\n"
-		     "    jmp  2b\n"
-		     ".previous\n"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [err] "=r" (err), "=m" (*fx)
-		     : [fx] "R" (fx), "0" (0));
-#endif
-	if (unlikely(err) &&
-	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
-		err = -EFAULT;
-	/* No need to clear here because the caller clears USED_MATH */
-	return err;
+	/* See comment in fpu_fxsave() below. */
+	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
 }
 
-static inline void fpu_fxsave(struct fpu *fpu)
+static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 {
-	/* Using "rex64; fxsave %0" is broken because, if the memory operand
-	   uses any extended registers for addressing, a second REX prefix
-	   will be generated (to the assembler, rex64 followed by semicolon
-	   is a separate instruction), and hence the 64-bitness is lost. */
+	if (config_enabled(CONFIG_X86_32))
+		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
 
-#ifdef CONFIG_AS_FXSAVEQ
-	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
-	   starting with gas 2.16. */
-	__asm__ __volatile__("fxsaveq %0"
-			     : "=m" (fpu->state->fxsave));
-#else
-	/* Using, as a workaround, the properly prefixed form below isn't
-	   accepted by any binutils version so far released, complaining that
-	   the same type of prefix is used twice if an extended register is
-	   needed for addressing (fix submitted to mainline 2005-11-21).
-	asm volatile("rex64/fxsave %0"
-		     : "=m" (fpu->state->fxsave));
-	   This, however, we can work around by forcing the compiler to select
-	   an addressing mode that doesn't require extended registers. */
-	asm volatile("rex64/fxsave (%[fx])"
-		     : "=m" (fpu->state->fxsave)
-		     : [fx] "R" (&fpu->state->fxsave));
-#endif
+	/* See comment in fpu_fxsave() below. */
+	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
+			  "m" (*fx));
 }
 
-#else /* CONFIG_X86_32 */
+static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
+{
+	if (config_enabled(CONFIG_X86_32))
+		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
 
-/* perform fxrstor iff the processor has extended states, otherwise frstor */
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
+	/* See comment in fpu_fxsave() below. */
+	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
+			  "m" (*fx));
+}
+
+static inline int frstor_checking(struct i387_fsave_struct *fx)
 {
-	/*
-	 * The "nop" is needed to make the instructions the same
-	 * length.
-	 */
-	alternative_input(
-		"nop ; frstor %1",
-		"fxrstor %1",
-		X86_FEATURE_FXSR,
-		"m" (*fx));
+	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
 
-	return 0;
+static inline int frstor_user(struct i387_fsave_struct __user *fx)
+{
+	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
 static inline void fpu_fxsave(struct fpu *fpu)
 {
-	asm volatile("fxsave %[fx]"
-		     : [fx] "=m" (fpu->state->fxsave));
+	if (config_enabled(CONFIG_X86_32))
+		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
+	else {
+		/* Using "rex64; fxsave %0" is broken because, if the memory
+		 * operand uses any extended registers for addressing, a second
+		 * REX prefix will be generated (to the assembler, rex64
+		 * followed by semicolon is a separate instruction), and hence
+		 * the 64-bitness is lost.
+		 *
+		 * Using "fxsaveq %0" would be the ideal choice, but is only
+		 * supported starting with gas 2.16.
+		 *
+		 * Using, as a workaround, the properly prefixed form below
+		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
+		 * an extended register is needed for addressing (fix submitted
+		 * to mainline 2005-11-21).
+		 *
+		 * asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
+		 *
+		 * This, however, we can work around by forcing the compiler to
+		 * select an addressing mode that doesn't require extended
+		 * registers.
+		 */
+		asm volatile( "rex64/fxsave (%[fx])"
+			     : "=m" (fpu->state->fxsave)
+			     : [fx] "R" (&fpu->state->fxsave));
+	}
 }
 
-#endif /* CONFIG_X86_64 */
-
 /*
  * These must be called with preempt disabled. Returns
  * 'true' if the FPU state is still intact.
@@ -248,17 +279,14 @@ static inline int __save_init_fpu(struct task_struct *tsk)
	return fpu_save_init(&tsk->thread.fpu);
 }
 
-static inline int fpu_fxrstor_checking(struct fpu *fpu)
-{
-	return fxrstor_checking(&fpu->state->fxsave);
-}
-
 static inline int fpu_restore_checking(struct fpu *fpu)
 {
	if (use_xsave())
-		return fpu_xrstor_checking(fpu);
+		return fpu_xrstor_checking(&fpu->state->xsave);
+	else if (use_fxsr())
+		return fxrstor_checking(&fpu->state->fxsave);
	else
-		return fpu_fxrstor_checking(fpu);
+		return frstor_checking(&fpu->state->fsave);
 }
 
 static inline int restore_fpu_checking(struct task_struct *tsk)
@@ -310,15 +338,52 @@ static inline void __thread_set_has_fpu(struct task_struct *tsk)
 static inline void __thread_fpu_end(struct task_struct *tsk)
 {
	__thread_clear_has_fpu(tsk);
-	stts();
+	if (!use_eager_fpu())
+		stts();
 }
 
 static inline void __thread_fpu_begin(struct task_struct *tsk)
 {
-	clts();
+	if (!use_eager_fpu())
+		clts();
	__thread_set_has_fpu(tsk);
 }
 
+static inline void __drop_fpu(struct task_struct *tsk)
+{
+	if (__thread_has_fpu(tsk)) {
+		/* Ignore delayed exceptions from user space */
+		asm volatile("1: fwait\n"
+			     "2:\n"
+			     _ASM_EXTABLE(1b, 2b));
+		__thread_fpu_end(tsk);
+	}
+}
+
+static inline void drop_fpu(struct task_struct *tsk)
+{
+	/*
+	 * Forget coprocessor state..
+	 */
+	preempt_disable();
+	tsk->fpu_counter = 0;
+	__drop_fpu(tsk);
+	clear_used_math();
+	preempt_enable();
+}
+
+static inline void drop_init_fpu(struct task_struct *tsk)
+{
+	if (!use_eager_fpu())
+		drop_fpu(tsk);
+	else {
+		if (use_xsave())
+			xrstor_state(init_xstate_buf, -1);
+		else
+			fxrstor_checking(&init_xstate_buf->i387);
+	}
+}
+
 /*
  * FPU state switching for scheduling.
  *
@@ -352,7 +417,12 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 {
	fpu_switch_t fpu;
 
-	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
+	/*
+	 * If the task has used the math, pre-load the FPU on xsave processors
+	 * or if the past 5 consecutive context-switches used math.
+	 */
+	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
+					     new->fpu_counter > 5);
	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			cpu = ~0;
@@ -364,14 +434,14 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
			new->fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
-		} else
+		} else if (!use_eager_fpu())
			stts();
	} else {
		old->fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->fpu_counter++;
-			if (fpu_lazy_restore(new, cpu))
+			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
@@ -391,44 +461,40 @@ static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
 {
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
-			__thread_fpu_end(new);
+			drop_init_fpu(new);
	}
 }
 
 /*
  * Signal frame handlers...
  */
-extern int save_i387_xstate(void __user *buf);
-extern int restore_i387_xstate(void __user *buf);
+extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
+extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);
 
-static inline void __clear_fpu(struct task_struct *tsk)
+static inline int xstate_sigframe_size(void)
 {
-	if (__thread_has_fpu(tsk)) {
-		/* Ignore delayed exceptions from user space */
-		asm volatile("1: fwait\n"
-			     "2:\n"
-			     _ASM_EXTABLE(1b, 2b));
-		__thread_fpu_end(tsk);
+	return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
+}
+
+static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
+{
+	void __user *buf_fx = buf;
+	int size = xstate_sigframe_size();
+
+	if (ia32_frame && use_fxsr()) {
+		buf_fx = buf + sizeof(struct i387_fsave_struct);
+		size += sizeof(struct i387_fsave_struct);
	}
+
+	return __restore_xstate_sig(buf, buf_fx, size);
 }
 
 /*
- * The actual user_fpu_begin/end() functions
- * need to be preemption-safe.
+ * Need to be preemption-safe.
  *
- * NOTE! user_fpu_end() must be used only after you
- * have saved the FP state, and user_fpu_begin() must
- * be used only immediately before restoring it.
- * These functions do not do any save/restore on
- * their own.
+ * NOTE! user_fpu_begin() must be used only immediately before restoring
+ * it. This function does not do any save/restore on their own.
  */
-static inline void user_fpu_end(void)
-{
-	preempt_disable();
-	__thread_fpu_end(current);
-	preempt_enable();
-}
-
 static inline void user_fpu_begin(void)
 {
	preempt_disable();
@@ -437,25 +503,32 @@ static inline void user_fpu_begin(void)
	preempt_enable();
 }
 
+static inline void __save_fpu(struct task_struct *tsk)
+{
+	if (use_xsave())
+		xsave_state(&tsk->thread.fpu.state->xsave, -1);
+	else
+		fpu_fxsave(&tsk->thread.fpu);
+}
+
 /*
  * These disable preemption on their own and are safe
  */
 static inline void save_init_fpu(struct task_struct *tsk)
 {
	WARN_ON_ONCE(!__thread_has_fpu(tsk));
+
+	if (use_eager_fpu()) {
+		__save_fpu(tsk);
+		return;
+	}
+
	preempt_disable();
	__save_init_fpu(tsk);
	__thread_fpu_end(tsk);
	preempt_enable();
 }
 
-static inline void clear_fpu(struct task_struct *tsk)
-{
-	preempt_disable();
-	__clear_fpu(tsk);
-	preempt_enable();
-}
-
 /*
  * i387 state interaction
  */
@@ -510,11 +583,34 @@ static inline void fpu_free(struct fpu *fpu)
	}
 }
 
-static inline void fpu_copy(struct fpu *dst, struct fpu *src)
+static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
 {
-	memcpy(dst->state, src->state, xstate_size);
+	if (use_eager_fpu()) {
+		memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
+		__save_fpu(dst);
+	} else {
+		struct fpu *dfpu = &dst->thread.fpu;
+		struct fpu *sfpu = &src->thread.fpu;
+
+		unlazy_fpu(src);
+		memcpy(dfpu->state, sfpu->state, xstate_size);
+	}
 }
 
-extern void fpu_finit(struct fpu *fpu);
+static inline unsigned long
+alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
+		unsigned long *size)
+{
+	unsigned long frame_size = xstate_sigframe_size();
+
+	*buf_fx = sp = round_down(sp - frame_size, 64);
+	if (ia32_frame && use_fxsr()) {
+		frame_size += sizeof(struct i387_fsave_struct);
+		sp -= sizeof(struct i387_fsave_struct);
+	}
+
+	*size = frame_size;
+	return sp;
+}
 
 #endif
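The behavioural heart of the eagerfpu rework above is in switch_fpu_prepare(): in eager mode the FPU state of any math-using task is preloaded on every switch and CR0.TS is left alone (__thread_fpu_begin/end skip clts()/stts()), while lazy mode keeps the old heuristic of preloading only after more than five consecutive math-using switches. A toy model of just that decision (harness and names are mine):

    #include <stdbool.h>
    #include <stdio.h>

    struct task { bool used_math; unsigned int fpu_counter; };

    /* mirrors: fpu.preload = tsk_used_math(new) &&
     *          (use_eager_fpu() || new->fpu_counter > 5); */
    static bool should_preload(const struct task *next, bool eager_fpu)
    {
        return next->used_math && (eager_fpu || next->fpu_counter > 5);
    }

    int main(void)
    {
        struct task t = { .used_math = true, .fpu_counter = 2 };

        printf("lazy: %d eager: %d\n",
               should_preload(&t, false),   /* 0: counter not yet > 5 */
               should_preload(&t, true));   /* 1: always preload */
        return 0;
    }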
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index b0767bc08740..9a25b522d377 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -3,38 +3,54 @@
 
 #ifdef __ASSEMBLY__
 
-	.macro MCOUNT_SAVE_FRAME
-	/* taken from glibc */
-	subq $0x38, %rsp
-	movq %rax, (%rsp)
-	movq %rcx, 8(%rsp)
-	movq %rdx, 16(%rsp)
-	movq %rsi, 24(%rsp)
-	movq %rdi, 32(%rsp)
-	movq %r8, 40(%rsp)
-	movq %r9, 48(%rsp)
+	/* skip is set if the stack was already partially adjusted */
+	.macro MCOUNT_SAVE_FRAME skip=0
+	/*
+	 * We add enough stack to save all regs.
+	 */
+	subq $(SS+8-\skip), %rsp
+	movq %rax, RAX(%rsp)
+	movq %rcx, RCX(%rsp)
+	movq %rdx, RDX(%rsp)
+	movq %rsi, RSI(%rsp)
+	movq %rdi, RDI(%rsp)
+	movq %r8, R8(%rsp)
+	movq %r9, R9(%rsp)
+	/* Move RIP to its proper location */
+	movq SS+8(%rsp), %rdx
+	movq %rdx, RIP(%rsp)
	.endm
 
-	.macro MCOUNT_RESTORE_FRAME
-	movq 48(%rsp), %r9
-	movq 40(%rsp), %r8
-	movq 32(%rsp), %rdi
-	movq 24(%rsp), %rsi
-	movq 16(%rsp), %rdx
-	movq 8(%rsp), %rcx
-	movq (%rsp), %rax
-	addq $0x38, %rsp
+	.macro MCOUNT_RESTORE_FRAME skip=0
+	movq R9(%rsp), %r9
+	movq R8(%rsp), %r8
+	movq RDI(%rsp), %rdi
+	movq RSI(%rsp), %rsi
+	movq RDX(%rsp), %rdx
+	movq RCX(%rsp), %rcx
+	movq RAX(%rsp), %rax
+	addq $(SS+8-\skip), %rsp
	.endm
 
 #endif
 
 #ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR		((long)(mcount))
+#ifdef CC_USING_FENTRY
+# define MCOUNT_ADDR		((long)(__fentry__))
+#else
+# define MCOUNT_ADDR		((long)(mcount))
+#endif
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#endif
+
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 extern atomic_t modifying_ftrace_code;
+extern void __fentry__(void);
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
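MCOUNT_SAVE_FRAME now lays the saved registers out with the pt_regs offsets from asm/calling.h (whose parentheses were stripped earlier in this same merge, which keeps assembler expressions like $(SS+8-\skip) simple), so handlers built against ARCH_SUPPORTS_FTRACE_SAVE_REGS can be handed a partial pt_regs. The frame arithmetic, worked through (my illustration):

    #include <stdio.h>

    #define SS 160  /* offset of SS in the frame, per asm/calling.h */

    int main(void)
    {
        /* subq $(SS+8-\skip), %rsp: everything up to and including SS,
         * plus the 8-byte return-RIP slot, minus whatever the caller
         * already pushed. The old code reserved a fixed 0x38 bytes for
         * just the seven scratch registers. */
        for (int skip = 0; skip <= 8; skip += 8)
            printf("skip=%d -> reserve %d bytes\n", skip, SS + 8 - skip);
        printf("old fixed frame: 0x38 = %d bytes\n", 0x38);
        return 0;
    }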
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index 71ecbcba1a4e..f373046e63ec 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -9,10 +9,13 @@
 #include <asm/asm.h>
 #include <asm/errno.h>
 #include <asm/processor.h>
+#include <asm/smap.h>
 
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)	\
-	asm volatile("1:\t" insn "\n"				\
-		     "2:\t.section .fixup,\"ax\"\n"		\
+	asm volatile("\t" ASM_STAC "\n"				\
+		     "1:\t" insn "\n"				\
+		     "2:\t" ASM_CLAC "\n"			\
+		     "\t.section .fixup,\"ax\"\n"		\
		     "3:\tmov\t%3, %1\n"			\
		     "\tjmp\t2b\n"				\
		     "\t.previous\n"				\
@@ -21,12 +24,14 @@
		     : "i" (-EFAULT), "0" (oparg), "1" (0))
 
 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg)	\
-	asm volatile("1:\tmovl %2, %0\n"			\
+	asm volatile("\t" ASM_STAC "\n"				\
+		     "1:\tmovl %2, %0\n"			\
		     "\tmovl\t%0, %3\n"				\
		     "\t" insn "\n"				\
		     "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"	\
		     "\tjnz\t1b\n"				\
-		     "3:\t.section .fixup,\"ax\"\n"		\
+		     "3:\t" ASM_CLAC "\n"			\
+		     "\t.section .fixup,\"ax\"\n"		\
		     "4:\tmov\t%5, %1\n"			\
		     "\tjmp\t3b\n"				\
		     "\t.previous\n"				\
@@ -122,8 +127,10 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;
 
-	asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
-		     "2:\t.section .fixup, \"ax\"\n"
+	asm volatile("\t" ASM_STAC "\n"
+		     "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+		     "2:\t" ASM_CLAC "\n"
+		     "\t.section .fixup, \"ax\"\n"
		     "3:\tmov %3, %0\n"
		     "\tjmp 2b\n"
		     "\t.previous\n"
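Every futex fault path above gains an ASM_STAC/ASM_CLAC pair: with SMAP enabled the kernel may touch user pages only inside such a bracket, and the macros are patched to stac/clac via the alternatives machinery when X86_FEATURE_SMAP is present (NOPs otherwise). stac/clac themselves are supervisor-only, but the retry loop that __futex_atomic_op2 wraps is easy to show in userspace, with GCC __atomic builtins standing in for LOCK cmpxchg (illustrative sketch):

    #include <stdio.h>

    /* Same shape as __futex_atomic_op2: load the old value, compute the
     * new one, publish with cmpxchg, retry if another thread raced us. */
    static int futex_op_add(int *uaddr, int oparg, int *oldval)
    {
        int old = __atomic_load_n(uaddr, __ATOMIC_RELAXED);

        while (!__atomic_compare_exchange_n(uaddr, &old, old + oparg, 0,
                                            __ATOMIC_SEQ_CST,
                                            __ATOMIC_SEQ_CST))
            ;  /* 'old' was refreshed by the failed cmpxchg */

        *oldval = old;
        return 0;
    }

    int main(void)
    {
        int futex_word = 40, old;

        futex_op_add(&futex_word, 2, &old);
        printf("old=%d new=%d\n", old, futex_word);
        return 0;
    }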
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index d3895dbf4ddb..81f04cee5f74 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -18,6 +18,10 @@ typedef struct {
 #ifdef CONFIG_SMP
	unsigned int irq_resched_count;
	unsigned int irq_call_count;
+	/*
+	 * irq_tlb_count is double-counted in irq_call_count, so it must be
+	 * subtracted from irq_call_count when displaying irq_call_count
+	 */
	unsigned int irq_tlb_count;
 #endif
 #ifdef CONFIG_X86_THERMAL_VECTOR
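The new comment pins down a display-time rule: TLB shootdowns are now delivered through the generic SMP function-call vector, so each one bumps both irq_call_count and irq_tlb_count, and a /proc/interrupts-style report has to subtract once. With made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned int irq_call_count = 1200; /* includes TLB-flush IPIs */
        unsigned int irq_tlb_count = 300;

        printf("CAL: %u\n", irq_call_count - irq_tlb_count); /* 900 */
        printf("TLB: %u\n", irq_tlb_count);
        return 0;
    }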
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 2c392d663dce..434e2106cc87 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -35,8 +35,6 @@
 #define HPET_ID_NUMBER_SHIFT	8
 #define HPET_ID_VENDOR_SHIFT	16
 
-#define HPET_ID_VENDOR_8086	0x8086
-
 #define HPET_CFG_ENABLE		0x001
 #define HPET_CFG_LEGACY		0x002
 #define HPET_LEGACY_8254	2
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 439a9acc132d..bdd35dbd0605 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -90,4 +90,8 @@ static inline void arch_release_hugepage(struct page *page)
 {
 }
 
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
 #endif /* _ASM_X86_HUGETLB_H */
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index 257d9cca214f..ed8089d69094 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h | |||
@@ -19,12 +19,37 @@ struct pt_regs; | |||
19 | struct user_i387_struct; | 19 | struct user_i387_struct; |
20 | 20 | ||
21 | extern int init_fpu(struct task_struct *child); | 21 | extern int init_fpu(struct task_struct *child); |
22 | extern void fpu_finit(struct fpu *fpu); | ||
22 | extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); | 23 | extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); |
23 | extern void math_state_restore(void); | 24 | extern void math_state_restore(void); |
24 | 25 | ||
25 | extern bool irq_fpu_usable(void); | 26 | extern bool irq_fpu_usable(void); |
26 | extern void kernel_fpu_begin(void); | 27 | |
27 | extern void kernel_fpu_end(void); | 28 | /* |
29 | * Careful: __kernel_fpu_begin/end() must be called with preempt disabled | ||
30 | * and they don't touch the preempt state on their own. | ||
31 | * If you enable preemption after __kernel_fpu_begin(), preempt notifier | ||
32 | * should call the __kernel_fpu_end() to prevent the kernel/user FPU | ||
33 | * state from getting corrupted. KVM for example uses this model. | ||
34 | * | ||
35 | * All other cases use kernel_fpu_begin/end() which disable preemption | ||
36 | * during kernel FPU usage. | ||
37 | */ | ||
38 | extern void __kernel_fpu_begin(void); | ||
39 | extern void __kernel_fpu_end(void); | ||
40 | |||
41 | static inline void kernel_fpu_begin(void) | ||
42 | { | ||
43 | WARN_ON_ONCE(!irq_fpu_usable()); | ||
44 | preempt_disable(); | ||
45 | __kernel_fpu_begin(); | ||
46 | } | ||
47 | |||
48 | static inline void kernel_fpu_end(void) | ||
49 | { | ||
50 | __kernel_fpu_end(); | ||
51 | preempt_enable(); | ||
52 | } | ||
28 | 53 | ||
29 | /* | 54 | /* |
30 | * Some instructions like VIA's padlock instructions generate a spurious | 55 | * Some instructions like VIA's padlock instructions generate a spurious |
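The split interface makes the locking contract visible at the call site: kernel_fpu_begin()/kernel_fpu_end() bundle preemption control with the FPU state handling, while the underscored variants leave preemption to callers like KVM. A hedged sketch of the common case; do_sse_copy() is a hypothetical SSE-using worker, not a kernel function:

#include <linux/string.h>
#include <asm/i387.h>

/* Sketch: routine in-kernel FPU use.  kernel_fpu_begin() disables
 * preemption before touching FPU state, so the SSE work cannot be
 * scheduled away mid-stream; do_sse_copy() is hypothetical.
 */
static void copy_maybe_sse(void *dst, const void *src, size_t len)
{
	if (!irq_fpu_usable()) {
		memcpy(dst, src, len);		/* plain fallback */
		return;
	}
	kernel_fpu_begin();
	do_sse_copy(dst, src, len);
	kernel_fpu_end();
}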
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h index b04cbdb138cd..e6232773ce49 100644 --- a/arch/x86/include/asm/ia32.h +++ b/arch/x86/include/asm/ia32.h | |||
@@ -86,73 +86,6 @@ struct stat64 { | |||
86 | unsigned long long st_ino; | 86 | unsigned long long st_ino; |
87 | } __attribute__((packed)); | 87 | } __attribute__((packed)); |
88 | 88 | ||
89 | typedef struct compat_siginfo { | ||
90 | int si_signo; | ||
91 | int si_errno; | ||
92 | int si_code; | ||
93 | |||
94 | union { | ||
95 | int _pad[((128 / sizeof(int)) - 3)]; | ||
96 | |||
97 | /* kill() */ | ||
98 | struct { | ||
99 | unsigned int _pid; /* sender's pid */ | ||
100 | unsigned int _uid; /* sender's uid */ | ||
101 | } _kill; | ||
102 | |||
103 | /* POSIX.1b timers */ | ||
104 | struct { | ||
105 | compat_timer_t _tid; /* timer id */ | ||
106 | int _overrun; /* overrun count */ | ||
107 | compat_sigval_t _sigval; /* same as below */ | ||
108 | int _sys_private; /* not to be passed to user */ | ||
109 | int _overrun_incr; /* amount to add to overrun */ | ||
110 | } _timer; | ||
111 | |||
112 | /* POSIX.1b signals */ | ||
113 | struct { | ||
114 | unsigned int _pid; /* sender's pid */ | ||
115 | unsigned int _uid; /* sender's uid */ | ||
116 | compat_sigval_t _sigval; | ||
117 | } _rt; | ||
118 | |||
119 | /* SIGCHLD */ | ||
120 | struct { | ||
121 | unsigned int _pid; /* which child */ | ||
122 | unsigned int _uid; /* sender's uid */ | ||
123 | int _status; /* exit code */ | ||
124 | compat_clock_t _utime; | ||
125 | compat_clock_t _stime; | ||
126 | } _sigchld; | ||
127 | |||
128 | /* SIGCHLD (x32 version) */ | ||
129 | struct { | ||
130 | unsigned int _pid; /* which child */ | ||
131 | unsigned int _uid; /* sender's uid */ | ||
132 | int _status; /* exit code */ | ||
133 | compat_s64 _utime; | ||
134 | compat_s64 _stime; | ||
135 | } _sigchld_x32; | ||
136 | |||
137 | /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ | ||
138 | struct { | ||
139 | unsigned int _addr; /* faulting insn/memory ref. */ | ||
140 | } _sigfault; | ||
141 | |||
142 | /* SIGPOLL */ | ||
143 | struct { | ||
144 | int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ | ||
145 | int _fd; | ||
146 | } _sigpoll; | ||
147 | |||
148 | struct { | ||
149 | unsigned int _call_addr; /* calling insn */ | ||
150 | int _syscall; /* triggering system call number */ | ||
151 | unsigned int _arch; /* AUDIT_ARCH_* of syscall */ | ||
152 | } _sigsys; | ||
153 | } _sifields; | ||
154 | } compat_siginfo_t; | ||
155 | |||
156 | #define IA32_STACK_TOP IA32_PAGE_OFFSET | 89 | #define IA32_STACK_TOP IA32_PAGE_OFFSET |
157 | 90 | ||
158 | #ifdef __KERNEL__ | 91 | #ifdef __KERNEL__ |
diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h index f229b13a5f30..f42a04735a0a 100644 --- a/arch/x86/include/asm/iommu_table.h +++ b/arch/x86/include/asm/iommu_table.h | |||
@@ -48,7 +48,7 @@ struct iommu_table_entry { | |||
48 | 48 | ||
49 | 49 | ||
50 | #define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\ | 50 | #define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\ |
51 | static const struct iommu_table_entry const \ | 51 | static const struct iommu_table_entry \ |
52 | __iommu_entry_##_detect __used \ | 52 | __iommu_entry_##_detect __used \ |
53 | __attribute__ ((unused, __section__(".iommu_table"), \ | 53 | __attribute__ ((unused, __section__(".iommu_table"), \ |
54 | aligned((sizeof(void *))))) \ | 54 | aligned((sizeof(void *))))) \ |
@@ -63,10 +63,10 @@ struct iommu_table_entry { | |||
63 | * to stop detecting the other IOMMUs after yours has been detected. | 63 | * to stop detecting the other IOMMUs after yours has been detected. |
64 | */ | 64 | */ |
65 | #define IOMMU_INIT_POST(_detect) \ | 65 | #define IOMMU_INIT_POST(_detect) \ |
66 | __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 0) | 66 | __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, NULL, NULL, 0) |
67 | 67 | ||
68 | #define IOMMU_INIT_POST_FINISH(_detect) \ | 68 | #define IOMMU_INIT_POST_FINISH(_detect) \ |
69 | __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 1) | 69 | __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, NULL, NULL, 1) |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * A more sophisticated version of IOMMU_INIT. This variant requires: | 72 | * A more sophisticated version of IOMMU_INIT. This variant requires: |
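For orientation, these macros are consumed by passing a detection routine; the generated entry lands in the .iommu_table section and is ordered by its dependency fields at boot. A sketch with a hypothetical detector (my_iommu_detect is illustrative, not a kernel symbol):

/* Sketch: registering a hypothetical IOMMU detector to run after
 * pci_swiotlb_detect_4gb, with no early/late init hooks -- hence
 * the NULL arguments the corrected macros now pass.
 */
static int __init my_iommu_detect(void)
{
	return 0;	/* nothing found */
}
IOMMU_INIT_POST(my_iommu_detect);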
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index 547882539157..d3ddd17405d0 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <asm/insn.h> | 27 | #include <asm/insn.h> |
28 | 28 | ||
29 | #define __ARCH_WANT_KPROBES_INSN_SLOT | 29 | #define __ARCH_WANT_KPROBES_INSN_SLOT |
30 | #define ARCH_SUPPORTS_KPROBES_ON_FTRACE | ||
30 | 31 | ||
31 | struct pt_regs; | 32 | struct pt_regs; |
32 | struct kprobe; | 33 | struct kprobe; |
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index 246617efd67f..a65ec29e6ffb 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h | |||
@@ -9,6 +9,22 @@ | |||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/ioctl.h> | 10 | #include <linux/ioctl.h> |
11 | 11 | ||
12 | #define DE_VECTOR 0 | ||
13 | #define DB_VECTOR 1 | ||
14 | #define BP_VECTOR 3 | ||
15 | #define OF_VECTOR 4 | ||
16 | #define BR_VECTOR 5 | ||
17 | #define UD_VECTOR 6 | ||
18 | #define NM_VECTOR 7 | ||
19 | #define DF_VECTOR 8 | ||
20 | #define TS_VECTOR 10 | ||
21 | #define NP_VECTOR 11 | ||
22 | #define SS_VECTOR 12 | ||
23 | #define GP_VECTOR 13 | ||
24 | #define PF_VECTOR 14 | ||
25 | #define MF_VECTOR 16 | ||
26 | #define MC_VECTOR 18 | ||
27 | |||
12 | /* Select x86 specific features in <linux/kvm.h> */ | 28 | /* Select x86 specific features in <linux/kvm.h> */ |
13 | #define __KVM_HAVE_PIT | 29 | #define __KVM_HAVE_PIT |
14 | #define __KVM_HAVE_IOAPIC | 30 | #define __KVM_HAVE_IOAPIC |
@@ -25,6 +41,7 @@ | |||
25 | #define __KVM_HAVE_DEBUGREGS | 41 | #define __KVM_HAVE_DEBUGREGS |
26 | #define __KVM_HAVE_XSAVE | 42 | #define __KVM_HAVE_XSAVE |
27 | #define __KVM_HAVE_XCRS | 43 | #define __KVM_HAVE_XCRS |
44 | #define __KVM_HAVE_READONLY_MEM | ||
28 | 45 | ||
29 | /* Architectural interrupt line count. */ | 46 | /* Architectural interrupt line count. */ |
30 | #define KVM_NR_INTERRUPTS 256 | 47 | #define KVM_NR_INTERRUPTS 256 |
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index c764f43b71c5..15f960c06ff7 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h | |||
@@ -86,6 +86,19 @@ struct x86_instruction_info { | |||
86 | 86 | ||
87 | struct x86_emulate_ops { | 87 | struct x86_emulate_ops { |
88 | /* | 88 | /* |
89 | * read_gpr: read a general purpose register (rax - r15) | ||
90 | * | ||
91 | * @reg: gpr number. | ||
92 | */ | ||
93 | ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg); | ||
94 | /* | ||
95 | * write_gpr: write a general purpose register (rax - r15) | ||
96 | * | ||
97 | * @reg: gpr number. | ||
98 | * @val: value to write. | ||
99 | */ | ||
100 | void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val); | ||
101 | /* | ||
89 | * read_std: Read bytes of standard (non-emulated/special) memory. | 102 | * read_std: Read bytes of standard (non-emulated/special) memory. |
90 | * Used for descriptor reading. | 103 | * Used for descriptor reading. |
91 | * @addr: [IN ] Linear address from which to read. | 104 | * @addr: [IN ] Linear address from which to read. |
@@ -200,8 +213,9 @@ typedef u32 __attribute__((vector_size(16))) sse128_t; | |||
200 | 213 | ||
201 | /* Type, address-of, and value of an instruction's operand. */ | 214 | /* Type, address-of, and value of an instruction's operand. */ |
202 | struct operand { | 215 | struct operand { |
203 | enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_MM, OP_NONE } type; | 216 | enum { OP_REG, OP_MEM, OP_MEM_STR, OP_IMM, OP_XMM, OP_MM, OP_NONE } type; |
204 | unsigned int bytes; | 217 | unsigned int bytes; |
218 | unsigned int count; | ||
205 | union { | 219 | union { |
206 | unsigned long orig_val; | 220 | unsigned long orig_val; |
207 | u64 orig_val64; | 221 | u64 orig_val64; |
@@ -221,6 +235,7 @@ struct operand { | |||
221 | char valptr[sizeof(unsigned long) + 2]; | 235 | char valptr[sizeof(unsigned long) + 2]; |
222 | sse128_t vec_val; | 236 | sse128_t vec_val; |
223 | u64 mm_val; | 237 | u64 mm_val; |
238 | void *data; | ||
224 | }; | 239 | }; |
225 | }; | 240 | }; |
226 | 241 | ||
@@ -236,14 +251,23 @@ struct read_cache { | |||
236 | unsigned long end; | 251 | unsigned long end; |
237 | }; | 252 | }; |
238 | 253 | ||
254 | /* Execution mode, passed to the emulator. */ | ||
255 | enum x86emul_mode { | ||
256 | X86EMUL_MODE_REAL, /* Real mode. */ | ||
257 | X86EMUL_MODE_VM86, /* Virtual 8086 mode. */ | ||
258 | X86EMUL_MODE_PROT16, /* 16-bit protected mode. */ | ||
259 | X86EMUL_MODE_PROT32, /* 32-bit protected mode. */ | ||
260 | X86EMUL_MODE_PROT64, /* 64-bit (long) mode. */ | ||
261 | }; | ||
262 | |||
239 | struct x86_emulate_ctxt { | 263 | struct x86_emulate_ctxt { |
240 | struct x86_emulate_ops *ops; | 264 | const struct x86_emulate_ops *ops; |
241 | 265 | ||
242 | /* Register state before/after emulation. */ | 266 | /* Register state before/after emulation. */ |
243 | unsigned long eflags; | 267 | unsigned long eflags; |
244 | unsigned long eip; /* eip before instruction emulation */ | 268 | unsigned long eip; /* eip before instruction emulation */ |
245 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ | 269 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ |
246 | int mode; | 270 | enum x86emul_mode mode; |
247 | 271 | ||
248 | /* interruptibility state, as a result of execution of STI or MOV SS */ | 272 | /* interruptibility state, as a result of execution of STI or MOV SS */ |
249 | int interruptibility; | 273 | int interruptibility; |
@@ -281,8 +305,10 @@ struct x86_emulate_ctxt { | |||
281 | bool rip_relative; | 305 | bool rip_relative; |
282 | unsigned long _eip; | 306 | unsigned long _eip; |
283 | struct operand memop; | 307 | struct operand memop; |
308 | u32 regs_valid; /* bitmaps of registers in _regs[] that can be read */ | ||
309 | u32 regs_dirty; /* bitmaps of registers in _regs[] that have been written */ | ||
284 | /* Fields above regs are cleared together. */ | 310 | /* Fields above regs are cleared together. */ |
285 | unsigned long regs[NR_VCPU_REGS]; | 311 | unsigned long _regs[NR_VCPU_REGS]; |
286 | struct operand *memopp; | 312 | struct operand *memopp; |
287 | struct fetch_cache fetch; | 313 | struct fetch_cache fetch; |
288 | struct read_cache io_read; | 314 | struct read_cache io_read; |
@@ -293,17 +319,6 @@ struct x86_emulate_ctxt { | |||
293 | #define REPE_PREFIX 0xf3 | 319 | #define REPE_PREFIX 0xf3 |
294 | #define REPNE_PREFIX 0xf2 | 320 | #define REPNE_PREFIX 0xf2 |
295 | 321 | ||
296 | /* Execution mode, passed to the emulator. */ | ||
297 | #define X86EMUL_MODE_REAL 0 /* Real mode. */ | ||
298 | #define X86EMUL_MODE_VM86 1 /* Virtual 8086 mode. */ | ||
299 | #define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */ | ||
300 | #define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */ | ||
301 | #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ | ||
302 | |||
303 | /* any protected mode */ | ||
304 | #define X86EMUL_MODE_PROT (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \ | ||
305 | X86EMUL_MODE_PROT64) | ||
306 | |||
307 | /* CPUID vendors */ | 322 | /* CPUID vendors */ |
308 | #define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 | 323 | #define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 |
309 | #define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 | 324 | #define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 |
@@ -394,4 +409,7 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt, | |||
394 | u16 tss_selector, int idt_index, int reason, | 409 | u16 tss_selector, int idt_index, int reason, |
395 | bool has_error_code, u32 error_code); | 410 | bool has_error_code, u32 error_code); |
396 | int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq); | 411 | int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq); |
412 | void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt); | ||
413 | void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt); | ||
414 | |||
397 | #endif /* _ASM_X86_KVM_X86_EMULATE_H */ | 415 | #endif /* _ASM_X86_KVM_X86_EMULATE_H */ |
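The new read_gpr/write_gpr callbacks and the regs_valid/regs_dirty bitmaps turn _regs[] into a lazy cache: a register is fetched from the vcpu once on first read and pushed back only if written, with emulator_invalidate/writeback_register_cache as the sync points. The accessors below mirror the shape of the emulator-side helpers this interface implies; names are illustrative:

/* Sketch: lazy GPR caching over the new ops.  reg_read() fills a
 * slot from ops->read_gpr() on first use; reg_write() marks it
 * dirty; the writeback pass flushes only dirty slots.
 */
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for (reg = 0; reg < NR_VCPU_REGS; reg++)
		if (ctxt->regs_dirty & (1u << reg))
			ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}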
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 09155d64cf7e..b2e11f452435 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -75,22 +75,6 @@ | |||
75 | #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) | 75 | #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) |
76 | #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) | 76 | #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) |
77 | 77 | ||
78 | #define DE_VECTOR 0 | ||
79 | #define DB_VECTOR 1 | ||
80 | #define BP_VECTOR 3 | ||
81 | #define OF_VECTOR 4 | ||
82 | #define BR_VECTOR 5 | ||
83 | #define UD_VECTOR 6 | ||
84 | #define NM_VECTOR 7 | ||
85 | #define DF_VECTOR 8 | ||
86 | #define TS_VECTOR 10 | ||
87 | #define NP_VECTOR 11 | ||
88 | #define SS_VECTOR 12 | ||
89 | #define GP_VECTOR 13 | ||
90 | #define PF_VECTOR 14 | ||
91 | #define MF_VECTOR 16 | ||
92 | #define MC_VECTOR 18 | ||
93 | |||
94 | #define SELECTOR_TI_MASK (1 << 2) | 78 | #define SELECTOR_TI_MASK (1 << 2) |
95 | #define SELECTOR_RPL_MASK 0x03 | 79 | #define SELECTOR_RPL_MASK 0x03 |
96 | 80 | ||
@@ -287,10 +271,24 @@ struct kvm_mmu { | |||
287 | union kvm_mmu_page_role base_role; | 271 | union kvm_mmu_page_role base_role; |
288 | bool direct_map; | 272 | bool direct_map; |
289 | 273 | ||
274 | /* | ||
275 | * Bitmap; bit set = permission fault | ||
276 | * Byte index: page fault error code [4:1] | ||
277 | * Bit index: pte permissions in ACC_* format | ||
278 | */ | ||
279 | u8 permissions[16]; | ||
280 | |||
290 | u64 *pae_root; | 281 | u64 *pae_root; |
291 | u64 *lm_root; | 282 | u64 *lm_root; |
292 | u64 rsvd_bits_mask[2][4]; | 283 | u64 rsvd_bits_mask[2][4]; |
293 | 284 | ||
285 | /* | ||
286 | * Bitmap: bit set = last pte in walk | ||
287 | * index[0:1]: level (zero-based) | ||
288 | * index[2]: pte.ps | ||
289 | */ | ||
290 | u8 last_pte_bitmap; | ||
291 | |||
294 | bool nx; | 292 | bool nx; |
295 | 293 | ||
296 | u64 pdptrs[4]; /* pae */ | 294 | u64 pdptrs[4]; /* pae */ |
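Both new fields are precomputed lookup tables. Per their comments, a permission check becomes a single indexed bit test keyed on the fault error code and the pte's ACC_* bits, and the last-level test keys on walk level plus the PSE bit. Sketches of the consumers these encodings imply, matching the shape of the KVM mmu helpers:

/* Sketch: permissions[] is indexed by page-fault error-code bits
 * [4:1] and tested against the pte's ACC_* access bits; a set bit
 * means "permission fault".
 */
static bool permission_fault(struct kvm_mmu *mmu, unsigned pte_access,
			     unsigned pfec)
{
	return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
}

/* Sketch: last_pte_bitmap index = zero-based level in bits [0:1],
 * PSE in bit [2]; a set bit means the walk terminates here.
 */
static bool is_last_pte(struct kvm_mmu *mmu, unsigned level, bool pse)
{
	unsigned index = (level - 1) | (pse ? 4 : 0);

	return (mmu->last_pte_bitmap >> index) & 1;
}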
@@ -414,12 +412,15 @@ struct kvm_vcpu_arch { | |||
414 | struct x86_emulate_ctxt emulate_ctxt; | 412 | struct x86_emulate_ctxt emulate_ctxt; |
415 | bool emulate_regs_need_sync_to_vcpu; | 413 | bool emulate_regs_need_sync_to_vcpu; |
416 | bool emulate_regs_need_sync_from_vcpu; | 414 | bool emulate_regs_need_sync_from_vcpu; |
415 | int (*complete_userspace_io)(struct kvm_vcpu *vcpu); | ||
417 | 416 | ||
418 | gpa_t time; | 417 | gpa_t time; |
419 | struct pvclock_vcpu_time_info hv_clock; | 418 | struct pvclock_vcpu_time_info hv_clock; |
420 | unsigned int hw_tsc_khz; | 419 | unsigned int hw_tsc_khz; |
421 | unsigned int time_offset; | 420 | unsigned int time_offset; |
422 | struct page *time_page; | 421 | struct page *time_page; |
422 | /* set guest stopped flag in pvclock flags field */ | ||
423 | bool pvclock_set_guest_stopped_request; | ||
423 | 424 | ||
424 | struct { | 425 | struct { |
425 | u64 msr_val; | 426 | u64 msr_val; |
@@ -454,6 +455,7 @@ struct kvm_vcpu_arch { | |||
454 | unsigned long dr6; | 455 | unsigned long dr6; |
455 | unsigned long dr7; | 456 | unsigned long dr7; |
456 | unsigned long eff_db[KVM_NR_DB_REGS]; | 457 | unsigned long eff_db[KVM_NR_DB_REGS]; |
458 | unsigned long guest_debug_dr7; | ||
457 | 459 | ||
458 | u64 mcg_cap; | 460 | u64 mcg_cap; |
459 | u64 mcg_status; | 461 | u64 mcg_status; |
@@ -500,14 +502,24 @@ struct kvm_vcpu_arch { | |||
500 | }; | 502 | }; |
501 | 503 | ||
502 | struct kvm_lpage_info { | 504 | struct kvm_lpage_info { |
503 | unsigned long rmap_pde; | ||
504 | int write_count; | 505 | int write_count; |
505 | }; | 506 | }; |
506 | 507 | ||
507 | struct kvm_arch_memory_slot { | 508 | struct kvm_arch_memory_slot { |
509 | unsigned long *rmap[KVM_NR_PAGE_SIZES]; | ||
508 | struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1]; | 510 | struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1]; |
509 | }; | 511 | }; |
510 | 512 | ||
513 | struct kvm_apic_map { | ||
514 | struct rcu_head rcu; | ||
515 | u8 ldr_bits; | ||
516 | /* fields below are used to decode ldr values in different modes */ | ||
517 | u32 cid_shift, cid_mask, lid_mask; | ||
518 | struct kvm_lapic *phys_map[256]; | ||
519 | /* first index is cluster id, second is cpu id in a cluster */ | ||
520 | struct kvm_lapic *logical_map[16][16]; | ||
521 | }; | ||
522 | |||
511 | struct kvm_arch { | 523 | struct kvm_arch { |
512 | unsigned int n_used_mmu_pages; | 524 | unsigned int n_used_mmu_pages; |
513 | unsigned int n_requested_mmu_pages; | 525 | unsigned int n_requested_mmu_pages; |
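The map lets interrupt delivery resolve a destination APIC without scanning every vcpu: physical IDs index phys_map directly, while logical IDs are decoded with the mode-dependent shift/masks into a cluster id and a per-cpu bitmap within the cluster. A sketch of the logical decode; it returns only the first match where the real delivery path walks every set bit, and RCU protection of kvm->arch.apic_map is assumed to be handled by the caller:

/* Sketch: decoding a logical destination through the cached map.
 * ldr_bits positions the LDR field, cid_shift/cid_mask extract the
 * cluster id, lid_mask leaves a one-bit-per-cpu bitmap inside it.
 */
static struct kvm_lapic *first_logical_dest(struct kvm_apic_map *map,
					    u32 ldr)
{
	unsigned long bitmap;
	u32 cid;

	ldr >>= 32 - map->ldr_bits;	/* LDR sits in the top bits */
	cid = (ldr >> map->cid_shift) & map->cid_mask;
	bitmap = ldr & map->lid_mask;	/* cpus within the cluster */

	if (cid >= 16 || !bitmap)
		return NULL;

	return map->logical_map[cid][__ffs(bitmap)];
}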
@@ -525,6 +537,8 @@ struct kvm_arch { | |||
525 | struct kvm_ioapic *vioapic; | 537 | struct kvm_ioapic *vioapic; |
526 | struct kvm_pit *vpit; | 538 | struct kvm_pit *vpit; |
527 | int vapics_in_nmi_mode; | 539 | int vapics_in_nmi_mode; |
540 | struct mutex apic_map_lock; | ||
541 | struct kvm_apic_map *apic_map; | ||
528 | 542 | ||
529 | unsigned int tss_addr; | 543 | unsigned int tss_addr; |
530 | struct page *apic_access_page; | 544 | struct page *apic_access_page; |
@@ -618,8 +632,7 @@ struct kvm_x86_ops { | |||
618 | void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); | 632 | void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); |
619 | void (*vcpu_put)(struct kvm_vcpu *vcpu); | 633 | void (*vcpu_put)(struct kvm_vcpu *vcpu); |
620 | 634 | ||
621 | void (*set_guest_debug)(struct kvm_vcpu *vcpu, | 635 | void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu); |
622 | struct kvm_guest_debug *dbg); | ||
623 | int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); | 636 | int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); |
624 | int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); | 637 | int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); |
625 | u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); | 638 | u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); |
@@ -957,6 +970,7 @@ extern bool kvm_rebooting; | |||
957 | 970 | ||
958 | #define KVM_ARCH_WANT_MMU_NOTIFIER | 971 | #define KVM_ARCH_WANT_MMU_NOTIFIER |
959 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | 972 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); |
973 | int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); | ||
960 | int kvm_age_hva(struct kvm *kvm, unsigned long hva); | 974 | int kvm_age_hva(struct kvm *kvm, unsigned long hva); |
961 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); | 975 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); |
962 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | 976 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); |
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index 2f7712e08b1e..eb3e9d85e1f1 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h | |||
@@ -102,21 +102,21 @@ struct kvm_vcpu_pv_apf_data { | |||
102 | extern void kvmclock_init(void); | 102 | extern void kvmclock_init(void); |
103 | extern int kvm_register_clock(char *txt); | 103 | extern int kvm_register_clock(char *txt); |
104 | 104 | ||
105 | #ifdef CONFIG_KVM_CLOCK | 105 | #ifdef CONFIG_KVM_GUEST |
106 | bool kvm_check_and_clear_guest_paused(void); | 106 | bool kvm_check_and_clear_guest_paused(void); |
107 | #else | 107 | #else |
108 | static inline bool kvm_check_and_clear_guest_paused(void) | 108 | static inline bool kvm_check_and_clear_guest_paused(void) |
109 | { | 109 | { |
110 | return false; | 110 | return false; |
111 | } | 111 | } |
112 | #endif /* CONFIG_KVMCLOCK */ | 112 | #endif /* CONFIG_KVM_GUEST */ |
113 | 113 | ||
114 | /* This instruction is vmcall. On non-VT architectures, it will generate a | 114 | /* This instruction is vmcall. On non-VT architectures, it will generate a |
115 | * trap that we will then rewrite to the appropriate instruction. | 115 | * trap that we will then rewrite to the appropriate instruction. |
116 | */ | 116 | */ |
117 | #define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1" | 117 | #define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1" |
118 | 118 | ||
119 | /* For KVM hypercalls, a three-byte sequence of either the vmrun or the vmmrun | 119 | /* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall |
120 | * instruction. The hypervisor may replace it with something else but only the | 120 | * instruction. The hypervisor may replace it with something else but only the |
121 | * instructions are guaranteed to be supported. | 121 | * instructions are guaranteed to be supported. |
122 | * | 122 | * |
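The raw .byte encoding exists so the host can rewrite vmcall to vmmcall (or back) for the CPU it runs on. The hypercall wrappers defined below this comment in the same header put the call number in eax/rax and read the result from the same register; the zero-argument form looks like this:

/* The zero-argument hypercall wrapper: nr goes in rax/eax, the
 * result comes back the same way, and the "memory" clobber keeps
 * the compiler from caching guest memory across the host round
 * trip.
 */
static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}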
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index a3ac52b29cbf..54d73b1f00a0 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -116,19 +116,9 @@ struct mce_log { | |||
116 | /* Software defined banks */ | 116 | /* Software defined banks */ |
117 | #define MCE_EXTENDED_BANK 128 | 117 | #define MCE_EXTENDED_BANK 128 |
118 | #define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 | 118 | #define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 |
119 | | 119 | #define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) |
120 | #define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */ | ||
121 | #define K8_MCE_THRESHOLD_BANK_0 (MCE_THRESHOLD_BASE + 0 * 9) | ||
122 | #define K8_MCE_THRESHOLD_BANK_1 (MCE_THRESHOLD_BASE + 1 * 9) | ||
123 | #define K8_MCE_THRESHOLD_BANK_2 (MCE_THRESHOLD_BASE + 2 * 9) | ||
124 | #define K8_MCE_THRESHOLD_BANK_3 (MCE_THRESHOLD_BASE + 3 * 9) | ||
125 | #define K8_MCE_THRESHOLD_BANK_4 (MCE_THRESHOLD_BASE + 4 * 9) | ||
126 | #define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9) | ||
127 | #define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0) | ||
128 | |||
129 | 120 | ||
130 | #ifdef __KERNEL__ | 121 | #ifdef __KERNEL__ |
131 | |||
132 | extern void mce_register_decode_chain(struct notifier_block *nb); | 122 | extern void mce_register_decode_chain(struct notifier_block *nb); |
133 | extern void mce_unregister_decode_chain(struct notifier_block *nb); | 123 | extern void mce_unregister_decode_chain(struct notifier_block *nb); |
134 | 124 | ||
@@ -171,6 +161,7 @@ DECLARE_PER_CPU(struct device *, mce_device); | |||
171 | #ifdef CONFIG_X86_MCE_INTEL | 161 | #ifdef CONFIG_X86_MCE_INTEL |
172 | extern int mce_cmci_disabled; | 162 | extern int mce_cmci_disabled; |
173 | extern int mce_ignore_ce; | 163 | extern int mce_ignore_ce; |
164 | extern int mce_bios_cmci_threshold; | ||
174 | void mce_intel_feature_init(struct cpuinfo_x86 *c); | 165 | void mce_intel_feature_init(struct cpuinfo_x86 *c); |
175 | void cmci_clear(void); | 166 | void cmci_clear(void); |
176 | void cmci_reenable(void); | 167 | void cmci_reenable(void); |
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 4ebe157bf73d..43d921b4752c 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h | |||
@@ -15,8 +15,8 @@ struct microcode_ops { | |||
15 | enum ucode_state (*request_microcode_user) (int cpu, | 15 | enum ucode_state (*request_microcode_user) (int cpu, |
16 | const void __user *buf, size_t size); | 16 | const void __user *buf, size_t size); |
17 | 17 | ||
18 | enum ucode_state (*request_microcode_fw) (int cpu, | 18 | enum ucode_state (*request_microcode_fw) (int cpu, struct device *, |
19 | struct device *device); | 19 | bool refresh_fw); |
20 | 20 | ||
21 | void (*microcode_fini_cpu) (int cpu); | 21 | void (*microcode_fini_cpu) (int cpu); |
22 | 22 | ||
@@ -49,12 +49,6 @@ static inline struct microcode_ops * __init init_intel_microcode(void) | |||
49 | #ifdef CONFIG_MICROCODE_AMD | 49 | #ifdef CONFIG_MICROCODE_AMD |
50 | extern struct microcode_ops * __init init_amd_microcode(void); | 50 | extern struct microcode_ops * __init init_amd_microcode(void); |
51 | extern void __exit exit_amd_microcode(void); | 51 | extern void __exit exit_amd_microcode(void); |
52 | |||
53 | static inline void get_ucode_data(void *to, const u8 *from, size_t n) | ||
54 | { | ||
55 | memcpy(to, from, n); | ||
56 | } | ||
57 | |||
58 | #else | 52 | #else |
59 | static inline struct microcode_ops * __init init_amd_microcode(void) | 53 | static inline struct microcode_ops * __init init_amd_microcode(void) |
60 | { | 54 | { |
diff --git a/arch/x86/include/asm/mmzone.h b/arch/x86/include/asm/mmzone.h index 64217ea16a36..d497bc425cae 100644 --- a/arch/x86/include/asm/mmzone.h +++ b/arch/x86/include/asm/mmzone.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifdef CONFIG_X86_32 |
2 | # include "mmzone_32.h" | 2 | # include <asm/mmzone_32.h> |
3 | #else | 3 | #else |
4 | # include "mmzone_64.h" | 4 | # include <asm/mmzone_64.h> |
5 | #endif | 5 | #endif |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 957ec87385af..7f0edceb7563 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -121,6 +121,11 @@ | |||
121 | #define MSR_P6_EVNTSEL0 0x00000186 | 121 | #define MSR_P6_EVNTSEL0 0x00000186 |
122 | #define MSR_P6_EVNTSEL1 0x00000187 | 122 | #define MSR_P6_EVNTSEL1 0x00000187 |
123 | 123 | ||
124 | #define MSR_KNC_PERFCTR0 0x00000020 | ||
125 | #define MSR_KNC_PERFCTR1 0x00000021 | ||
126 | #define MSR_KNC_EVNTSEL0 0x00000028 | ||
127 | #define MSR_KNC_EVNTSEL1 0x00000029 | ||
128 | |||
124 | /* AMD64 MSRs. Not complete. See the architecture manual for a more | 129 | /* AMD64 MSRs. Not complete. See the architecture manual for a more |
125 | complete list. */ | 130 | complete list. */ |
126 | 131 | ||
@@ -248,6 +253,9 @@ | |||
248 | 253 | ||
249 | #define MSR_IA32_PERF_STATUS 0x00000198 | 254 | #define MSR_IA32_PERF_STATUS 0x00000198 |
250 | #define MSR_IA32_PERF_CTL 0x00000199 | 255 | #define MSR_IA32_PERF_CTL 0x00000199 |
256 | #define MSR_AMD_PSTATE_DEF_BASE 0xc0010064 | ||
257 | #define MSR_AMD_PERF_STATUS 0xc0010063 | ||
258 | #define MSR_AMD_PERF_CTL 0xc0010062 | ||
251 | 259 | ||
252 | #define MSR_IA32_MPERF 0x000000e7 | 260 | #define MSR_IA32_MPERF 0x000000e7 |
253 | #define MSR_IA32_APERF 0x000000e8 | 261 | #define MSR_IA32_APERF 0x000000e8 |
diff --git a/arch/x86/include/asm/mutex.h b/arch/x86/include/asm/mutex.h index a731b9c573a6..7d3a48275394 100644 --- a/arch/x86/include/asm/mutex.h +++ b/arch/x86/include/asm/mutex.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifdef CONFIG_X86_32 |
2 | # include "mutex_32.h" | 2 | # include <asm/mutex_32.h> |
3 | #else | 3 | #else |
4 | # include "mutex_64.h" | 4 | # include <asm/mutex_64.h> |
5 | #endif | 5 | #endif |
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index bfacd2ccf651..49119fcea2dc 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h | |||
@@ -53,9 +53,9 @@ static inline int numa_cpu_node(int cpu) | |||
53 | #endif /* CONFIG_NUMA */ | 53 | #endif /* CONFIG_NUMA */ |
54 | 54 | ||
55 | #ifdef CONFIG_X86_32 | 55 | #ifdef CONFIG_X86_32 |
56 | # include "numa_32.h" | 56 | # include <asm/numa_32.h> |
57 | #else | 57 | #else |
58 | # include "numa_64.h" | 58 | # include <asm/numa_64.h> |
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | #ifdef CONFIG_NUMA | 61 | #ifdef CONFIG_NUMA |
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index df75d07571ce..6e41b9343928 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -141,7 +141,7 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq); | |||
141 | #endif /* __KERNEL__ */ | 141 | #endif /* __KERNEL__ */ |
142 | 142 | ||
143 | #ifdef CONFIG_X86_64 | 143 | #ifdef CONFIG_X86_64 |
144 | #include "pci_64.h" | 144 | #include <asm/pci_64.h> |
145 | #endif | 145 | #endif |
146 | 146 | ||
147 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ | 147 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ |
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index cb4e43bce98a..4fabcdf1cfa7 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h | |||
@@ -262,4 +262,6 @@ static inline void perf_check_microcode(void) { } | |||
262 | static inline void amd_pmu_disable_virt(void) { } | 262 | static inline void amd_pmu_disable_virt(void) { } |
263 | #endif | 263 | #endif |
264 | 264 | ||
265 | #define arch_perf_out_copy_user copy_from_user_nmi | ||
266 | |||
265 | #endif /* _ASM_X86_PERF_EVENT_H */ | 267 | #endif /* _ASM_X86_PERF_EVENT_H */ |
diff --git a/arch/x86/include/asm/perf_regs.h b/arch/x86/include/asm/perf_regs.h new file mode 100644 index 000000000000..3f2207bfd17b --- /dev/null +++ b/arch/x86/include/asm/perf_regs.h | |||
@@ -0,0 +1,33 @@ | |||
1 | #ifndef _ASM_X86_PERF_REGS_H | ||
2 | #define _ASM_X86_PERF_REGS_H | ||
3 | |||
4 | enum perf_event_x86_regs { | ||
5 | PERF_REG_X86_AX, | ||
6 | PERF_REG_X86_BX, | ||
7 | PERF_REG_X86_CX, | ||
8 | PERF_REG_X86_DX, | ||
9 | PERF_REG_X86_SI, | ||
10 | PERF_REG_X86_DI, | ||
11 | PERF_REG_X86_BP, | ||
12 | PERF_REG_X86_SP, | ||
13 | PERF_REG_X86_IP, | ||
14 | PERF_REG_X86_FLAGS, | ||
15 | PERF_REG_X86_CS, | ||
16 | PERF_REG_X86_SS, | ||
17 | PERF_REG_X86_DS, | ||
18 | PERF_REG_X86_ES, | ||
19 | PERF_REG_X86_FS, | ||
20 | PERF_REG_X86_GS, | ||
21 | PERF_REG_X86_R8, | ||
22 | PERF_REG_X86_R9, | ||
23 | PERF_REG_X86_R10, | ||
24 | PERF_REG_X86_R11, | ||
25 | PERF_REG_X86_R12, | ||
26 | PERF_REG_X86_R13, | ||
27 | PERF_REG_X86_R14, | ||
28 | PERF_REG_X86_R15, | ||
29 | |||
30 | PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1, | ||
31 | PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1, | ||
32 | }; | ||
33 | #endif /* _ASM_X86_PERF_REGS_H */ | ||
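The enum pins a stable index for every sampled register, shared between the perf ABI and the kernel sampler; the kernel side then just needs an index-to-pt_regs translation. A partial sketch (the in-tree implementation in arch/x86/kernel/perf_regs.c uses an offset table; only a few cases are shown here):

#include <asm/ptrace.h>

/* Sketch: mapping a perf_event_x86_regs index onto pt_regs.  A
 * complete version covers every enum member, e.g. via a table of
 * offsetof(struct pt_regs, ...) entries indexed by the enum.
 */
static u64 sketch_perf_reg_value(struct pt_regs *regs, int idx)
{
	switch (idx) {
	case PERF_REG_X86_AX:		return regs->ax;
	case PERF_REG_X86_SP:		return regs->sp;
	case PERF_REG_X86_IP:		return regs->ip;
	case PERF_REG_X86_FLAGS:	return regs->flags;
	default:			return 0;
	}
}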
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 49afb3f41eb6..a1f780d45f76 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -146,8 +146,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd) | |||
146 | 146 | ||
147 | static inline int pmd_large(pmd_t pte) | 147 | static inline int pmd_large(pmd_t pte) |
148 | { | 148 | { |
149 | return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == | 149 | return pmd_flags(pte) & _PAGE_PSE; |
150 | (_PAGE_PSE | _PAGE_PRESENT); | ||
151 | } | 150 | } |
152 | 151 | ||
153 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 152 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
@@ -384,9 +383,9 @@ pte_t *populate_extra_pte(unsigned long vaddr); | |||
384 | #endif /* __ASSEMBLY__ */ | 383 | #endif /* __ASSEMBLY__ */ |
385 | 384 | ||
386 | #ifdef CONFIG_X86_32 | 385 | #ifdef CONFIG_X86_32 |
387 | # include "pgtable_32.h" | 386 | # include <asm/pgtable_32.h> |
388 | #else | 387 | #else |
389 | # include "pgtable_64.h" | 388 | # include <asm/pgtable_64.h> |
390 | #endif | 389 | #endif |
391 | 390 | ||
392 | #ifndef __ASSEMBLY__ | 391 | #ifndef __ASSEMBLY__ |
@@ -415,7 +414,13 @@ static inline int pte_hidden(pte_t pte) | |||
415 | 414 | ||
416 | static inline int pmd_present(pmd_t pmd) | 415 | static inline int pmd_present(pmd_t pmd) |
417 | { | 416 | { |
418 | return pmd_flags(pmd) & _PAGE_PRESENT; | 417 | /* |
418 | * Checking for _PAGE_PSE is needed too because | ||
419 | * split_huge_page will temporarily clear the present bit (but | ||
420 | * the _PAGE_PSE flag will remain set at all times while the | ||
421 | * _PAGE_PRESENT bit is clear). | ||
422 | */ | ||
423 | return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE); | ||
419 | } | 424 | } |
420 | 425 | ||
421 | static inline int pmd_none(pmd_t pmd) | 426 | static inline int pmd_none(pmd_t pmd) |
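The two pgtable.h hunks are one logical change: pmd_large() drops its _PAGE_PRESENT requirement and pmd_present() accepts _PAGE_PSE, so a transparent hugepage whose present bit split_huge_page has temporarily cleared still answers "present" instead of being misread as a pte table. A sketch of the walker pattern this keeps safe; handle_huge/handle_ptes are hypothetical:

/* Sketch: a pmd walk step that stays correct mid-split.  While
 * split_huge_page holds the present bit clear, _PAGE_PSE keeps
 * both tests true, so the huge branch is still taken.
 */
static int walk_pmd(pmd_t *pmd, unsigned long addr)
{
	if (pmd_none(*pmd) || !pmd_present(*pmd))
		return 0;			/* nothing mapped */
	if (pmd_large(*pmd))
		return handle_huge(pmd, addr);	/* hypothetical */
	return handle_ptes(pmd, addr);		/* hypothetical */
}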
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 0c92113c4cb6..8faa215a503e 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h | |||
@@ -71,6 +71,7 @@ do { \ | |||
71 | * tables contain all the necessary information. | 71 | * tables contain all the necessary information. |
72 | */ | 72 | */ |
73 | #define update_mmu_cache(vma, address, ptep) do { } while (0) | 73 | #define update_mmu_cache(vma, address, ptep) do { } while (0) |
74 | #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) | ||
74 | 75 | ||
75 | #endif /* !__ASSEMBLY__ */ | 76 | #endif /* !__ASSEMBLY__ */ |
76 | 77 | ||
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 8251be02301e..47356f9df82e 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -143,6 +143,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; } | |||
143 | #define pte_unmap(pte) ((void)(pte))/* NOP */ | 143 | #define pte_unmap(pte) ((void)(pte))/* NOP */ |
144 | 144 | ||
145 | #define update_mmu_cache(vma, address, ptep) do { } while (0) | 145 | #define update_mmu_cache(vma, address, ptep) do { } while (0) |
146 | #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) | ||
146 | 147 | ||
147 | /* Encode and de-code a swap entry */ | 148 | /* Encode and de-code a swap entry */ |
148 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE | 149 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE |
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index db8fec6d2953..ec8a1fc9505d 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -174,9 +174,9 @@ | |||
174 | #endif | 174 | #endif |
175 | 175 | ||
176 | #ifdef CONFIG_X86_32 | 176 | #ifdef CONFIG_X86_32 |
177 | # include "pgtable_32_types.h" | 177 | # include <asm/pgtable_32_types.h> |
178 | #else | 178 | #else |
179 | # include "pgtable_64_types.h" | 179 | # include <asm/pgtable_64_types.h> |
180 | #endif | 180 | #endif |
181 | 181 | ||
182 | #ifndef __ASSEMBLY__ | 182 | #ifndef __ASSEMBLY__ |
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h index 7ef7c3020e5c..bad3665c25fc 100644 --- a/arch/x86/include/asm/posix_types.h +++ b/arch/x86/include/asm/posix_types.h | |||
@@ -1,15 +1,15 @@ | |||
1 | #ifdef __KERNEL__ | 1 | #ifdef __KERNEL__ |
2 | # ifdef CONFIG_X86_32 | 2 | # ifdef CONFIG_X86_32 |
3 | # include "posix_types_32.h" | 3 | # include <asm/posix_types_32.h> |
4 | # else | 4 | # else |
5 | # include "posix_types_64.h" | 5 | # include <asm/posix_types_64.h> |
6 | # endif | 6 | # endif |
7 | #else | 7 | #else |
8 | # ifdef __i386__ | 8 | # ifdef __i386__ |
9 | # include "posix_types_32.h" | 9 | # include <asm/posix_types_32.h> |
10 | # elif defined(__ILP32__) | 10 | # elif defined(__ILP32__) |
11 | # include "posix_types_x32.h" | 11 | # include <asm/posix_types_x32.h> |
12 | # else | 12 | # else |
13 | # include "posix_types_64.h" | 13 | # include <asm/posix_types_64.h> |
14 | # endif | 14 | # endif |
15 | #endif | 15 | #endif |
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h index aea1d1d848c7..680cf09ed100 100644 --- a/arch/x86/include/asm/processor-flags.h +++ b/arch/x86/include/asm/processor-flags.h | |||
@@ -65,6 +65,7 @@ | |||
65 | #define X86_CR4_PCIDE 0x00020000 /* enable PCID support */ | 65 | #define X86_CR4_PCIDE 0x00020000 /* enable PCID support */ |
66 | #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */ | 66 | #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */ |
67 | #define X86_CR4_SMEP 0x00100000 /* enable SMEP support */ | 67 | #define X86_CR4_SMEP 0x00100000 /* enable SMEP support */ |
68 | #define X86_CR4_SMAP 0x00200000 /* enable SMAP support */ | ||
68 | 69 | ||
69 | /* | 70 | /* |
70 | * x86-64 Task Priority Register, CR8 | 71 | * x86-64 Task Priority Register, CR8 |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index d048cad9bcad..ad1fc8511674 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -423,7 +423,6 @@ DECLARE_INIT_PER_CPU(irq_stack_union); | |||
423 | 423 | ||
424 | DECLARE_PER_CPU(char *, irq_stack_ptr); | 424 | DECLARE_PER_CPU(char *, irq_stack_ptr); |
425 | DECLARE_PER_CPU(unsigned int, irq_count); | 425 | DECLARE_PER_CPU(unsigned int, irq_count); |
426 | extern unsigned long kernel_eflags; | ||
427 | extern asmlinkage void ignore_sysret(void); | 426 | extern asmlinkage void ignore_sysret(void); |
428 | #else /* X86_64 */ | 427 | #else /* X86_64 */ |
429 | #ifdef CONFIG_CC_STACKPROTECTOR | 428 | #ifdef CONFIG_CC_STACKPROTECTOR |
@@ -589,11 +588,6 @@ typedef struct { | |||
589 | } mm_segment_t; | 588 | } mm_segment_t; |
590 | 589 | ||
591 | 590 | ||
592 | /* | ||
593 | * create a kernel thread without removing it from tasklists | ||
594 | */ | ||
595 | extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | ||
596 | |||
597 | /* Free all resources held by a thread. */ | 591 | /* Free all resources held by a thread. */ |
598 | extern void release_thread(struct task_struct *); | 592 | extern void release_thread(struct task_struct *); |
599 | 593 | ||
@@ -759,6 +753,8 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr) | |||
759 | wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); | 753 | wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); |
760 | } | 754 | } |
761 | 755 | ||
756 | extern void set_task_blockstep(struct task_struct *task, bool on); | ||
757 | |||
762 | /* | 758 | /* |
763 | * from system description table in BIOS. Mostly for MCA use, but | 759 | * from system description table in BIOS. Mostly for MCA use, but |
764 | * others may find it useful: | 760 | * others may find it useful: |
diff --git a/arch/x86/include/asm/rcu.h b/arch/x86/include/asm/rcu.h new file mode 100644 index 000000000000..d1ac07a23979 --- /dev/null +++ b/arch/x86/include/asm/rcu.h | |||
@@ -0,0 +1,32 @@ | |||
1 | #ifndef _ASM_X86_RCU_H | ||
2 | #define _ASM_X86_RCU_H | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | |||
6 | #include <linux/rcupdate.h> | ||
7 | #include <asm/ptrace.h> | ||
8 | |||
9 | static inline void exception_enter(struct pt_regs *regs) | ||
10 | { | ||
11 | rcu_user_exit(); | ||
12 | } | ||
13 | |||
14 | static inline void exception_exit(struct pt_regs *regs) | ||
15 | { | ||
16 | #ifdef CONFIG_RCU_USER_QS | ||
17 | if (user_mode(regs)) | ||
18 | rcu_user_enter(); | ||
19 | #endif | ||
20 | } | ||
21 | |||
22 | #else /* __ASSEMBLY__ */ | ||
23 | |||
24 | #ifdef CONFIG_RCU_USER_QS | ||
25 | # define SCHEDULE_USER call schedule_user | ||
26 | #else | ||
27 | # define SCHEDULE_USER call schedule | ||
28 | #endif | ||
29 | |||
30 | #endif /* !__ASSEMBLY__ */ | ||
31 | |||
32 | #endif | ||
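These helpers support the adaptive-tickless RCU work: a trap taken from userspace must pull RCU out of its user extended quiescent state before the handler runs, and drop back in afterwards only if the trap came from user mode. A sketch of the intended bracketing; the handler body is illustrative:

#include <asm/rcu.h>

/* Sketch: bracketing a trap handler.  exception_enter() lets the
 * handler use RCU read-side sections even when the CPU was in a
 * user-mode quiescent state; exception_exit() re-enters that
 * state only for traps that arrived from user mode.
 */
static void sketch_trap_handler(struct pt_regs *regs)
{
	exception_enter(regs);
	/* ... handling that may use rcu_read_lock()/unlock() ... */
	exception_exit(regs);
}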
diff --git a/arch/x86/include/asm/seccomp.h b/arch/x86/include/asm/seccomp.h index c62e58a5a90d..0f3d7f099224 100644 --- a/arch/x86/include/asm/seccomp.h +++ b/arch/x86/include/asm/seccomp.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifdef CONFIG_X86_32 |
2 | # include "seccomp_32.h" | 2 | # include <asm/seccomp_32.h> |
3 | #else | 3 | #else |
4 | # include "seccomp_64.h" | 4 | # include <asm/seccomp_64.h> |
5 | #endif | 5 | #endif |
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index 598457cbd0f8..323973f4abf1 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h | |||
@@ -31,6 +31,10 @@ typedef struct { | |||
31 | unsigned long sig[_NSIG_WORDS]; | 31 | unsigned long sig[_NSIG_WORDS]; |
32 | } sigset_t; | 32 | } sigset_t; |
33 | 33 | ||
34 | #ifndef CONFIG_COMPAT | ||
35 | typedef sigset_t compat_sigset_t; | ||
36 | #endif | ||
37 | |||
34 | #else | 38 | #else |
35 | /* Here we must cater to libcs that poke about in kernel headers. */ | 39 | /* Here we must cater to libcs that poke about in kernel headers. */ |
36 | 40 | ||
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h new file mode 100644 index 000000000000..8d3120f4e270 --- /dev/null +++ b/arch/x86/include/asm/smap.h | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * Supervisor Mode Access Prevention support | ||
3 | * | ||
4 | * Copyright (C) 2012 Intel Corporation | ||
5 | * Author: H. Peter Anvin <hpa@linux.intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; version 2 | ||
10 | * of the License. | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_X86_SMAP_H | ||
14 | #define _ASM_X86_SMAP_H | ||
15 | |||
16 | #include <linux/stringify.h> | ||
17 | #include <asm/nops.h> | ||
18 | #include <asm/cpufeature.h> | ||
19 | |||
20 | /* "Raw" instruction opcodes */ | ||
21 | #define __ASM_CLAC .byte 0x0f,0x01,0xca | ||
22 | #define __ASM_STAC .byte 0x0f,0x01,0xcb | ||
23 | |||
24 | #ifdef __ASSEMBLY__ | ||
25 | |||
26 | #include <asm/alternative-asm.h> | ||
27 | |||
28 | #ifdef CONFIG_X86_SMAP | ||
29 | |||
30 | #define ASM_CLAC \ | ||
31 | 661: ASM_NOP3 ; \ | ||
32 | .pushsection .altinstr_replacement, "ax" ; \ | ||
33 | 662: __ASM_CLAC ; \ | ||
34 | .popsection ; \ | ||
35 | .pushsection .altinstructions, "a" ; \ | ||
36 | altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \ | ||
37 | .popsection | ||
38 | |||
39 | #define ASM_STAC \ | ||
40 | 661: ASM_NOP3 ; \ | ||
41 | .pushsection .altinstr_replacement, "ax" ; \ | ||
42 | 662: __ASM_STAC ; \ | ||
43 | .popsection ; \ | ||
44 | .pushsection .altinstructions, "a" ; \ | ||
45 | altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \ | ||
46 | .popsection | ||
47 | |||
48 | #else /* CONFIG_X86_SMAP */ | ||
49 | |||
50 | #define ASM_CLAC | ||
51 | #define ASM_STAC | ||
52 | |||
53 | #endif /* CONFIG_X86_SMAP */ | ||
54 | |||
55 | #else /* __ASSEMBLY__ */ | ||
56 | |||
57 | #include <asm/alternative.h> | ||
58 | |||
59 | #ifdef CONFIG_X86_SMAP | ||
60 | |||
61 | static __always_inline void clac(void) | ||
62 | { | ||
63 | /* Note: a barrier is implicit in alternative() */ | ||
64 | alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP); | ||
65 | } | ||
66 | |||
67 | static __always_inline void stac(void) | ||
68 | { | ||
69 | /* Note: a barrier is implicit in alternative() */ | ||
70 | alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP); | ||
71 | } | ||
72 | |||
73 | /* These macros can be used in asm() statements */ | ||
74 | #define ASM_CLAC \ | ||
75 | ALTERNATIVE(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP) | ||
76 | #define ASM_STAC \ | ||
77 | ALTERNATIVE(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP) | ||
78 | |||
79 | #else /* CONFIG_X86_SMAP */ | ||
80 | |||
81 | static inline void clac(void) { } | ||
82 | static inline void stac(void) { } | ||
83 | |||
84 | #define ASM_CLAC | ||
85 | #define ASM_STAC | ||
86 | |||
87 | #endif /* CONFIG_X86_SMAP */ | ||
88 | |||
89 | #endif /* __ASSEMBLY__ */ | ||
90 | |||
91 | #endif /* _ASM_X86_SMAP_H */ | ||
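The same-series conversions (the futex changes above are one example) put these calls inside the stock uaccess paths, so ordinary code gets SMAP handling for free; the pattern only needs spelling out where user memory is touched outside those helpers. A purely illustrative sketch of the bracketing contract:

#include <asm/smap.h>

/* Sketch of the contract only: stac() opens the user-access
 * window, clac() must close it on every path.  Real callers
 * rarely do this by hand, since get_user/put_user and the copy
 * routines gained the bracketing internally in this series.
 */
static int poke_user_byte(u8 __user *p, u8 val)
{
	int err;

	stac();
	err = __put_user(val, p);
	clac();
	return err;
}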
diff --git a/arch/x86/include/asm/string.h b/arch/x86/include/asm/string.h index 6dfd6d9373a0..09224d7a5862 100644 --- a/arch/x86/include/asm/string.h +++ b/arch/x86/include/asm/string.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifdef CONFIG_X86_32 |
2 | # include "string_32.h" | 2 | # include <asm/string_32.h> |
3 | #else | 3 | #else |
4 | # include "string_64.h" | 4 | # include <asm/string_64.h> |
5 | #endif | 5 | #endif |
diff --git a/arch/x86/include/asm/suspend.h b/arch/x86/include/asm/suspend.h index 9bd521fe4570..2fab6c2c3575 100644 --- a/arch/x86/include/asm/suspend.h +++ b/arch/x86/include/asm/suspend.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifdef CONFIG_X86_32 |
2 | # include "suspend_32.h" | 2 | # include <asm/suspend_32.h> |
3 | #else | 3 | #else |
4 | # include "suspend_64.h" | 4 | # include <asm/suspend_64.h> |
5 | #endif | 5 | #endif |
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index f2b83bc7d784..cdf5674dd23a 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h | |||
@@ -1,6 +1,135 @@ | |||
1 | #ifndef __SVM_H | 1 | #ifndef __SVM_H |
2 | #define __SVM_H | 2 | #define __SVM_H |
3 | 3 | ||
4 | #define SVM_EXIT_READ_CR0 0x000 | ||
5 | #define SVM_EXIT_READ_CR3 0x003 | ||
6 | #define SVM_EXIT_READ_CR4 0x004 | ||
7 | #define SVM_EXIT_READ_CR8 0x008 | ||
8 | #define SVM_EXIT_WRITE_CR0 0x010 | ||
9 | #define SVM_EXIT_WRITE_CR3 0x013 | ||
10 | #define SVM_EXIT_WRITE_CR4 0x014 | ||
11 | #define SVM_EXIT_WRITE_CR8 0x018 | ||
12 | #define SVM_EXIT_READ_DR0 0x020 | ||
13 | #define SVM_EXIT_READ_DR1 0x021 | ||
14 | #define SVM_EXIT_READ_DR2 0x022 | ||
15 | #define SVM_EXIT_READ_DR3 0x023 | ||
16 | #define SVM_EXIT_READ_DR4 0x024 | ||
17 | #define SVM_EXIT_READ_DR5 0x025 | ||
18 | #define SVM_EXIT_READ_DR6 0x026 | ||
19 | #define SVM_EXIT_READ_DR7 0x027 | ||
20 | #define SVM_EXIT_WRITE_DR0 0x030 | ||
21 | #define SVM_EXIT_WRITE_DR1 0x031 | ||
22 | #define SVM_EXIT_WRITE_DR2 0x032 | ||
23 | #define SVM_EXIT_WRITE_DR3 0x033 | ||
24 | #define SVM_EXIT_WRITE_DR4 0x034 | ||
25 | #define SVM_EXIT_WRITE_DR5 0x035 | ||
26 | #define SVM_EXIT_WRITE_DR6 0x036 | ||
27 | #define SVM_EXIT_WRITE_DR7 0x037 | ||
28 | #define SVM_EXIT_EXCP_BASE 0x040 | ||
29 | #define SVM_EXIT_INTR 0x060 | ||
30 | #define SVM_EXIT_NMI 0x061 | ||
31 | #define SVM_EXIT_SMI 0x062 | ||
32 | #define SVM_EXIT_INIT 0x063 | ||
33 | #define SVM_EXIT_VINTR 0x064 | ||
34 | #define SVM_EXIT_CR0_SEL_WRITE 0x065 | ||
35 | #define SVM_EXIT_IDTR_READ 0x066 | ||
36 | #define SVM_EXIT_GDTR_READ 0x067 | ||
37 | #define SVM_EXIT_LDTR_READ 0x068 | ||
38 | #define SVM_EXIT_TR_READ 0x069 | ||
39 | #define SVM_EXIT_IDTR_WRITE 0x06a | ||
40 | #define SVM_EXIT_GDTR_WRITE 0x06b | ||
41 | #define SVM_EXIT_LDTR_WRITE 0x06c | ||
42 | #define SVM_EXIT_TR_WRITE 0x06d | ||
43 | #define SVM_EXIT_RDTSC 0x06e | ||
44 | #define SVM_EXIT_RDPMC 0x06f | ||
45 | #define SVM_EXIT_PUSHF 0x070 | ||
46 | #define SVM_EXIT_POPF 0x071 | ||
47 | #define SVM_EXIT_CPUID 0x072 | ||
48 | #define SVM_EXIT_RSM 0x073 | ||
49 | #define SVM_EXIT_IRET 0x074 | ||
50 | #define SVM_EXIT_SWINT 0x075 | ||
51 | #define SVM_EXIT_INVD 0x076 | ||
52 | #define SVM_EXIT_PAUSE 0x077 | ||
53 | #define SVM_EXIT_HLT 0x078 | ||
54 | #define SVM_EXIT_INVLPG 0x079 | ||
55 | #define SVM_EXIT_INVLPGA 0x07a | ||
56 | #define SVM_EXIT_IOIO 0x07b | ||
57 | #define SVM_EXIT_MSR 0x07c | ||
58 | #define SVM_EXIT_TASK_SWITCH 0x07d | ||
59 | #define SVM_EXIT_FERR_FREEZE 0x07e | ||
60 | #define SVM_EXIT_SHUTDOWN 0x07f | ||
61 | #define SVM_EXIT_VMRUN 0x080 | ||
62 | #define SVM_EXIT_VMMCALL 0x081 | ||
63 | #define SVM_EXIT_VMLOAD 0x082 | ||
64 | #define SVM_EXIT_VMSAVE 0x083 | ||
65 | #define SVM_EXIT_STGI 0x084 | ||
66 | #define SVM_EXIT_CLGI 0x085 | ||
67 | #define SVM_EXIT_SKINIT 0x086 | ||
68 | #define SVM_EXIT_RDTSCP 0x087 | ||
69 | #define SVM_EXIT_ICEBP 0x088 | ||
70 | #define SVM_EXIT_WBINVD 0x089 | ||
71 | #define SVM_EXIT_MONITOR 0x08a | ||
72 | #define SVM_EXIT_MWAIT 0x08b | ||
73 | #define SVM_EXIT_MWAIT_COND 0x08c | ||
74 | #define SVM_EXIT_XSETBV 0x08d | ||
75 | #define SVM_EXIT_NPF 0x400 | ||
76 | |||
77 | #define SVM_EXIT_ERR -1 | ||
78 | |||
79 | #define SVM_EXIT_REASONS \ | ||
80 | { SVM_EXIT_READ_CR0, "read_cr0" }, \ | ||
81 | { SVM_EXIT_READ_CR3, "read_cr3" }, \ | ||
82 | { SVM_EXIT_READ_CR4, "read_cr4" }, \ | ||
83 | { SVM_EXIT_READ_CR8, "read_cr8" }, \ | ||
84 | { SVM_EXIT_WRITE_CR0, "write_cr0" }, \ | ||
85 | { SVM_EXIT_WRITE_CR3, "write_cr3" }, \ | ||
86 | { SVM_EXIT_WRITE_CR4, "write_cr4" }, \ | ||
87 | { SVM_EXIT_WRITE_CR8, "write_cr8" }, \ | ||
88 | { SVM_EXIT_READ_DR0, "read_dr0" }, \ | ||
89 | { SVM_EXIT_READ_DR1, "read_dr1" }, \ | ||
90 | { SVM_EXIT_READ_DR2, "read_dr2" }, \ | ||
91 | { SVM_EXIT_READ_DR3, "read_dr3" }, \ | ||
92 | { SVM_EXIT_WRITE_DR0, "write_dr0" }, \ | ||
93 | { SVM_EXIT_WRITE_DR1, "write_dr1" }, \ | ||
94 | { SVM_EXIT_WRITE_DR2, "write_dr2" }, \ | ||
95 | { SVM_EXIT_WRITE_DR3, "write_dr3" }, \ | ||
96 | { SVM_EXIT_WRITE_DR5, "write_dr5" }, \ | ||
97 | { SVM_EXIT_WRITE_DR7, "write_dr7" }, \ | ||
98 | { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \ | ||
99 | { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \ | ||
100 | { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \ | ||
101 | { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \ | ||
102 | { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \ | ||
103 | { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \ | ||
104 | { SVM_EXIT_INTR, "interrupt" }, \ | ||
105 | { SVM_EXIT_NMI, "nmi" }, \ | ||
106 | { SVM_EXIT_SMI, "smi" }, \ | ||
107 | { SVM_EXIT_INIT, "init" }, \ | ||
108 | { SVM_EXIT_VINTR, "vintr" }, \ | ||
109 | { SVM_EXIT_CPUID, "cpuid" }, \ | ||
110 | { SVM_EXIT_INVD, "invd" }, \ | ||
111 | { SVM_EXIT_HLT, "hlt" }, \ | ||
112 | { SVM_EXIT_INVLPG, "invlpg" }, \ | ||
113 | { SVM_EXIT_INVLPGA, "invlpga" }, \ | ||
114 | { SVM_EXIT_IOIO, "io" }, \ | ||
115 | { SVM_EXIT_MSR, "msr" }, \ | ||
116 | { SVM_EXIT_TASK_SWITCH, "task_switch" }, \ | ||
117 | { SVM_EXIT_SHUTDOWN, "shutdown" }, \ | ||
118 | { SVM_EXIT_VMRUN, "vmrun" }, \ | ||
119 | { SVM_EXIT_VMMCALL, "hypercall" }, \ | ||
120 | { SVM_EXIT_VMLOAD, "vmload" }, \ | ||
121 | { SVM_EXIT_VMSAVE, "vmsave" }, \ | ||
122 | { SVM_EXIT_STGI, "stgi" }, \ | ||
123 | { SVM_EXIT_CLGI, "clgi" }, \ | ||
124 | { SVM_EXIT_SKINIT, "skinit" }, \ | ||
125 | { SVM_EXIT_WBINVD, "wbinvd" }, \ | ||
126 | { SVM_EXIT_MONITOR, "monitor" }, \ | ||
127 | { SVM_EXIT_MWAIT, "mwait" }, \ | ||
128 | { SVM_EXIT_XSETBV, "xsetbv" }, \ | ||
129 | { SVM_EXIT_NPF, "npf" } | ||
130 | |||
131 | #ifdef __KERNEL__ | ||
132 | |||
4 | enum { | 133 | enum { |
5 | INTERCEPT_INTR, | 134 | INTERCEPT_INTR, |
6 | INTERCEPT_NMI, | 135 | INTERCEPT_NMI, |
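Hoisting the exit codes above #ifdef __KERNEL__ makes them visible to userspace, and SVM_EXIT_REASONS packages them as { value, "name" } pairs whose main consumer is __print_symbolic() in KVM's tracepoints. The same expansion can seed an ordinary lookup table, sketched below; it assumes the *_VECTOR constants from <asm/kvm.h> are in scope:

/* Sketch: SVM_EXIT_REASONS expands to { value, "name" } pairs, so
 * besides __print_symbolic() in tracepoints it can initialize a
 * plain lookup table for diagnostics.
 */
static const struct { int code; const char *name; } svm_exit_names[] = {
	SVM_EXIT_REASONS
};

static const char *svm_exit_name(int code)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(svm_exit_names); i++)
		if (svm_exit_names[i].code == code)
			return svm_exit_names[i].name;
	return "unknown";
}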
@@ -264,81 +393,6 @@ struct __attribute__ ((__packed__)) vmcb { | |||
264 | 393 | ||
265 | #define SVM_EXITINFO_REG_MASK 0x0F | 394 | #define SVM_EXITINFO_REG_MASK 0x0F |
266 | 395 | ||
267 | #define SVM_EXIT_READ_CR0 0x000 | ||
268 | #define SVM_EXIT_READ_CR3 0x003 | ||
269 | #define SVM_EXIT_READ_CR4 0x004 | ||
270 | #define SVM_EXIT_READ_CR8 0x008 | ||
271 | #define SVM_EXIT_WRITE_CR0 0x010 | ||
272 | #define SVM_EXIT_WRITE_CR3 0x013 | ||
273 | #define SVM_EXIT_WRITE_CR4 0x014 | ||
274 | #define SVM_EXIT_WRITE_CR8 0x018 | ||
275 | #define SVM_EXIT_READ_DR0 0x020 | ||
276 | #define SVM_EXIT_READ_DR1 0x021 | ||
277 | #define SVM_EXIT_READ_DR2 0x022 | ||
278 | #define SVM_EXIT_READ_DR3 0x023 | ||
279 | #define SVM_EXIT_READ_DR4 0x024 | ||
280 | #define SVM_EXIT_READ_DR5 0x025 | ||
281 | #define SVM_EXIT_READ_DR6 0x026 | ||
282 | #define SVM_EXIT_READ_DR7 0x027 | ||
283 | #define SVM_EXIT_WRITE_DR0 0x030 | ||
284 | #define SVM_EXIT_WRITE_DR1 0x031 | ||
285 | #define SVM_EXIT_WRITE_DR2 0x032 | ||
286 | #define SVM_EXIT_WRITE_DR3 0x033 | ||
287 | #define SVM_EXIT_WRITE_DR4 0x034 | ||
288 | #define SVM_EXIT_WRITE_DR5 0x035 | ||
289 | #define SVM_EXIT_WRITE_DR6 0x036 | ||
290 | #define SVM_EXIT_WRITE_DR7 0x037 | ||
291 | #define SVM_EXIT_EXCP_BASE 0x040 | ||
292 | #define SVM_EXIT_INTR 0x060 | ||
293 | #define SVM_EXIT_NMI 0x061 | ||
294 | #define SVM_EXIT_SMI 0x062 | ||
295 | #define SVM_EXIT_INIT 0x063 | ||
296 | #define SVM_EXIT_VINTR 0x064 | ||
297 | #define SVM_EXIT_CR0_SEL_WRITE 0x065 | ||
298 | #define SVM_EXIT_IDTR_READ 0x066 | ||
299 | #define SVM_EXIT_GDTR_READ 0x067 | ||
300 | #define SVM_EXIT_LDTR_READ 0x068 | ||
301 | #define SVM_EXIT_TR_READ 0x069 | ||
302 | #define SVM_EXIT_IDTR_WRITE 0x06a | ||
303 | #define SVM_EXIT_GDTR_WRITE 0x06b | ||
304 | #define SVM_EXIT_LDTR_WRITE 0x06c | ||
305 | #define SVM_EXIT_TR_WRITE 0x06d | ||
306 | #define SVM_EXIT_RDTSC 0x06e | ||
307 | #define SVM_EXIT_RDPMC 0x06f | ||
308 | #define SVM_EXIT_PUSHF 0x070 | ||
309 | #define SVM_EXIT_POPF 0x071 | ||
310 | #define SVM_EXIT_CPUID 0x072 | ||
311 | #define SVM_EXIT_RSM 0x073 | ||
312 | #define SVM_EXIT_IRET 0x074 | ||
313 | #define SVM_EXIT_SWINT 0x075 | ||
314 | #define SVM_EXIT_INVD 0x076 | ||
315 | #define SVM_EXIT_PAUSE 0x077 | ||
316 | #define SVM_EXIT_HLT 0x078 | ||
317 | #define SVM_EXIT_INVLPG 0x079 | ||
318 | #define SVM_EXIT_INVLPGA 0x07a | ||
319 | #define SVM_EXIT_IOIO 0x07b | ||
320 | #define SVM_EXIT_MSR 0x07c | ||
321 | #define SVM_EXIT_TASK_SWITCH 0x07d | ||
322 | #define SVM_EXIT_FERR_FREEZE 0x07e | ||
323 | #define SVM_EXIT_SHUTDOWN 0x07f | ||
324 | #define SVM_EXIT_VMRUN 0x080 | ||
325 | #define SVM_EXIT_VMMCALL 0x081 | ||
326 | #define SVM_EXIT_VMLOAD 0x082 | ||
327 | #define SVM_EXIT_VMSAVE 0x083 | ||
328 | #define SVM_EXIT_STGI 0x084 | ||
329 | #define SVM_EXIT_CLGI 0x085 | ||
330 | #define SVM_EXIT_SKINIT 0x086 | ||
331 | #define SVM_EXIT_RDTSCP 0x087 | ||
332 | #define SVM_EXIT_ICEBP 0x088 | ||
333 | #define SVM_EXIT_WBINVD 0x089 | ||
334 | #define SVM_EXIT_MONITOR 0x08a | ||
335 | #define SVM_EXIT_MWAIT 0x08b | ||
336 | #define SVM_EXIT_MWAIT_COND 0x08c | ||
337 | #define SVM_EXIT_XSETBV 0x08d | ||
338 | #define SVM_EXIT_NPF 0x400 | ||
339 | |||
340 | #define SVM_EXIT_ERR -1 | ||
341 | |||
342 | #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP) | 396 | #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP) |
343 | 397 | ||
344 | #define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" | 398 | #define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" |
@@ -350,3 +404,4 @@ struct __attribute__ ((__packed__)) vmcb { | |||
350 | 404 | ||
351 | #endif | 405 | #endif |
352 | 406 | ||
407 | #endif | ||
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h index 3fda9db48819..a9a8cf3da49d 100644 --- a/arch/x86/include/asm/sys_ia32.h +++ b/arch/x86/include/asm/sys_ia32.h | |||
@@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *, | |||
40 | struct old_sigaction32 __user *); | 40 | struct old_sigaction32 __user *); |
41 | asmlinkage long sys32_alarm(unsigned int); | 41 | asmlinkage long sys32_alarm(unsigned int); |
42 | 42 | ||
43 | asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int); | 43 | asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int); |
44 | asmlinkage long sys32_sysfs(int, u32, u32); | 44 | asmlinkage long sys32_sysfs(int, u32, u32); |
45 | 45 | ||
46 | asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, | 46 | asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, |
@@ -54,8 +54,6 @@ asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32); | |||
54 | asmlinkage long sys32_personality(unsigned long); | 54 | asmlinkage long sys32_personality(unsigned long); |
55 | asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); | 55 | asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); |
56 | 56 | ||
57 | asmlinkage long sys32_execve(const char __user *, compat_uptr_t __user *, | ||
58 | compat_uptr_t __user *, struct pt_regs *); | ||
59 | asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); | 57 | asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); |
60 | 58 | ||
61 | long sys32_lseek(unsigned int, int, unsigned int); | 59 | long sys32_lseek(unsigned int, int, unsigned int); |
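The sys32_waitpid change above is an annotation fix, not an ABI change: the status pointer lives in userspace and must carry __user so sparse can flag direct dereferences. For context, the marker is defined in the kernel's compiler headers roughly as follows (a no-op for the real compiler, an address-space attribute for sparse):

#ifdef __CHECKER__
# define __user	__attribute__((noderef, address_space(1)))
#else
# define __user		/* no-op outside sparse */
#endif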
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index f1d8b441fc77..2be0b880417e 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h | |||
@@ -25,7 +25,7 @@ int sys_fork(struct pt_regs *); | |||
25 | int sys_vfork(struct pt_regs *); | 25 | int sys_vfork(struct pt_regs *); |
26 | long sys_execve(const char __user *, | 26 | long sys_execve(const char __user *, |
27 | const char __user *const __user *, | 27 | const char __user *const __user *, |
28 | const char __user *const __user *, struct pt_regs *); | 28 | const char __user *const __user *); |
29 | long sys_clone(unsigned long, unsigned long, void __user *, | 29 | long sys_clone(unsigned long, unsigned long, void __user *, |
30 | void __user *, struct pt_regs *); | 30 | void __user *, struct pt_regs *); |
31 | 31 | ||
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 89f794f007ec..2d946e63ee82 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -79,7 +79,6 @@ struct thread_info { | |||
79 | #define TIF_SIGPENDING 2 /* signal pending */ | 79 | #define TIF_SIGPENDING 2 /* signal pending */ |
80 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ | 80 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ |
81 | #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ | 81 | #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ |
82 | #define TIF_IRET 5 /* force IRET */ | ||
83 | #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ | 82 | #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ |
84 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | 83 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
85 | #define TIF_SECCOMP 8 /* secure computing */ | 84 | #define TIF_SECCOMP 8 /* secure computing */ |
@@ -89,6 +88,7 @@ struct thread_info { | |||
89 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ | 88 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ |
90 | #define TIF_IA32 17 /* IA32 compatibility process */ | 89 | #define TIF_IA32 17 /* IA32 compatibility process */ |
91 | #define TIF_FORK 18 /* ret_from_fork */ | 90 | #define TIF_FORK 18 /* ret_from_fork */ |
91 | #define TIF_NOHZ 19 /* in adaptive nohz mode */ | ||
92 | #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ | 92 | #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ |
93 | #define TIF_DEBUG 21 /* uses debug registers */ | 93 | #define TIF_DEBUG 21 /* uses debug registers */ |
94 | #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ | 94 | #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ |
@@ -104,7 +104,6 @@ struct thread_info { | |||
104 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 104 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
105 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | 105 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
106 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | 106 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
107 | #define _TIF_IRET (1 << TIF_IRET) | ||
108 | #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) | 107 | #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) |
109 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | 108 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
110 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | 109 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
@@ -114,6 +113,7 @@ struct thread_info { | |||
114 | #define _TIF_NOTSC (1 << TIF_NOTSC) | 113 | #define _TIF_NOTSC (1 << TIF_NOTSC) |
115 | #define _TIF_IA32 (1 << TIF_IA32) | 114 | #define _TIF_IA32 (1 << TIF_IA32) |
116 | #define _TIF_FORK (1 << TIF_FORK) | 115 | #define _TIF_FORK (1 << TIF_FORK) |
116 | #define _TIF_NOHZ (1 << TIF_NOHZ) | ||
117 | #define _TIF_DEBUG (1 << TIF_DEBUG) | 117 | #define _TIF_DEBUG (1 << TIF_DEBUG) |
118 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) | 118 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) |
119 | #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) | 119 | #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) |
@@ -126,12 +126,13 @@ struct thread_info { | |||
126 | /* work to do in syscall_trace_enter() */ | 126 | /* work to do in syscall_trace_enter() */ |
127 | #define _TIF_WORK_SYSCALL_ENTRY \ | 127 | #define _TIF_WORK_SYSCALL_ENTRY \ |
128 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \ | 128 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \ |
129 | _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT) | 129 | _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \ |
130 | _TIF_NOHZ) | ||
130 | 131 | ||
131 | /* work to do in syscall_trace_leave() */ | 132 | /* work to do in syscall_trace_leave() */ |
132 | #define _TIF_WORK_SYSCALL_EXIT \ | 133 | #define _TIF_WORK_SYSCALL_EXIT \ |
133 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ | 134 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ |
134 | _TIF_SYSCALL_TRACEPOINT) | 135 | _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ) |
135 | 136 | ||
136 | /* work to do on interrupt/exception return */ | 137 | /* work to do on interrupt/exception return */ |
137 | #define _TIF_WORK_MASK \ | 138 | #define _TIF_WORK_MASK \ |
@@ -141,7 +142,8 @@ struct thread_info { | |||
141 | 142 | ||
142 | /* work to do on any return to user space */ | 143 | /* work to do on any return to user space */ |
143 | #define _TIF_ALLWORK_MASK \ | 144 | #define _TIF_ALLWORK_MASK \ |
144 | ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT) | 145 | ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \ |
146 | _TIF_NOHZ) | ||
145 | 147 | ||
146 | /* Only used for 64 bit */ | 148 | /* Only used for 64 bit */ |
147 | #define _TIF_DO_NOTIFY_MASK \ | 149 | #define _TIF_DO_NOTIFY_MASK \ |
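TIF_NOHZ lands at bit 19 and is folded into _TIF_WORK_SYSCALL_ENTRY, _TIF_WORK_SYSCALL_EXIT and _TIF_ALLWORK_MASK, so a task in adaptive-nohz mode always takes the slow syscall path where the tick bookkeeping can run. A sketch of the check the entry code effectively performs (the helper name is illustrative; the mask and field are the real ones):

#include <linux/thread_info.h>

static inline bool syscall_entry_work_pending(struct thread_info *ti)
{
	/* _TIF_WORK_SYSCALL_ENTRY now includes _TIF_NOHZ */
	return (ti->flags & _TIF_WORK_SYSCALL_ENTRY) != 0;
}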
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index e1f3a17034fc..7ccf8d131535 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/string.h> | 9 | #include <linux/string.h> |
10 | #include <asm/asm.h> | 10 | #include <asm/asm.h> |
11 | #include <asm/page.h> | 11 | #include <asm/page.h> |
12 | #include <asm/smap.h> | ||
12 | 13 | ||
13 | #define VERIFY_READ 0 | 14 | #define VERIFY_READ 0 |
14 | #define VERIFY_WRITE 1 | 15 | #define VERIFY_WRITE 1 |
@@ -192,9 +193,10 @@ extern int __get_user_bad(void); | |||
192 | 193 | ||
193 | #ifdef CONFIG_X86_32 | 194 | #ifdef CONFIG_X86_32 |
194 | #define __put_user_asm_u64(x, addr, err, errret) \ | 195 | #define __put_user_asm_u64(x, addr, err, errret) \ |
195 | asm volatile("1: movl %%eax,0(%2)\n" \ | 196 | asm volatile(ASM_STAC "\n" \ |
197 | "1: movl %%eax,0(%2)\n" \ | ||
196 | "2: movl %%edx,4(%2)\n" \ | 198 | "2: movl %%edx,4(%2)\n" \ |
197 | "3:\n" \ | 199 | "3: " ASM_CLAC "\n" \ |
198 | ".section .fixup,\"ax\"\n" \ | 200 | ".section .fixup,\"ax\"\n" \ |
199 | "4: movl %3,%0\n" \ | 201 | "4: movl %3,%0\n" \ |
200 | " jmp 3b\n" \ | 202 | " jmp 3b\n" \ |
@@ -205,9 +207,10 @@ extern int __get_user_bad(void); | |||
205 | : "A" (x), "r" (addr), "i" (errret), "0" (err)) | 207 | : "A" (x), "r" (addr), "i" (errret), "0" (err)) |
206 | 208 | ||
207 | #define __put_user_asm_ex_u64(x, addr) \ | 209 | #define __put_user_asm_ex_u64(x, addr) \ |
208 | asm volatile("1: movl %%eax,0(%1)\n" \ | 210 | asm volatile(ASM_STAC "\n" \ |
211 | "1: movl %%eax,0(%1)\n" \ | ||
209 | "2: movl %%edx,4(%1)\n" \ | 212 | "2: movl %%edx,4(%1)\n" \ |
210 | "3:\n" \ | 213 | "3: " ASM_CLAC "\n" \ |
211 | _ASM_EXTABLE_EX(1b, 2b) \ | 214 | _ASM_EXTABLE_EX(1b, 2b) \ |
212 | _ASM_EXTABLE_EX(2b, 3b) \ | 215 | _ASM_EXTABLE_EX(2b, 3b) \ |
213 | : : "A" (x), "r" (addr)) | 216 | : : "A" (x), "r" (addr)) |
@@ -379,8 +382,9 @@ do { \ | |||
379 | } while (0) | 382 | } while (0) |
380 | 383 | ||
381 | #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ | 384 | #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ |
382 | asm volatile("1: mov"itype" %2,%"rtype"1\n" \ | 385 | asm volatile(ASM_STAC "\n" \ |
383 | "2:\n" \ | 386 | "1: mov"itype" %2,%"rtype"1\n" \ |
387 | "2: " ASM_CLAC "\n" \ | ||
384 | ".section .fixup,\"ax\"\n" \ | 388 | ".section .fixup,\"ax\"\n" \ |
385 | "3: mov %3,%0\n" \ | 389 | "3: mov %3,%0\n" \ |
386 | " xor"itype" %"rtype"1,%"rtype"1\n" \ | 390 | " xor"itype" %"rtype"1,%"rtype"1\n" \ |
@@ -443,8 +447,9 @@ struct __large_struct { unsigned long buf[100]; }; | |||
443 | * aliasing issues. | 447 | * aliasing issues. |
444 | */ | 448 | */ |
445 | #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ | 449 | #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ |
446 | asm volatile("1: mov"itype" %"rtype"1,%2\n" \ | 450 | asm volatile(ASM_STAC "\n" \ |
447 | "2:\n" \ | 451 | "1: mov"itype" %"rtype"1,%2\n" \ |
452 | "2: " ASM_CLAC "\n" \ | ||
448 | ".section .fixup,\"ax\"\n" \ | 453 | ".section .fixup,\"ax\"\n" \ |
449 | "3: mov %3,%0\n" \ | 454 | "3: mov %3,%0\n" \ |
450 | " jmp 2b\n" \ | 455 | " jmp 2b\n" \ |
@@ -463,13 +468,13 @@ struct __large_struct { unsigned long buf[100]; }; | |||
463 | * uaccess_try and catch | 468 | * uaccess_try and catch |
464 | */ | 469 | */ |
465 | #define uaccess_try do { \ | 470 | #define uaccess_try do { \ |
466 | int prev_err = current_thread_info()->uaccess_err; \ | ||
467 | current_thread_info()->uaccess_err = 0; \ | 471 | current_thread_info()->uaccess_err = 0; \ |
472 | stac(); \ | ||
468 | barrier(); | 473 | barrier(); |
469 | 474 | ||
470 | #define uaccess_catch(err) \ | 475 | #define uaccess_catch(err) \ |
476 | clac(); \ | ||
471 | (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \ | 477 | (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \ |
472 | current_thread_info()->uaccess_err = prev_err; \ | ||
473 | } while (0) | 478 | } while (0) |
474 | 479 | ||
475 | /** | 480 | /** |
@@ -569,6 +574,9 @@ strncpy_from_user(char *dst, const char __user *src, long count); | |||
569 | extern __must_check long strlen_user(const char __user *str); | 574 | extern __must_check long strlen_user(const char __user *str); |
570 | extern __must_check long strnlen_user(const char __user *str, long n); | 575 | extern __must_check long strnlen_user(const char __user *str, long n); |
571 | 576 | ||
577 | unsigned long __must_check clear_user(void __user *mem, unsigned long len); | ||
578 | unsigned long __must_check __clear_user(void __user *mem, unsigned long len); | ||
579 | |||
572 | /* | 580 | /* |
573 | * movsl can be slow when source and dest are not both 8-byte aligned | 581 | * movsl can be slow when source and dest are not both 8-byte aligned |
574 | */ | 582 | */ |
@@ -581,9 +589,9 @@ extern struct movsl_mask { | |||
581 | #define ARCH_HAS_NOCACHE_UACCESS 1 | 589 | #define ARCH_HAS_NOCACHE_UACCESS 1 |
582 | 590 | ||
583 | #ifdef CONFIG_X86_32 | 591 | #ifdef CONFIG_X86_32 |
584 | # include "uaccess_32.h" | 592 | # include <asm/uaccess_32.h> |
585 | #else | 593 | #else |
586 | # include "uaccess_64.h" | 594 | # include <asm/uaccess_64.h> |
587 | #endif | 595 | #endif |
588 | 596 | ||
589 | #endif /* _ASM_X86_UACCESS_H */ | 597 | #endif /* _ASM_X86_UACCESS_H */ |
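Two things happen in the uaccess.h hunks: every open-coded user access gets bracketed with ASM_STAC/ASM_CLAC (patched to real STAC/CLAC via alternatives only on SMAP-capable hardware, no-ops otherwise), and uaccess_try/uaccess_catch stop saving and restoring uaccess_err now that nesting support is gone. A minimal sketch of the try/catch usage these macros are built for; the function and variable names are illustrative:

static int store_flag(int __user *uptr, int value)
{
	int err = 0;

	put_user_try {			/* expands to uaccess_try: clears uaccess_err, stac() */
		put_user_ex(value, uptr);	/* faults are recorded, not propagated */
	} put_user_catch(err);		/* clac(), then folds any recorded fault into err */

	return err;			/* 0 on success, -EFAULT after a fault */
}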
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 576e39bca6ad..7f760a9f1f61 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h | |||
@@ -213,7 +213,4 @@ static inline unsigned long __must_check copy_from_user(void *to, | |||
213 | return n; | 213 | return n; |
214 | } | 214 | } |
215 | 215 | ||
216 | unsigned long __must_check clear_user(void __user *mem, unsigned long len); | ||
217 | unsigned long __must_check __clear_user(void __user *mem, unsigned long len); | ||
218 | |||
219 | #endif /* _ASM_X86_UACCESS_32_H */ | 216 | #endif /* _ASM_X86_UACCESS_32_H */ |
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index d8def8b3dba0..142810c457dc 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -217,9 +217,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) | |||
217 | } | 217 | } |
218 | } | 218 | } |
219 | 219 | ||
220 | __must_check unsigned long clear_user(void __user *mem, unsigned long len); | ||
221 | __must_check unsigned long __clear_user(void __user *mem, unsigned long len); | ||
222 | |||
223 | static __must_check __always_inline int | 220 | static __must_check __always_inline int |
224 | __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) | 221 | __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) |
225 | { | 222 | { |
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h index 0d9776e9e2dc..16f3fc6ebf2e 100644 --- a/arch/x86/include/asm/unistd.h +++ b/arch/x86/include/asm/unistd.h | |||
@@ -50,6 +50,7 @@ | |||
50 | # define __ARCH_WANT_SYS_TIME | 50 | # define __ARCH_WANT_SYS_TIME |
51 | # define __ARCH_WANT_SYS_UTIME | 51 | # define __ARCH_WANT_SYS_UTIME |
52 | # define __ARCH_WANT_SYS_WAITPID | 52 | # define __ARCH_WANT_SYS_WAITPID |
53 | # define __ARCH_WANT_SYS_EXECVE | ||
53 | 54 | ||
54 | /* | 55 | /* |
55 | * "Conditional" syscalls | 56 | * "Conditional" syscalls |
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h index f3971bbcd1de..8ff8be7835ab 100644 --- a/arch/x86/include/asm/uprobes.h +++ b/arch/x86/include/asm/uprobes.h | |||
@@ -42,10 +42,11 @@ struct arch_uprobe { | |||
42 | }; | 42 | }; |
43 | 43 | ||
44 | struct arch_uprobe_task { | 44 | struct arch_uprobe_task { |
45 | unsigned long saved_trap_nr; | ||
46 | #ifdef CONFIG_X86_64 | 45 | #ifdef CONFIG_X86_64 |
47 | unsigned long saved_scratch_register; | 46 | unsigned long saved_scratch_register; |
48 | #endif | 47 | #endif |
48 | unsigned int saved_trap_nr; | ||
49 | unsigned int saved_tf; | ||
49 | }; | 50 | }; |
50 | 51 | ||
51 | extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr); | 52 | extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr); |
diff --git a/arch/x86/include/asm/user.h b/arch/x86/include/asm/user.h index 24532c7da3d6..ccab4af1646d 100644 --- a/arch/x86/include/asm/user.h +++ b/arch/x86/include/asm/user.h | |||
@@ -2,9 +2,9 @@ | |||
2 | #define _ASM_X86_USER_H | 2 | #define _ASM_X86_USER_H |
3 | 3 | ||
4 | #ifdef CONFIG_X86_32 | 4 | #ifdef CONFIG_X86_32 |
5 | # include "user_32.h" | 5 | # include <asm/user_32.h> |
6 | #else | 6 | #else |
7 | # include "user_64.h" | 7 | # include <asm/user_64.h> |
8 | #endif | 8 | #endif |
9 | 9 | ||
10 | #include <asm/types.h> | 10 | #include <asm/types.h> |
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h index bb0522850b74..fddb53d63915 100644 --- a/arch/x86/include/asm/vdso.h +++ b/arch/x86/include/asm/vdso.h | |||
@@ -11,7 +11,8 @@ extern const char VDSO32_PRELINK[]; | |||
11 | #define VDSO32_SYMBOL(base, name) \ | 11 | #define VDSO32_SYMBOL(base, name) \ |
12 | ({ \ | 12 | ({ \ |
13 | extern const char VDSO32_##name[]; \ | 13 | extern const char VDSO32_##name[]; \ |
14 | (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \ | 14 | (void __user *)(VDSO32_##name - VDSO32_PRELINK + \ |
15 | (unsigned long)(base)); \ | ||
15 | }) | 16 | }) |
16 | #endif | 17 | #endif |
17 | 18 | ||
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index 8b38be2de9e1..46e24d36b7da 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h | |||
@@ -17,8 +17,8 @@ struct vsyscall_gtod_data { | |||
17 | 17 | ||
18 | /* open coded 'struct timespec' */ | 18 | /* open coded 'struct timespec' */ |
19 | time_t wall_time_sec; | 19 | time_t wall_time_sec; |
20 | u32 wall_time_nsec; | 20 | u64 wall_time_snsec; |
21 | u32 monotonic_time_nsec; | 21 | u64 monotonic_time_snsec; |
22 | time_t monotonic_time_sec; | 22 | time_t monotonic_time_sec; |
23 | 23 | ||
24 | struct timezone sys_tz; | 24 | struct timezone sys_tz; |
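The rename from wall_time_nsec to wall_time_snsec is not cosmetic: the field now holds nanoseconds left-shifted by the clocksource shift, so the vDSO can add the raw shifted cycle delta and do a single right shift per read instead of a multiply-and-round. The read side then looks roughly like this (a sketch of the vclock_gettime.c pattern; vgetsns() is the helper returning the shifted delta, and the seqcount retry loop is omitted for brevity):

static void do_realtime_sketch(struct timespec *ts)
{
	u64 ns;
	int mode;	/* the real code returns the vclock mode to the caller */

	ts->tv_sec = gtod->wall_time_sec;
	ns = gtod->wall_time_snsec;	/* nanoseconds << gtod->clock.shift */
	ns += vgetsns(&mode);		/* shifted cycles since the last update */
	ns >>= gtod->clock.shift;	/* back to plain nanoseconds */
	timespec_add_ns(ts, ns);
}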
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 74fcb963595b..36ec21c36d68 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h | |||
@@ -25,6 +25,88 @@ | |||
25 | * | 25 | * |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 | ||
29 | |||
30 | #define EXIT_REASON_EXCEPTION_NMI 0 | ||
31 | #define EXIT_REASON_EXTERNAL_INTERRUPT 1 | ||
32 | #define EXIT_REASON_TRIPLE_FAULT 2 | ||
33 | |||
34 | #define EXIT_REASON_PENDING_INTERRUPT 7 | ||
35 | #define EXIT_REASON_NMI_WINDOW 8 | ||
36 | #define EXIT_REASON_TASK_SWITCH 9 | ||
37 | #define EXIT_REASON_CPUID 10 | ||
38 | #define EXIT_REASON_HLT 12 | ||
39 | #define EXIT_REASON_INVD 13 | ||
40 | #define EXIT_REASON_INVLPG 14 | ||
41 | #define EXIT_REASON_RDPMC 15 | ||
42 | #define EXIT_REASON_RDTSC 16 | ||
43 | #define EXIT_REASON_VMCALL 18 | ||
44 | #define EXIT_REASON_VMCLEAR 19 | ||
45 | #define EXIT_REASON_VMLAUNCH 20 | ||
46 | #define EXIT_REASON_VMPTRLD 21 | ||
47 | #define EXIT_REASON_VMPTRST 22 | ||
48 | #define EXIT_REASON_VMREAD 23 | ||
49 | #define EXIT_REASON_VMRESUME 24 | ||
50 | #define EXIT_REASON_VMWRITE 25 | ||
51 | #define EXIT_REASON_VMOFF 26 | ||
52 | #define EXIT_REASON_VMON 27 | ||
53 | #define EXIT_REASON_CR_ACCESS 28 | ||
54 | #define EXIT_REASON_DR_ACCESS 29 | ||
55 | #define EXIT_REASON_IO_INSTRUCTION 30 | ||
56 | #define EXIT_REASON_MSR_READ 31 | ||
57 | #define EXIT_REASON_MSR_WRITE 32 | ||
58 | #define EXIT_REASON_INVALID_STATE 33 | ||
59 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 | ||
60 | #define EXIT_REASON_MONITOR_INSTRUCTION 39 | ||
61 | #define EXIT_REASON_PAUSE_INSTRUCTION 40 | ||
62 | #define EXIT_REASON_MCE_DURING_VMENTRY 41 | ||
63 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 | ||
64 | #define EXIT_REASON_APIC_ACCESS 44 | ||
65 | #define EXIT_REASON_EPT_VIOLATION 48 | ||
66 | #define EXIT_REASON_EPT_MISCONFIG 49 | ||
67 | #define EXIT_REASON_WBINVD 54 | ||
68 | #define EXIT_REASON_XSETBV 55 | ||
69 | #define EXIT_REASON_INVPCID 58 | ||
70 | |||
71 | #define VMX_EXIT_REASONS \ | ||
72 | { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ | ||
73 | { EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \ | ||
74 | { EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \ | ||
75 | { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \ | ||
76 | { EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \ | ||
77 | { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \ | ||
78 | { EXIT_REASON_CPUID, "CPUID" }, \ | ||
79 | { EXIT_REASON_HLT, "HLT" }, \ | ||
80 | { EXIT_REASON_INVLPG, "INVLPG" }, \ | ||
81 | { EXIT_REASON_RDPMC, "RDPMC" }, \ | ||
82 | { EXIT_REASON_RDTSC, "RDTSC" }, \ | ||
83 | { EXIT_REASON_VMCALL, "VMCALL" }, \ | ||
84 | { EXIT_REASON_VMCLEAR, "VMCLEAR" }, \ | ||
85 | { EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \ | ||
86 | { EXIT_REASON_VMPTRLD, "VMPTRLD" }, \ | ||
87 | { EXIT_REASON_VMPTRST, "VMPTRST" }, \ | ||
88 | { EXIT_REASON_VMREAD, "VMREAD" }, \ | ||
89 | { EXIT_REASON_VMRESUME, "VMRESUME" }, \ | ||
90 | { EXIT_REASON_VMWRITE, "VMWRITE" }, \ | ||
91 | { EXIT_REASON_VMOFF, "VMOFF" }, \ | ||
92 | { EXIT_REASON_VMON, "VMON" }, \ | ||
93 | { EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \ | ||
94 | { EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \ | ||
95 | { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \ | ||
96 | { EXIT_REASON_MSR_READ, "MSR_READ" }, \ | ||
97 | { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \ | ||
98 | { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \ | ||
99 | { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \ | ||
100 | { EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \ | ||
101 | { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \ | ||
102 | { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \ | ||
103 | { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \ | ||
104 | { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \ | ||
105 | { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \ | ||
106 | { EXIT_REASON_WBINVD, "WBINVD" } | ||
107 | |||
108 | #ifdef __KERNEL__ | ||
109 | |||
28 | #include <linux/types.h> | 110 | #include <linux/types.h> |
29 | 111 | ||
30 | /* | 112 | /* |
@@ -241,49 +323,6 @@ enum vmcs_field { | |||
241 | HOST_RIP = 0x00006c16, | 323 | HOST_RIP = 0x00006c16, |
242 | }; | 324 | }; |
243 | 325 | ||
244 | #define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 | ||
245 | |||
246 | #define EXIT_REASON_EXCEPTION_NMI 0 | ||
247 | #define EXIT_REASON_EXTERNAL_INTERRUPT 1 | ||
248 | #define EXIT_REASON_TRIPLE_FAULT 2 | ||
249 | |||
250 | #define EXIT_REASON_PENDING_INTERRUPT 7 | ||
251 | #define EXIT_REASON_NMI_WINDOW 8 | ||
252 | #define EXIT_REASON_TASK_SWITCH 9 | ||
253 | #define EXIT_REASON_CPUID 10 | ||
254 | #define EXIT_REASON_HLT 12 | ||
255 | #define EXIT_REASON_INVD 13 | ||
256 | #define EXIT_REASON_INVLPG 14 | ||
257 | #define EXIT_REASON_RDPMC 15 | ||
258 | #define EXIT_REASON_RDTSC 16 | ||
259 | #define EXIT_REASON_VMCALL 18 | ||
260 | #define EXIT_REASON_VMCLEAR 19 | ||
261 | #define EXIT_REASON_VMLAUNCH 20 | ||
262 | #define EXIT_REASON_VMPTRLD 21 | ||
263 | #define EXIT_REASON_VMPTRST 22 | ||
264 | #define EXIT_REASON_VMREAD 23 | ||
265 | #define EXIT_REASON_VMRESUME 24 | ||
266 | #define EXIT_REASON_VMWRITE 25 | ||
267 | #define EXIT_REASON_VMOFF 26 | ||
268 | #define EXIT_REASON_VMON 27 | ||
269 | #define EXIT_REASON_CR_ACCESS 28 | ||
270 | #define EXIT_REASON_DR_ACCESS 29 | ||
271 | #define EXIT_REASON_IO_INSTRUCTION 30 | ||
272 | #define EXIT_REASON_MSR_READ 31 | ||
273 | #define EXIT_REASON_MSR_WRITE 32 | ||
274 | #define EXIT_REASON_INVALID_STATE 33 | ||
275 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 | ||
276 | #define EXIT_REASON_MONITOR_INSTRUCTION 39 | ||
277 | #define EXIT_REASON_PAUSE_INSTRUCTION 40 | ||
278 | #define EXIT_REASON_MCE_DURING_VMENTRY 41 | ||
279 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 | ||
280 | #define EXIT_REASON_APIC_ACCESS 44 | ||
281 | #define EXIT_REASON_EPT_VIOLATION 48 | ||
282 | #define EXIT_REASON_EPT_MISCONFIG 49 | ||
283 | #define EXIT_REASON_WBINVD 54 | ||
284 | #define EXIT_REASON_XSETBV 55 | ||
285 | #define EXIT_REASON_INVPCID 58 | ||
286 | |||
287 | /* | 326 | /* |
288 | * Interruption-information format | 327 | * Interruption-information format |
289 | */ | 328 | */ |
@@ -488,3 +527,5 @@ enum vm_instruction_error_number { | |||
488 | }; | 527 | }; |
489 | 528 | ||
490 | #endif | 529 | #endif |
530 | |||
531 | #endif | ||
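Hoisting the EXIT_REASON_* constants and the VMX_EXIT_REASONS string table above the new __KERNEL__ guard exports them to userspace, where tools like perf can decode kvm exit tracepoints. The table is shaped for the tracing __print_symbolic() helper, but a plain decoder over it is straightforward (a sketch; the lookup function is illustrative):

static const struct {
	int reason;
	const char *name;
} vmx_exit_names[] = { VMX_EXIT_REASONS };

static const char *vmx_exit_name(u32 exit_reason)
{
	size_t i;

	exit_reason &= ~VMX_EXIT_REASONS_FAILED_VMENTRY;	/* strip the failure bit */
	for (i = 0; i < ARRAY_SIZE(vmx_exit_names); i++)
		if (vmx_exit_names[i].reason == (int)exit_reason)
			return vmx_exit_names[i].name;
	return "UNKNOWN";
}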
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h index ca9487d4f176..54d52ff1304a 100644 --- a/arch/x86/include/asm/xen/interface.h +++ b/arch/x86/include/asm/xen/interface.h | |||
@@ -122,9 +122,9 @@ struct arch_shared_info { | |||
122 | #endif /* !__ASSEMBLY__ */ | 122 | #endif /* !__ASSEMBLY__ */ |
123 | 123 | ||
124 | #ifdef CONFIG_X86_32 | 124 | #ifdef CONFIG_X86_32 |
125 | #include "interface_32.h" | 125 | #include <asm/xen/interface_32.h> |
126 | #else | 126 | #else |
127 | #include "interface_64.h" | 127 | #include <asm/xen/interface_64.h> |
128 | #endif | 128 | #endif |
129 | 129 | ||
130 | #include <asm/pvclock-abi.h> | 130 | #include <asm/pvclock-abi.h> |
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 93971e841dd5..472b9b783019 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h | |||
@@ -51,7 +51,8 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s, | |||
51 | 51 | ||
52 | extern int m2p_add_override(unsigned long mfn, struct page *page, | 52 | extern int m2p_add_override(unsigned long mfn, struct page *page, |
53 | struct gnttab_map_grant_ref *kmap_op); | 53 | struct gnttab_map_grant_ref *kmap_op); |
54 | extern int m2p_remove_override(struct page *page, bool clear_pte); | 54 | extern int m2p_remove_override(struct page *page, |
55 | struct gnttab_map_grant_ref *kmap_op); | ||
55 | extern struct page *m2p_find_override(unsigned long mfn); | 56 | extern struct page *m2p_find_override(unsigned long mfn); |
56 | extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); | 57 | extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); |
57 | 58 | ||
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h index 7fcf6f3dbcc3..f8fde90bc45e 100644 --- a/arch/x86/include/asm/xor.h +++ b/arch/x86/include/asm/xor.h | |||
@@ -3,8 +3,8 @@ | |||
3 | # include <asm-generic/xor.h> | 3 | # include <asm-generic/xor.h> |
4 | #else | 4 | #else |
5 | #ifdef CONFIG_X86_32 | 5 | #ifdef CONFIG_X86_32 |
6 | # include "xor_32.h" | 6 | # include <asm/xor_32.h> |
7 | #else | 7 | #else |
8 | # include "xor_64.h" | 8 | # include <asm/xor_64.h> |
9 | #endif | 9 | #endif |
10 | #endif | 10 | #endif |
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h index 454570891bdc..f79cb7ec0e06 100644 --- a/arch/x86/include/asm/xor_32.h +++ b/arch/x86/include/asm/xor_32.h | |||
@@ -534,38 +534,6 @@ static struct xor_block_template xor_block_p5_mmx = { | |||
534 | * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) | 534 | * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) |
535 | */ | 535 | */ |
536 | 536 | ||
537 | #define XMMS_SAVE \ | ||
538 | do { \ | ||
539 | preempt_disable(); \ | ||
540 | cr0 = read_cr0(); \ | ||
541 | clts(); \ | ||
542 | asm volatile( \ | ||
543 | "movups %%xmm0,(%0) ;\n\t" \ | ||
544 | "movups %%xmm1,0x10(%0) ;\n\t" \ | ||
545 | "movups %%xmm2,0x20(%0) ;\n\t" \ | ||
546 | "movups %%xmm3,0x30(%0) ;\n\t" \ | ||
547 | : \ | ||
548 | : "r" (xmm_save) \ | ||
549 | : "memory"); \ | ||
550 | } while (0) | ||
551 | |||
552 | #define XMMS_RESTORE \ | ||
553 | do { \ | ||
554 | asm volatile( \ | ||
555 | "sfence ;\n\t" \ | ||
556 | "movups (%0),%%xmm0 ;\n\t" \ | ||
557 | "movups 0x10(%0),%%xmm1 ;\n\t" \ | ||
558 | "movups 0x20(%0),%%xmm2 ;\n\t" \ | ||
559 | "movups 0x30(%0),%%xmm3 ;\n\t" \ | ||
560 | : \ | ||
561 | : "r" (xmm_save) \ | ||
562 | : "memory"); \ | ||
563 | write_cr0(cr0); \ | ||
564 | preempt_enable(); \ | ||
565 | } while (0) | ||
566 | |||
567 | #define ALIGN16 __attribute__((aligned(16))) | ||
568 | |||
569 | #define OFFS(x) "16*("#x")" | 537 | #define OFFS(x) "16*("#x")" |
570 | #define PF_OFFS(x) "256+16*("#x")" | 538 | #define PF_OFFS(x) "256+16*("#x")" |
571 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" | 539 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" |
@@ -587,10 +555,8 @@ static void | |||
587 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | 555 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) |
588 | { | 556 | { |
589 | unsigned long lines = bytes >> 8; | 557 | unsigned long lines = bytes >> 8; |
590 | char xmm_save[16*4] ALIGN16; | ||
591 | int cr0; | ||
592 | 558 | ||
593 | XMMS_SAVE; | 559 | kernel_fpu_begin(); |
594 | 560 | ||
595 | asm volatile( | 561 | asm volatile( |
596 | #undef BLOCK | 562 | #undef BLOCK |
@@ -633,7 +599,7 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | |||
633 | : | 599 | : |
634 | : "memory"); | 600 | : "memory"); |
635 | 601 | ||
636 | XMMS_RESTORE; | 602 | kernel_fpu_end(); |
637 | } | 603 | } |
638 | 604 | ||
639 | static void | 605 | static void |
@@ -641,10 +607,8 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
641 | unsigned long *p3) | 607 | unsigned long *p3) |
642 | { | 608 | { |
643 | unsigned long lines = bytes >> 8; | 609 | unsigned long lines = bytes >> 8; |
644 | char xmm_save[16*4] ALIGN16; | ||
645 | int cr0; | ||
646 | 610 | ||
647 | XMMS_SAVE; | 611 | kernel_fpu_begin(); |
648 | 612 | ||
649 | asm volatile( | 613 | asm volatile( |
650 | #undef BLOCK | 614 | #undef BLOCK |
@@ -694,7 +658,7 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
694 | : | 658 | : |
695 | : "memory" ); | 659 | : "memory" ); |
696 | 660 | ||
697 | XMMS_RESTORE; | 661 | kernel_fpu_end(); |
698 | } | 662 | } |
699 | 663 | ||
700 | static void | 664 | static void |
@@ -702,10 +666,8 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
702 | unsigned long *p3, unsigned long *p4) | 666 | unsigned long *p3, unsigned long *p4) |
703 | { | 667 | { |
704 | unsigned long lines = bytes >> 8; | 668 | unsigned long lines = bytes >> 8; |
705 | char xmm_save[16*4] ALIGN16; | ||
706 | int cr0; | ||
707 | 669 | ||
708 | XMMS_SAVE; | 670 | kernel_fpu_begin(); |
709 | 671 | ||
710 | asm volatile( | 672 | asm volatile( |
711 | #undef BLOCK | 673 | #undef BLOCK |
@@ -762,7 +724,7 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
762 | : | 724 | : |
763 | : "memory" ); | 725 | : "memory" ); |
764 | 726 | ||
765 | XMMS_RESTORE; | 727 | kernel_fpu_end(); |
766 | } | 728 | } |
767 | 729 | ||
768 | static void | 730 | static void |
@@ -770,10 +732,8 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
770 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | 732 | unsigned long *p3, unsigned long *p4, unsigned long *p5) |
771 | { | 733 | { |
772 | unsigned long lines = bytes >> 8; | 734 | unsigned long lines = bytes >> 8; |
773 | char xmm_save[16*4] ALIGN16; | ||
774 | int cr0; | ||
775 | 735 | ||
776 | XMMS_SAVE; | 736 | kernel_fpu_begin(); |
777 | 737 | ||
778 | /* Make sure GCC forgets anything it knows about p4 or p5, | 738 | /* Make sure GCC forgets anything it knows about p4 or p5, |
779 | such that it won't pass to the asm volatile below a | 739 | such that it won't pass to the asm volatile below a |
@@ -850,7 +810,7 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
850 | like assuming they have some legal value. */ | 810 | like assuming they have some legal value. */ |
851 | asm("" : "=r" (p4), "=r" (p5)); | 811 | asm("" : "=r" (p4), "=r" (p5)); |
852 | 812 | ||
853 | XMMS_RESTORE; | 813 | kernel_fpu_end(); |
854 | } | 814 | } |
855 | 815 | ||
856 | static struct xor_block_template xor_block_pIII_sse = { | 816 | static struct xor_block_template xor_block_pIII_sse = { |
@@ -862,7 +822,7 @@ static struct xor_block_template xor_block_pIII_sse = { | |||
862 | }; | 822 | }; |
863 | 823 | ||
864 | /* Also try the AVX routines */ | 824 | /* Also try the AVX routines */ |
865 | #include "xor_avx.h" | 825 | #include <asm/xor_avx.h> |
866 | 826 | ||
867 | /* Also try the generic routines. */ | 827 | /* Also try the generic routines. */ |
868 | #include <asm-generic/xor.h> | 828 | #include <asm-generic/xor.h> |
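Every xor_sse_N() above loses its hand-rolled XMMS_SAVE/XMMS_RESTORE (manual CR0.TS juggling plus movups spills of four registers) in favor of kernel_fpu_begin()/kernel_fpu_end(), which disable preemption and let the core FPU code decide what to save and restore. The same rewrite repeats in xor_64.h and xor_avx.h below; the resulting pattern for any in-kernel SIMD user is simply the following (a sketch in which a plain C loop stands in for the SSE block kernel):

#include <asm/i387.h>

static void xor_two_blocks(unsigned long bytes, unsigned long *p1,
			   const unsigned long *p2)
{
	unsigned long i;

	kernel_fpu_begin();	/* disables preemption, takes ownership of the FPU */
	for (i = 0; i < bytes / sizeof(long); i++)
		p1[i] ^= p2[i];	/* the real code runs the unrolled SSE loop here */
	kernel_fpu_end();	/* hands the FPU back, re-enables preemption */
}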
diff --git a/arch/x86/include/asm/xor_64.h b/arch/x86/include/asm/xor_64.h index b9b2323e90fe..87ac522c4af5 100644 --- a/arch/x86/include/asm/xor_64.h +++ b/arch/x86/include/asm/xor_64.h | |||
@@ -34,41 +34,7 @@ | |||
34 | * no advantages to be gotten from x86-64 here anyways. | 34 | * no advantages to be gotten from x86-64 here anyways. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | typedef struct { | 37 | #include <asm/i387.h> |
38 | unsigned long a, b; | ||
39 | } __attribute__((aligned(16))) xmm_store_t; | ||
40 | |||
41 | /* Doesn't use gcc to save the XMM registers, because there is no easy way to | ||
42 | tell it to do a clts before the register saving. */ | ||
43 | #define XMMS_SAVE \ | ||
44 | do { \ | ||
45 | preempt_disable(); \ | ||
46 | asm volatile( \ | ||
47 | "movq %%cr0,%0 ;\n\t" \ | ||
48 | "clts ;\n\t" \ | ||
49 | "movups %%xmm0,(%1) ;\n\t" \ | ||
50 | "movups %%xmm1,0x10(%1) ;\n\t" \ | ||
51 | "movups %%xmm2,0x20(%1) ;\n\t" \ | ||
52 | "movups %%xmm3,0x30(%1) ;\n\t" \ | ||
53 | : "=&r" (cr0) \ | ||
54 | : "r" (xmm_save) \ | ||
55 | : "memory"); \ | ||
56 | } while (0) | ||
57 | |||
58 | #define XMMS_RESTORE \ | ||
59 | do { \ | ||
60 | asm volatile( \ | ||
61 | "sfence ;\n\t" \ | ||
62 | "movups (%1),%%xmm0 ;\n\t" \ | ||
63 | "movups 0x10(%1),%%xmm1 ;\n\t" \ | ||
64 | "movups 0x20(%1),%%xmm2 ;\n\t" \ | ||
65 | "movups 0x30(%1),%%xmm3 ;\n\t" \ | ||
66 | "movq %0,%%cr0 ;\n\t" \ | ||
67 | : \ | ||
68 | : "r" (cr0), "r" (xmm_save) \ | ||
69 | : "memory"); \ | ||
70 | preempt_enable(); \ | ||
71 | } while (0) | ||
72 | 38 | ||
73 | #define OFFS(x) "16*("#x")" | 39 | #define OFFS(x) "16*("#x")" |
74 | #define PF_OFFS(x) "256+16*("#x")" | 40 | #define PF_OFFS(x) "256+16*("#x")" |
@@ -91,10 +57,8 @@ static void | |||
91 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | 57 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) |
92 | { | 58 | { |
93 | unsigned int lines = bytes >> 8; | 59 | unsigned int lines = bytes >> 8; |
94 | unsigned long cr0; | ||
95 | xmm_store_t xmm_save[4]; | ||
96 | 60 | ||
97 | XMMS_SAVE; | 61 | kernel_fpu_begin(); |
98 | 62 | ||
99 | asm volatile( | 63 | asm volatile( |
100 | #undef BLOCK | 64 | #undef BLOCK |
@@ -135,7 +99,7 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | |||
135 | : [inc] "r" (256UL) | 99 | : [inc] "r" (256UL) |
136 | : "memory"); | 100 | : "memory"); |
137 | 101 | ||
138 | XMMS_RESTORE; | 102 | kernel_fpu_end(); |
139 | } | 103 | } |
140 | 104 | ||
141 | static void | 105 | static void |
@@ -143,11 +107,8 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
143 | unsigned long *p3) | 107 | unsigned long *p3) |
144 | { | 108 | { |
145 | unsigned int lines = bytes >> 8; | 109 | unsigned int lines = bytes >> 8; |
146 | xmm_store_t xmm_save[4]; | ||
147 | unsigned long cr0; | ||
148 | |||
149 | XMMS_SAVE; | ||
150 | 110 | ||
111 | kernel_fpu_begin(); | ||
151 | asm volatile( | 112 | asm volatile( |
152 | #undef BLOCK | 113 | #undef BLOCK |
153 | #define BLOCK(i) \ | 114 | #define BLOCK(i) \ |
@@ -194,7 +155,7 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
194 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) | 155 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) |
195 | : [inc] "r" (256UL) | 156 | : [inc] "r" (256UL) |
196 | : "memory"); | 157 | : "memory"); |
197 | XMMS_RESTORE; | 158 | kernel_fpu_end(); |
198 | } | 159 | } |
199 | 160 | ||
200 | static void | 161 | static void |
@@ -202,10 +163,8 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
202 | unsigned long *p3, unsigned long *p4) | 163 | unsigned long *p3, unsigned long *p4) |
203 | { | 164 | { |
204 | unsigned int lines = bytes >> 8; | 165 | unsigned int lines = bytes >> 8; |
205 | xmm_store_t xmm_save[4]; | ||
206 | unsigned long cr0; | ||
207 | 166 | ||
208 | XMMS_SAVE; | 167 | kernel_fpu_begin(); |
209 | 168 | ||
210 | asm volatile( | 169 | asm volatile( |
211 | #undef BLOCK | 170 | #undef BLOCK |
@@ -261,7 +220,7 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
261 | : [inc] "r" (256UL) | 220 | : [inc] "r" (256UL) |
262 | : "memory" ); | 221 | : "memory" ); |
263 | 222 | ||
264 | XMMS_RESTORE; | 223 | kernel_fpu_end(); |
265 | } | 224 | } |
266 | 225 | ||
267 | static void | 226 | static void |
@@ -269,10 +228,8 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
269 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | 228 | unsigned long *p3, unsigned long *p4, unsigned long *p5) |
270 | { | 229 | { |
271 | unsigned int lines = bytes >> 8; | 230 | unsigned int lines = bytes >> 8; |
272 | xmm_store_t xmm_save[4]; | ||
273 | unsigned long cr0; | ||
274 | 231 | ||
275 | XMMS_SAVE; | 232 | kernel_fpu_begin(); |
276 | 233 | ||
277 | asm volatile( | 234 | asm volatile( |
278 | #undef BLOCK | 235 | #undef BLOCK |
@@ -336,7 +293,7 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
336 | : [inc] "r" (256UL) | 293 | : [inc] "r" (256UL) |
337 | : "memory"); | 294 | : "memory"); |
338 | 295 | ||
339 | XMMS_RESTORE; | 296 | kernel_fpu_end(); |
340 | } | 297 | } |
341 | 298 | ||
342 | static struct xor_block_template xor_block_sse = { | 299 | static struct xor_block_template xor_block_sse = { |
@@ -349,7 +306,7 @@ static struct xor_block_template xor_block_sse = { | |||
349 | 306 | ||
350 | 307 | ||
351 | /* Also try the AVX routines */ | 308 | /* Also try the AVX routines */ |
352 | #include "xor_avx.h" | 309 | #include <asm/xor_avx.h> |
353 | 310 | ||
354 | #undef XOR_TRY_TEMPLATES | 311 | #undef XOR_TRY_TEMPLATES |
355 | #define XOR_TRY_TEMPLATES \ | 312 | #define XOR_TRY_TEMPLATES \ |
diff --git a/arch/x86/include/asm/xor_avx.h b/arch/x86/include/asm/xor_avx.h index 2510d35f480e..7ea79c5fa1f2 100644 --- a/arch/x86/include/asm/xor_avx.h +++ b/arch/x86/include/asm/xor_avx.h | |||
@@ -20,32 +20,6 @@ | |||
20 | #include <linux/compiler.h> | 20 | #include <linux/compiler.h> |
21 | #include <asm/i387.h> | 21 | #include <asm/i387.h> |
22 | 22 | ||
23 | #define ALIGN32 __aligned(32) | ||
24 | |||
25 | #define YMM_SAVED_REGS 4 | ||
26 | |||
27 | #define YMMS_SAVE \ | ||
28 | do { \ | ||
29 | preempt_disable(); \ | ||
30 | cr0 = read_cr0(); \ | ||
31 | clts(); \ | ||
32 | asm volatile("vmovaps %%ymm0, %0" : "=m" (ymm_save[0]) : : "memory"); \ | ||
33 | asm volatile("vmovaps %%ymm1, %0" : "=m" (ymm_save[32]) : : "memory"); \ | ||
34 | asm volatile("vmovaps %%ymm2, %0" : "=m" (ymm_save[64]) : : "memory"); \ | ||
35 | asm volatile("vmovaps %%ymm3, %0" : "=m" (ymm_save[96]) : : "memory"); \ | ||
36 | } while (0); | ||
37 | |||
38 | #define YMMS_RESTORE \ | ||
39 | do { \ | ||
40 | asm volatile("sfence" : : : "memory"); \ | ||
41 | asm volatile("vmovaps %0, %%ymm3" : : "m" (ymm_save[96])); \ | ||
42 | asm volatile("vmovaps %0, %%ymm2" : : "m" (ymm_save[64])); \ | ||
43 | asm volatile("vmovaps %0, %%ymm1" : : "m" (ymm_save[32])); \ | ||
44 | asm volatile("vmovaps %0, %%ymm0" : : "m" (ymm_save[0])); \ | ||
45 | write_cr0(cr0); \ | ||
46 | preempt_enable(); \ | ||
47 | } while (0); | ||
48 | |||
49 | #define BLOCK4(i) \ | 23 | #define BLOCK4(i) \ |
50 | BLOCK(32 * i, 0) \ | 24 | BLOCK(32 * i, 0) \ |
51 | BLOCK(32 * (i + 1), 1) \ | 25 | BLOCK(32 * (i + 1), 1) \ |
@@ -60,10 +34,9 @@ do { \ | |||
60 | 34 | ||
61 | static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1) | 35 | static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1) |
62 | { | 36 | { |
63 | unsigned long cr0, lines = bytes >> 9; | 37 | unsigned long lines = bytes >> 9; |
64 | char ymm_save[32 * YMM_SAVED_REGS] ALIGN32; | ||
65 | 38 | ||
66 | YMMS_SAVE | 39 | kernel_fpu_begin(); |
67 | 40 | ||
68 | while (lines--) { | 41 | while (lines--) { |
69 | #undef BLOCK | 42 | #undef BLOCK |
@@ -82,16 +55,15 @@ do { \ | |||
82 | p1 = (unsigned long *)((uintptr_t)p1 + 512); | 55 | p1 = (unsigned long *)((uintptr_t)p1 + 512); |
83 | } | 56 | } |
84 | 57 | ||
85 | YMMS_RESTORE | 58 | kernel_fpu_end(); |
86 | } | 59 | } |
87 | 60 | ||
88 | static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1, | 61 | static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1, |
89 | unsigned long *p2) | 62 | unsigned long *p2) |
90 | { | 63 | { |
91 | unsigned long cr0, lines = bytes >> 9; | 64 | unsigned long lines = bytes >> 9; |
92 | char ymm_save[32 * YMM_SAVED_REGS] ALIGN32; | ||
93 | 65 | ||
94 | YMMS_SAVE | 66 | kernel_fpu_begin(); |
95 | 67 | ||
96 | while (lines--) { | 68 | while (lines--) { |
97 | #undef BLOCK | 69 | #undef BLOCK |
@@ -113,16 +85,15 @@ do { \ | |||
113 | p2 = (unsigned long *)((uintptr_t)p2 + 512); | 85 | p2 = (unsigned long *)((uintptr_t)p2 + 512); |
114 | } | 86 | } |
115 | 87 | ||
116 | YMMS_RESTORE | 88 | kernel_fpu_end(); |
117 | } | 89 | } |
118 | 90 | ||
119 | static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1, | 91 | static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1, |
120 | unsigned long *p2, unsigned long *p3) | 92 | unsigned long *p2, unsigned long *p3) |
121 | { | 93 | { |
122 | unsigned long cr0, lines = bytes >> 9; | 94 | unsigned long lines = bytes >> 9; |
123 | char ymm_save[32 * YMM_SAVED_REGS] ALIGN32; | ||
124 | 95 | ||
125 | YMMS_SAVE | 96 | kernel_fpu_begin(); |
126 | 97 | ||
127 | while (lines--) { | 98 | while (lines--) { |
128 | #undef BLOCK | 99 | #undef BLOCK |
@@ -147,16 +118,15 @@ do { \ | |||
147 | p3 = (unsigned long *)((uintptr_t)p3 + 512); | 118 | p3 = (unsigned long *)((uintptr_t)p3 + 512); |
148 | } | 119 | } |
149 | 120 | ||
150 | YMMS_RESTORE | 121 | kernel_fpu_end(); |
151 | } | 122 | } |
152 | 123 | ||
153 | static void xor_avx_5(unsigned long bytes, unsigned long *p0, unsigned long *p1, | 124 | static void xor_avx_5(unsigned long bytes, unsigned long *p0, unsigned long *p1, |
154 | unsigned long *p2, unsigned long *p3, unsigned long *p4) | 125 | unsigned long *p2, unsigned long *p3, unsigned long *p4) |
155 | { | 126 | { |
156 | unsigned long cr0, lines = bytes >> 9; | 127 | unsigned long lines = bytes >> 9; |
157 | char ymm_save[32 * YMM_SAVED_REGS] ALIGN32; | ||
158 | 128 | ||
159 | YMMS_SAVE | 129 | kernel_fpu_begin(); |
160 | 130 | ||
161 | while (lines--) { | 131 | while (lines--) { |
162 | #undef BLOCK | 132 | #undef BLOCK |
@@ -184,7 +154,7 @@ do { \ | |||
184 | p4 = (unsigned long *)((uintptr_t)p4 + 512); | 154 | p4 = (unsigned long *)((uintptr_t)p4 + 512); |
185 | } | 155 | } |
186 | 156 | ||
187 | YMMS_RESTORE | 157 | kernel_fpu_end(); |
188 | } | 158 | } |
189 | 159 | ||
190 | static struct xor_block_template xor_block_avx = { | 160 | static struct xor_block_template xor_block_avx = { |
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 8a1b6f9b594a..0415cdabb5a6 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h | |||
@@ -34,17 +34,14 @@ | |||
34 | extern unsigned int xstate_size; | 34 | extern unsigned int xstate_size; |
35 | extern u64 pcntxt_mask; | 35 | extern u64 pcntxt_mask; |
36 | extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS]; | 36 | extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS]; |
37 | extern struct xsave_struct *init_xstate_buf; | ||
37 | 38 | ||
38 | extern void xsave_init(void); | 39 | extern void xsave_init(void); |
39 | extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask); | 40 | extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask); |
40 | extern int init_fpu(struct task_struct *child); | 41 | extern int init_fpu(struct task_struct *child); |
41 | extern int check_for_xstate(struct i387_fxsave_struct __user *buf, | ||
42 | void __user *fpstate, | ||
43 | struct _fpx_sw_bytes *sw); | ||
44 | 42 | ||
45 | static inline int fpu_xrstor_checking(struct fpu *fpu) | 43 | static inline int fpu_xrstor_checking(struct xsave_struct *fx) |
46 | { | 44 | { |
47 | struct xsave_struct *fx = &fpu->state->xsave; | ||
48 | int err; | 45 | int err; |
49 | 46 | ||
50 | asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" | 47 | asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" |
@@ -69,13 +66,13 @@ static inline int xsave_user(struct xsave_struct __user *buf) | |||
69 | * Clear the xsave header first, so that reserved fields are | 66 | * Clear the xsave header first, so that reserved fields are |
70 | * initialized to zero. | 67 | * initialized to zero. |
71 | */ | 68 | */ |
72 | err = __clear_user(&buf->xsave_hdr, | 69 | err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr)); |
73 | sizeof(struct xsave_hdr_struct)); | ||
74 | if (unlikely(err)) | 70 | if (unlikely(err)) |
75 | return -EFAULT; | 71 | return -EFAULT; |
76 | 72 | ||
77 | __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" | 73 | __asm__ __volatile__(ASM_STAC "\n" |
78 | "2:\n" | 74 | "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" |
75 | "2: " ASM_CLAC "\n" | ||
79 | ".section .fixup,\"ax\"\n" | 76 | ".section .fixup,\"ax\"\n" |
80 | "3: movl $-1,%[err]\n" | 77 | "3: movl $-1,%[err]\n" |
81 | " jmp 2b\n" | 78 | " jmp 2b\n" |
@@ -84,9 +81,6 @@ static inline int xsave_user(struct xsave_struct __user *buf) | |||
84 | : [err] "=r" (err) | 81 | : [err] "=r" (err) |
85 | : "D" (buf), "a" (-1), "d" (-1), "0" (0) | 82 | : "D" (buf), "a" (-1), "d" (-1), "0" (0) |
86 | : "memory"); | 83 | : "memory"); |
87 | if (unlikely(err) && __clear_user(buf, xstate_size)) | ||
88 | err = -EFAULT; | ||
89 | /* No need to clear here because the caller clears USED_MATH */ | ||
90 | return err; | 84 | return err; |
91 | } | 85 | } |
92 | 86 | ||
@@ -97,8 +91,9 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) | |||
97 | u32 lmask = mask; | 91 | u32 lmask = mask; |
98 | u32 hmask = mask >> 32; | 92 | u32 hmask = mask >> 32; |
99 | 93 | ||
100 | __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" | 94 | __asm__ __volatile__(ASM_STAC "\n" |
101 | "2:\n" | 95 | "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" |
96 | "2: " ASM_CLAC "\n" | ||
102 | ".section .fixup,\"ax\"\n" | 97 | ".section .fixup,\"ax\"\n" |
103 | "3: movl $-1,%[err]\n" | 98 | "3: movl $-1,%[err]\n" |
104 | " jmp 2b\n" | 99 | " jmp 2b\n" |
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild new file mode 100644 index 000000000000..83b6e9a0dce4 --- /dev/null +++ b/arch/x86/include/uapi/asm/Kbuild | |||
@@ -0,0 +1,6 @@ | |||
1 | # UAPI Header export list | ||
2 | include include/uapi/asm-generic/Kbuild.asm | ||
3 | |||
4 | genhdr-y += unistd_32.h | ||
5 | genhdr-y += unistd_64.h | ||
6 | genhdr-y += unistd_x32.h | ||