Diffstat (limited to 'arch/x86/include')
50 files changed, 414 insertions, 216 deletions
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index f9c0d3ba9e84..66e5f0ef0523 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -26,3 +26,5 @@ header-y += vsyscall.h | |||
26 | genhdr-y += unistd_32.h | 26 | genhdr-y += unistd_32.h |
27 | genhdr-y += unistd_64.h | 27 | genhdr-y += unistd_64.h |
28 | genhdr-y += unistd_x32.h | 28 | genhdr-y += unistd_x32.h |
29 | |||
30 | generic-y += clkdev.h | ||
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 952bd0100c5c..372231c22a47 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -1,3 +1,6 @@ | |||
1 | #ifndef _ASM_X86_ALTERNATIVE_ASM_H | ||
2 | #define _ASM_X86_ALTERNATIVE_ASM_H | ||
3 | |||
1 | #ifdef __ASSEMBLY__ | 4 | #ifdef __ASSEMBLY__ |
2 | 5 | ||
3 | #include <asm/asm.h> | 6 | #include <asm/asm.h> |
@@ -5,10 +8,10 @@ | |||
5 | #ifdef CONFIG_SMP | 8 | #ifdef CONFIG_SMP |
6 | .macro LOCK_PREFIX | 9 | .macro LOCK_PREFIX |
7 | 672: lock | 10 | 672: lock |
8 | .section .smp_locks,"a" | 11 | .pushsection .smp_locks,"a" |
9 | .balign 4 | 12 | .balign 4 |
10 | .long 672b - . | 13 | .long 672b - . |
11 | .previous | 14 | .popsection |
12 | .endm | 15 | .endm |
13 | #else | 16 | #else |
14 | .macro LOCK_PREFIX | 17 | .macro LOCK_PREFIX |
@@ -24,3 +27,5 @@ | |||
24 | .endm | 27 | .endm |
25 | 28 | ||
26 | #endif /* __ASSEMBLY__ */ | 29 | #endif /* __ASSEMBLY__ */ |
30 | |||
31 | #endif /* _ASM_X86_ALTERNATIVE_ASM_H */ | ||
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 444704c8e186..58ed6d96a6ac 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -29,10 +29,10 @@ | |||
29 | 29 | ||
30 | #ifdef CONFIG_SMP | 30 | #ifdef CONFIG_SMP |
31 | #define LOCK_PREFIX_HERE \ | 31 | #define LOCK_PREFIX_HERE \ |
32 | ".section .smp_locks,\"a\"\n" \ | 32 | ".pushsection .smp_locks,\"a\"\n" \ |
33 | ".balign 4\n" \ | 33 | ".balign 4\n" \ |
34 | ".long 671f - .\n" /* offset */ \ | 34 | ".long 671f - .\n" /* offset */ \ |
35 | ".previous\n" \ | 35 | ".popsection\n" \ |
36 | "671:" | 36 | "671:" |
37 | 37 | ||
38 | #define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; " | 38 | #define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; " |
@@ -99,30 +99,30 @@ static inline int alternatives_text_reserved(void *start, void *end) | |||
99 | /* alternative assembly primitive: */ | 99 | /* alternative assembly primitive: */ |
100 | #define ALTERNATIVE(oldinstr, newinstr, feature) \ | 100 | #define ALTERNATIVE(oldinstr, newinstr, feature) \ |
101 | OLDINSTR(oldinstr) \ | 101 | OLDINSTR(oldinstr) \ |
102 | ".section .altinstructions,\"a\"\n" \ | 102 | ".pushsection .altinstructions,\"a\"\n" \ |
103 | ALTINSTR_ENTRY(feature, 1) \ | 103 | ALTINSTR_ENTRY(feature, 1) \ |
104 | ".previous\n" \ | 104 | ".popsection\n" \ |
105 | ".section .discard,\"aw\",@progbits\n" \ | 105 | ".pushsection .discard,\"aw\",@progbits\n" \ |
106 | DISCARD_ENTRY(1) \ | 106 | DISCARD_ENTRY(1) \ |
107 | ".previous\n" \ | 107 | ".popsection\n" \ |
108 | ".section .altinstr_replacement, \"ax\"\n" \ | 108 | ".pushsection .altinstr_replacement, \"ax\"\n" \ |
109 | ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ | 109 | ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ |
110 | ".previous" | 110 | ".popsection" |
111 | 111 | ||
112 | #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ | 112 | #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ |
113 | OLDINSTR(oldinstr) \ | 113 | OLDINSTR(oldinstr) \ |
114 | ".section .altinstructions,\"a\"\n" \ | 114 | ".pushsection .altinstructions,\"a\"\n" \ |
115 | ALTINSTR_ENTRY(feature1, 1) \ | 115 | ALTINSTR_ENTRY(feature1, 1) \ |
116 | ALTINSTR_ENTRY(feature2, 2) \ | 116 | ALTINSTR_ENTRY(feature2, 2) \ |
117 | ".previous\n" \ | 117 | ".popsection\n" \ |
118 | ".section .discard,\"aw\",@progbits\n" \ | 118 | ".pushsection .discard,\"aw\",@progbits\n" \ |
119 | DISCARD_ENTRY(1) \ | 119 | DISCARD_ENTRY(1) \ |
120 | DISCARD_ENTRY(2) \ | 120 | DISCARD_ENTRY(2) \ |
121 | ".previous\n" \ | 121 | ".popsection\n" \ |
122 | ".section .altinstr_replacement, \"ax\"\n" \ | 122 | ".pushsection .altinstr_replacement, \"ax\"\n" \ |
123 | ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ | 123 | ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ |
124 | ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ | 124 | ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ |
125 | ".previous" | 125 | ".popsection" |
126 | 126 | ||
127 | /* | 127 | /* |
128 | * This must be included *after* the definition of ALTERNATIVE due to | 128 | * This must be included *after* the definition of ALTERNATIVE due to |
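A note on the .section/.previous to .pushsection/.popsection switch above: .previous only toggles back to the previously selected section, so if one of these macros expands while the assembler is already inside another pushed section, code can land in the wrong place; .pushsection/.popsection restore exactly the section that was active. A minimal caller sketch, with a hypothetical helper name (example_locked_inc is not part of this patch):

#include <asm/alternative.h>	/* LOCK_PREFIX */
#include <asm/atomic.h>

/*
 * LOCK_PREFIX records the address of the lock byte in .smp_locks via
 * .pushsection/.popsection, so whatever section the surrounding code
 * was emitted into is restored correctly after the asm statement.
 */
static inline void example_locked_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0" : "+m" (v->counter));
}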
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index f34261296ffb..338803422239 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -409,7 +409,7 @@ extern struct apic *apic; | |||
409 | * to enforce the order with in them. | 409 | * to enforce the order with in them. |
410 | */ | 410 | */ |
411 | #define apic_driver(sym) \ | 411 | #define apic_driver(sym) \ |
412 | static struct apic *__apicdrivers_##sym __used \ | 412 | static const struct apic *__apicdrivers_##sym __used \ |
413 | __aligned(sizeof(struct apic *)) \ | 413 | __aligned(sizeof(struct apic *)) \ |
414 | __section(.apicdrivers) = { &sym } | 414 | __section(.apicdrivers) = { &sym } |
415 | 415 | ||
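With the change above, the pointers collected in .apicdrivers become const struct apic *. A hedged registration sketch (my_example_apic is hypothetical; real drivers such as those in apic_flat_64.c register the same way):

static struct apic my_example_apic = {
	.name	= "example",
	/* ... probe and IPI callbacks ... */
};

/* Places &my_example_apic in the .apicdrivers section walked at APIC probe time. */
apic_driver(my_example_apic);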
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 58cb6d4085f7..b6c3b821acf6 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -240,30 +240,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) | |||
240 | return c; | 240 | return c; |
241 | } | 241 | } |
242 | 242 | ||
243 | |||
244 | /* | ||
245 | * atomic_dec_if_positive - decrement by 1 if old value positive | ||
246 | * @v: pointer of type atomic_t | ||
247 | * | ||
248 | * The function returns the old value of *v minus 1, even if | ||
249 | * the atomic variable, v, was not decremented. | ||
250 | */ | ||
251 | static inline int atomic_dec_if_positive(atomic_t *v) | ||
252 | { | ||
253 | int c, old, dec; | ||
254 | c = atomic_read(v); | ||
255 | for (;;) { | ||
256 | dec = c - 1; | ||
257 | if (unlikely(dec < 0)) | ||
258 | break; | ||
259 | old = atomic_cmpxchg((v), c, dec); | ||
260 | if (likely(old == c)) | ||
261 | break; | ||
262 | c = old; | ||
263 | } | ||
264 | return dec; | ||
265 | } | ||
266 | |||
267 | /** | 243 | /** |
268 | * atomic_inc_short - increment of a short integer | 244 | * atomic_inc_short - increment of a short integer |
269 | * @v: pointer to type int | 245 | * @v: pointer to type int |
@@ -309,9 +285,9 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2) | |||
309 | #define smp_mb__after_atomic_inc() barrier() | 285 | #define smp_mb__after_atomic_inc() barrier() |
310 | 286 | ||
311 | #ifdef CONFIG_X86_32 | 287 | #ifdef CONFIG_X86_32 |
312 | # include "atomic64_32.h" | 288 | # include <asm/atomic64_32.h> |
313 | #else | 289 | #else |
314 | # include "atomic64_64.h" | 290 | # include <asm/atomic64_64.h> |
315 | #endif | 291 | #endif |
316 | 292 | ||
317 | #endif /* _ASM_X86_ATOMIC_H */ | 293 | #endif /* _ASM_X86_ATOMIC_H */ |
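atomic_dec_if_positive() disappears from the x86 header because a generic cmpxchg()-based fallback is now provided in <linux/atomic.h>; the pattern is essentially the loop removed above. A minimal sketch under that assumption (example_dec_if_positive is illustrative, not the exact generic code):

static inline int example_dec_if_positive(atomic_t *v)
{
	int c = atomic_read(v);

	while (c > 0) {
		int old = atomic_cmpxchg(v, c, c - 1);

		if (old == c)
			break;
		c = old;	/* lost the race, retry with the new value */
	}
	return c - 1;		/* old value minus 1, even if nothing was written */
}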
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 7f8422a28a46..0fa675033912 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -46,7 +46,7 @@ For 32-bit we have the following conventions - kernel is built with | |||
46 | 46 | ||
47 | */ | 47 | */ |
48 | 48 | ||
49 | #include "dwarf2.h" | 49 | #include <asm/dwarf2.h> |
50 | 50 | ||
51 | /* | 51 | /* |
52 | * 64-bit system call stack frame layout defines and helpers, | 52 | * 64-bit system call stack frame layout defines and helpers, |
diff --git a/arch/x86/include/asm/checksum.h b/arch/x86/include/asm/checksum.h
index 848850fd7d62..5f5bb0f97361 100644
--- a/arch/x86/include/asm/checksum.h
+++ b/arch/x86/include/asm/checksum.h
@@ -1,5 +1,5 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifdef CONFIG_X86_32 |
2 | # include "checksum_32.h" | 2 | # include <asm/checksum_32.h> |
3 | #else | 3 | #else |
4 | # include "checksum_64.h" | 4 | # include <asm/checksum_64.h> |
5 | #endif | 5 | #endif |
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 99480e55973d..8d871eaddb66 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -138,9 +138,9 @@ extern void __add_wrong_size(void) | |||
138 | __raw_cmpxchg((ptr), (old), (new), (size), "") | 138 | __raw_cmpxchg((ptr), (old), (new), (size), "") |
139 | 139 | ||
140 | #ifdef CONFIG_X86_32 | 140 | #ifdef CONFIG_X86_32 |
141 | # include "cmpxchg_32.h" | 141 | # include <asm/cmpxchg_32.h> |
142 | #else | 142 | #else |
143 | # include "cmpxchg_64.h" | 143 | # include <asm/cmpxchg_64.h> |
144 | #endif | 144 | #endif |
145 | 145 | ||
146 | #ifdef __HAVE_ARCH_CMPXCHG | 146 | #ifdef __HAVE_ARCH_CMPXCHG |
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index fedf32b73e65..59c6c401f79f 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -41,6 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64; | |||
41 | typedef u32 compat_uint_t; | 41 | typedef u32 compat_uint_t; |
42 | typedef u32 compat_ulong_t; | 42 | typedef u32 compat_ulong_t; |
43 | typedef u64 __attribute__((aligned(4))) compat_u64; | 43 | typedef u64 __attribute__((aligned(4))) compat_u64; |
44 | typedef u32 compat_uptr_t; | ||
44 | 45 | ||
45 | struct compat_timespec { | 46 | struct compat_timespec { |
46 | compat_time_t tv_sec; | 47 | compat_time_t tv_sec; |
@@ -124,6 +125,78 @@ typedef u32 compat_old_sigset_t; /* at least 32 bits */ | |||
124 | 125 | ||
125 | typedef u32 compat_sigset_word; | 126 | typedef u32 compat_sigset_word; |
126 | 127 | ||
128 | typedef union compat_sigval { | ||
129 | compat_int_t sival_int; | ||
130 | compat_uptr_t sival_ptr; | ||
131 | } compat_sigval_t; | ||
132 | |||
133 | typedef struct compat_siginfo { | ||
134 | int si_signo; | ||
135 | int si_errno; | ||
136 | int si_code; | ||
137 | |||
138 | union { | ||
139 | int _pad[128/sizeof(int) - 3]; | ||
140 | |||
141 | /* kill() */ | ||
142 | struct { | ||
143 | unsigned int _pid; /* sender's pid */ | ||
144 | unsigned int _uid; /* sender's uid */ | ||
145 | } _kill; | ||
146 | |||
147 | /* POSIX.1b timers */ | ||
148 | struct { | ||
149 | compat_timer_t _tid; /* timer id */ | ||
150 | int _overrun; /* overrun count */ | ||
151 | compat_sigval_t _sigval; /* same as below */ | ||
152 | int _sys_private; /* not to be passed to user */ | ||
153 | int _overrun_incr; /* amount to add to overrun */ | ||
154 | } _timer; | ||
155 | |||
156 | /* POSIX.1b signals */ | ||
157 | struct { | ||
158 | unsigned int _pid; /* sender's pid */ | ||
159 | unsigned int _uid; /* sender's uid */ | ||
160 | compat_sigval_t _sigval; | ||
161 | } _rt; | ||
162 | |||
163 | /* SIGCHLD */ | ||
164 | struct { | ||
165 | unsigned int _pid; /* which child */ | ||
166 | unsigned int _uid; /* sender's uid */ | ||
167 | int _status; /* exit code */ | ||
168 | compat_clock_t _utime; | ||
169 | compat_clock_t _stime; | ||
170 | } _sigchld; | ||
171 | |||
172 | /* SIGCHLD (x32 version) */ | ||
173 | struct { | ||
174 | unsigned int _pid; /* which child */ | ||
175 | unsigned int _uid; /* sender's uid */ | ||
176 | int _status; /* exit code */ | ||
177 | compat_s64 _utime; | ||
178 | compat_s64 _stime; | ||
179 | } _sigchld_x32; | ||
180 | |||
181 | /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ | ||
182 | struct { | ||
183 | unsigned int _addr; /* faulting insn/memory ref. */ | ||
184 | } _sigfault; | ||
185 | |||
186 | /* SIGPOLL */ | ||
187 | struct { | ||
188 | int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ | ||
189 | int _fd; | ||
190 | } _sigpoll; | ||
191 | |||
192 | struct { | ||
193 | unsigned int _call_addr; /* calling insn */ | ||
194 | int _syscall; /* triggering system call number */ | ||
195 | unsigned int _arch; /* AUDIT_ARCH_* of syscall */ | ||
196 | } _sigsys; | ||
197 | } _sifields; | ||
198 | } compat_siginfo_t; | ||
199 | |||
127 | #define COMPAT_OFF_T_MAX 0x7fffffff | 200 | #define COMPAT_OFF_T_MAX 0x7fffffff |
128 | #define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL | 201 | #define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL |
129 | 202 | ||
@@ -209,7 +282,6 @@ typedef struct user_regs_struct32 compat_elf_gregset_t; | |||
209 | * as pointers because the syscall entry code will have | 282 | * as pointers because the syscall entry code will have |
210 | * appropriately converted them already. | 283 | * appropriately converted them already. |
211 | */ | 284 | */ |
212 | typedef u32 compat_uptr_t; | ||
213 | 285 | ||
214 | static inline void __user *compat_ptr(compat_uptr_t uptr) | 286 | static inline void __user *compat_ptr(compat_uptr_t uptr) |
215 | { | 287 | { |
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 16cae425d1f8..8c297aa53eef 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -4,7 +4,9 @@ | |||
4 | #ifndef _ASM_X86_CPUFEATURE_H | 4 | #ifndef _ASM_X86_CPUFEATURE_H |
5 | #define _ASM_X86_CPUFEATURE_H | 5 | #define _ASM_X86_CPUFEATURE_H |
6 | 6 | ||
7 | #ifndef _ASM_X86_REQUIRED_FEATURES_H | ||
7 | #include <asm/required-features.h> | 8 | #include <asm/required-features.h> |
9 | #endif | ||
8 | 10 | ||
9 | #define NCAPINTS 10 /* N 32-bit words worth of info */ | 11 | #define NCAPINTS 10 /* N 32-bit words worth of info */ |
10 | 12 | ||
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 92f3c6ed817f..831dbb9c6c02 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -21,6 +21,7 @@ | |||
21 | #include <asm/user.h> | 21 | #include <asm/user.h> |
22 | #include <asm/uaccess.h> | 22 | #include <asm/uaccess.h> |
23 | #include <asm/xsave.h> | 23 | #include <asm/xsave.h> |
24 | #include <asm/smap.h> | ||
24 | 25 | ||
25 | #ifdef CONFIG_X86_64 | 26 | #ifdef CONFIG_X86_64 |
26 | # include <asm/sigcontext32.h> | 27 | # include <asm/sigcontext32.h> |
@@ -121,6 +122,22 @@ static inline void sanitize_i387_state(struct task_struct *tsk) | |||
121 | __sanitize_i387_state(tsk); | 122 | __sanitize_i387_state(tsk); |
122 | } | 123 | } |
123 | 124 | ||
125 | #define user_insn(insn, output, input...) \ | ||
126 | ({ \ | ||
127 | int err; \ | ||
128 | asm volatile(ASM_STAC "\n" \ | ||
129 | "1:" #insn "\n\t" \ | ||
130 | "2: " ASM_CLAC "\n" \ | ||
131 | ".section .fixup,\"ax\"\n" \ | ||
132 | "3: movl $-1,%[err]\n" \ | ||
133 | " jmp 2b\n" \ | ||
134 | ".previous\n" \ | ||
135 | _ASM_EXTABLE(1b, 3b) \ | ||
136 | : [err] "=r" (err), output \ | ||
137 | : "0"(0), input); \ | ||
138 | err; \ | ||
139 | }) | ||
140 | |||
124 | #define check_insn(insn, output, input...) \ | 141 | #define check_insn(insn, output, input...) \ |
125 | ({ \ | 142 | ({ \ |
126 | int err; \ | 143 | int err; \ |
@@ -138,18 +155,18 @@ static inline void sanitize_i387_state(struct task_struct *tsk) | |||
138 | 155 | ||
139 | static inline int fsave_user(struct i387_fsave_struct __user *fx) | 156 | static inline int fsave_user(struct i387_fsave_struct __user *fx) |
140 | { | 157 | { |
141 | return check_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx)); | 158 | return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx)); |
142 | } | 159 | } |
143 | 160 | ||
144 | static inline int fxsave_user(struct i387_fxsave_struct __user *fx) | 161 | static inline int fxsave_user(struct i387_fxsave_struct __user *fx) |
145 | { | 162 | { |
146 | if (config_enabled(CONFIG_X86_32)) | 163 | if (config_enabled(CONFIG_X86_32)) |
147 | return check_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx)); | 164 | return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx)); |
148 | else if (config_enabled(CONFIG_AS_FXSAVEQ)) | 165 | else if (config_enabled(CONFIG_AS_FXSAVEQ)) |
149 | return check_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx)); | 166 | return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx)); |
150 | 167 | ||
151 | /* See comment in fpu_fxsave() below. */ | 168 | /* See comment in fpu_fxsave() below. */ |
152 | return check_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx)); | 169 | return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx)); |
153 | } | 170 | } |
154 | 171 | ||
155 | static inline int fxrstor_checking(struct i387_fxsave_struct *fx) | 172 | static inline int fxrstor_checking(struct i387_fxsave_struct *fx) |
@@ -164,11 +181,28 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) | |||
164 | "m" (*fx)); | 181 | "m" (*fx)); |
165 | } | 182 | } |
166 | 183 | ||
184 | static inline int fxrstor_user(struct i387_fxsave_struct __user *fx) | ||
185 | { | ||
186 | if (config_enabled(CONFIG_X86_32)) | ||
187 | return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); | ||
188 | else if (config_enabled(CONFIG_AS_FXSAVEQ)) | ||
189 | return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); | ||
190 | |||
191 | /* See comment in fpu_fxsave() below. */ | ||
192 | return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), | ||
193 | "m" (*fx)); | ||
194 | } | ||
195 | |||
167 | static inline int frstor_checking(struct i387_fsave_struct *fx) | 196 | static inline int frstor_checking(struct i387_fsave_struct *fx) |
168 | { | 197 | { |
169 | return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)); | 198 | return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)); |
170 | } | 199 | } |
171 | 200 | ||
201 | static inline int frstor_user(struct i387_fsave_struct __user *fx) | ||
202 | { | ||
203 | return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)); | ||
204 | } | ||
205 | |||
172 | static inline void fpu_fxsave(struct fpu *fpu) | 206 | static inline void fpu_fxsave(struct fpu *fpu) |
173 | { | 207 | { |
174 | if (config_enabled(CONFIG_X86_32)) | 208 | if (config_enabled(CONFIG_X86_32)) |
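user_insn() differs from check_insn() only in wrapping the instruction in ASM_STAC/ASM_CLAC; these variants operate on user-space buffers, so they must open the SMAP access window around the faulting instruction. A hedged caller sketch (example_save_fx_to_user is hypothetical; the real callers are the signal frame save/restore paths):

static int example_save_fx_to_user(struct i387_fxsave_struct __user *buf)
{
	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
		return -EFAULT;

	/* fxsave_user() now expands through user_insn(): STAC, fxsave, CLAC */
	return fxsave_user(buf) ? -EFAULT : 0;
}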
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index 71ecbcba1a4e..f373046e63ec 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -9,10 +9,13 @@ | |||
9 | #include <asm/asm.h> | 9 | #include <asm/asm.h> |
10 | #include <asm/errno.h> | 10 | #include <asm/errno.h> |
11 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
12 | #include <asm/smap.h> | ||
12 | 13 | ||
13 | #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ | 14 | #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ |
14 | asm volatile("1:\t" insn "\n" \ | 15 | asm volatile("\t" ASM_STAC "\n" \ |
15 | "2:\t.section .fixup,\"ax\"\n" \ | 16 | "1:\t" insn "\n" \ |
17 | "2:\t" ASM_CLAC "\n" \ | ||
18 | "\t.section .fixup,\"ax\"\n" \ | ||
16 | "3:\tmov\t%3, %1\n" \ | 19 | "3:\tmov\t%3, %1\n" \ |
17 | "\tjmp\t2b\n" \ | 20 | "\tjmp\t2b\n" \ |
18 | "\t.previous\n" \ | 21 | "\t.previous\n" \ |
@@ -21,12 +24,14 @@ | |||
21 | : "i" (-EFAULT), "0" (oparg), "1" (0)) | 24 | : "i" (-EFAULT), "0" (oparg), "1" (0)) |
22 | 25 | ||
23 | #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ | 26 | #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ |
24 | asm volatile("1:\tmovl %2, %0\n" \ | 27 | asm volatile("\t" ASM_STAC "\n" \ |
28 | "1:\tmovl %2, %0\n" \ | ||
25 | "\tmovl\t%0, %3\n" \ | 29 | "\tmovl\t%0, %3\n" \ |
26 | "\t" insn "\n" \ | 30 | "\t" insn "\n" \ |
27 | "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \ | 31 | "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \ |
28 | "\tjnz\t1b\n" \ | 32 | "\tjnz\t1b\n" \ |
29 | "3:\t.section .fixup,\"ax\"\n" \ | 33 | "3:\t" ASM_CLAC "\n" \ |
34 | "\t.section .fixup,\"ax\"\n" \ | ||
30 | "4:\tmov\t%5, %1\n" \ | 35 | "4:\tmov\t%5, %1\n" \ |
31 | "\tjmp\t3b\n" \ | 36 | "\tjmp\t3b\n" \ |
32 | "\t.previous\n" \ | 37 | "\t.previous\n" \ |
@@ -122,8 +127,10 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
122 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | 127 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
123 | return -EFAULT; | 128 | return -EFAULT; |
124 | 129 | ||
125 | asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" | 130 | asm volatile("\t" ASM_STAC "\n" |
126 | "2:\t.section .fixup, \"ax\"\n" | 131 | "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" |
132 | "2:\t" ASM_CLAC "\n" | ||
133 | "\t.section .fixup, \"ax\"\n" | ||
127 | "3:\tmov %3, %0\n" | 134 | "3:\tmov %3, %0\n" |
128 | "\tjmp 2b\n" | 135 | "\tjmp 2b\n" |
129 | "\t.previous\n" | 136 | "\t.previous\n" |
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 439a9acc132d..bdd35dbd0605 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -90,4 +90,8 @@ static inline void arch_release_hugepage(struct page *page) | |||
90 | { | 90 | { |
91 | } | 91 | } |
92 | 92 | ||
93 | static inline void arch_clear_hugepage_flags(struct page *page) | ||
94 | { | ||
95 | } | ||
96 | |||
93 | #endif /* _ASM_X86_HUGETLB_H */ | 97 | #endif /* _ASM_X86_HUGETLB_H */ |
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index b04cbdb138cd..e6232773ce49 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -86,73 +86,6 @@ struct stat64 { | |||
86 | unsigned long long st_ino; | 86 | unsigned long long st_ino; |
87 | } __attribute__((packed)); | 87 | } __attribute__((packed)); |
88 | 88 | ||
89 | typedef struct compat_siginfo { | ||
90 | int si_signo; | ||
91 | int si_errno; | ||
92 | int si_code; | ||
93 | |||
94 | union { | ||
95 | int _pad[((128 / sizeof(int)) - 3)]; | ||
96 | |||
97 | /* kill() */ | ||
98 | struct { | ||
99 | unsigned int _pid; /* sender's pid */ | ||
100 | unsigned int _uid; /* sender's uid */ | ||
101 | } _kill; | ||
102 | |||
103 | /* POSIX.1b timers */ | ||
104 | struct { | ||
105 | compat_timer_t _tid; /* timer id */ | ||
106 | int _overrun; /* overrun count */ | ||
107 | compat_sigval_t _sigval; /* same as below */ | ||
108 | int _sys_private; /* not to be passed to user */ | ||
109 | int _overrun_incr; /* amount to add to overrun */ | ||
110 | } _timer; | ||
111 | |||
112 | /* POSIX.1b signals */ | ||
113 | struct { | ||
114 | unsigned int _pid; /* sender's pid */ | ||
115 | unsigned int _uid; /* sender's uid */ | ||
116 | compat_sigval_t _sigval; | ||
117 | } _rt; | ||
118 | |||
119 | /* SIGCHLD */ | ||
120 | struct { | ||
121 | unsigned int _pid; /* which child */ | ||
122 | unsigned int _uid; /* sender's uid */ | ||
123 | int _status; /* exit code */ | ||
124 | compat_clock_t _utime; | ||
125 | compat_clock_t _stime; | ||
126 | } _sigchld; | ||
127 | |||
128 | /* SIGCHLD (x32 version) */ | ||
129 | struct { | ||
130 | unsigned int _pid; /* which child */ | ||
131 | unsigned int _uid; /* sender's uid */ | ||
132 | int _status; /* exit code */ | ||
133 | compat_s64 _utime; | ||
134 | compat_s64 _stime; | ||
135 | } _sigchld_x32; | ||
136 | |||
137 | /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ | ||
138 | struct { | ||
139 | unsigned int _addr; /* faulting insn/memory ref. */ | ||
140 | } _sigfault; | ||
141 | |||
142 | /* SIGPOLL */ | ||
143 | struct { | ||
144 | int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ | ||
145 | int _fd; | ||
146 | } _sigpoll; | ||
147 | |||
148 | struct { | ||
149 | unsigned int _call_addr; /* calling insn */ | ||
150 | int _syscall; /* triggering system call number */ | ||
151 | unsigned int _arch; /* AUDIT_ARCH_* of syscall */ | ||
152 | } _sigsys; | ||
153 | } _sifields; | ||
154 | } compat_siginfo_t; | ||
155 | |||
156 | #define IA32_STACK_TOP IA32_PAGE_OFFSET | 89 | #define IA32_STACK_TOP IA32_PAGE_OFFSET |
157 | 90 | ||
158 | #ifdef __KERNEL__ | 91 | #ifdef __KERNEL__ |
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 41e08cb6a092..a65ec29e6ffb 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -41,6 +41,7 @@ | |||
41 | #define __KVM_HAVE_DEBUGREGS | 41 | #define __KVM_HAVE_DEBUGREGS |
42 | #define __KVM_HAVE_XSAVE | 42 | #define __KVM_HAVE_XSAVE |
43 | #define __KVM_HAVE_XCRS | 43 | #define __KVM_HAVE_XCRS |
44 | #define __KVM_HAVE_READONLY_MEM | ||
44 | 45 | ||
45 | /* Architectural interrupt line count. */ | 46 | /* Architectural interrupt line count. */ |
46 | #define KVM_NR_INTERRUPTS 256 | 47 | #define KVM_NR_INTERRUPTS 256 |
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index c764f43b71c5..15f960c06ff7 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -86,6 +86,19 @@ struct x86_instruction_info { | |||
86 | 86 | ||
87 | struct x86_emulate_ops { | 87 | struct x86_emulate_ops { |
88 | /* | 88 | /* |
89 | * read_gpr: read a general purpose register (rax - r15) | ||
90 | * | ||
91 | * @reg: gpr number. | ||
92 | */ | ||
93 | ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg); | ||
94 | /* | ||
95 | * write_gpr: write a general purpose register (rax - r15) | ||
96 | * | ||
97 | * @reg: gpr number. | ||
98 | * @val: value to write. | ||
99 | */ | ||
100 | void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val); | ||
101 | /* | ||
89 | * read_std: Read bytes of standard (non-emulated/special) memory. | 102 | * read_std: Read bytes of standard (non-emulated/special) memory. |
90 | * Used for descriptor reading. | 103 | * Used for descriptor reading. |
91 | * @addr: [IN ] Linear address from which to read. | 104 | * @addr: [IN ] Linear address from which to read. |
@@ -200,8 +213,9 @@ typedef u32 __attribute__((vector_size(16))) sse128_t; | |||
200 | 213 | ||
201 | /* Type, address-of, and value of an instruction's operand. */ | 214 | /* Type, address-of, and value of an instruction's operand. */ |
202 | struct operand { | 215 | struct operand { |
203 | enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_MM, OP_NONE } type; | 216 | enum { OP_REG, OP_MEM, OP_MEM_STR, OP_IMM, OP_XMM, OP_MM, OP_NONE } type; |
204 | unsigned int bytes; | 217 | unsigned int bytes; |
218 | unsigned int count; | ||
205 | union { | 219 | union { |
206 | unsigned long orig_val; | 220 | unsigned long orig_val; |
207 | u64 orig_val64; | 221 | u64 orig_val64; |
@@ -221,6 +235,7 @@ struct operand { | |||
221 | char valptr[sizeof(unsigned long) + 2]; | 235 | char valptr[sizeof(unsigned long) + 2]; |
222 | sse128_t vec_val; | 236 | sse128_t vec_val; |
223 | u64 mm_val; | 237 | u64 mm_val; |
238 | void *data; | ||
224 | }; | 239 | }; |
225 | }; | 240 | }; |
226 | 241 | ||
@@ -236,14 +251,23 @@ struct read_cache { | |||
236 | unsigned long end; | 251 | unsigned long end; |
237 | }; | 252 | }; |
238 | 253 | ||
254 | /* Execution mode, passed to the emulator. */ | ||
255 | enum x86emul_mode { | ||
256 | X86EMUL_MODE_REAL, /* Real mode. */ | ||
257 | X86EMUL_MODE_VM86, /* Virtual 8086 mode. */ | ||
258 | X86EMUL_MODE_PROT16, /* 16-bit protected mode. */ | ||
259 | X86EMUL_MODE_PROT32, /* 32-bit protected mode. */ | ||
260 | X86EMUL_MODE_PROT64, /* 64-bit (long) mode. */ | ||
261 | }; | ||
262 | |||
239 | struct x86_emulate_ctxt { | 263 | struct x86_emulate_ctxt { |
240 | struct x86_emulate_ops *ops; | 264 | const struct x86_emulate_ops *ops; |
241 | 265 | ||
242 | /* Register state before/after emulation. */ | 266 | /* Register state before/after emulation. */ |
243 | unsigned long eflags; | 267 | unsigned long eflags; |
244 | unsigned long eip; /* eip before instruction emulation */ | 268 | unsigned long eip; /* eip before instruction emulation */ |
245 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ | 269 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ |
246 | int mode; | 270 | enum x86emul_mode mode; |
247 | 271 | ||
248 | /* interruptibility state, as a result of execution of STI or MOV SS */ | 272 | /* interruptibility state, as a result of execution of STI or MOV SS */ |
249 | int interruptibility; | 273 | int interruptibility; |
@@ -281,8 +305,10 @@ struct x86_emulate_ctxt { | |||
281 | bool rip_relative; | 305 | bool rip_relative; |
282 | unsigned long _eip; | 306 | unsigned long _eip; |
283 | struct operand memop; | 307 | struct operand memop; |
308 | u32 regs_valid; /* bitmaps of registers in _regs[] that can be read */ | ||
309 | u32 regs_dirty; /* bitmaps of registers in _regs[] that have been written */ | ||
284 | /* Fields above regs are cleared together. */ | 310 | /* Fields above regs are cleared together. */ |
285 | unsigned long regs[NR_VCPU_REGS]; | 311 | unsigned long _regs[NR_VCPU_REGS]; |
286 | struct operand *memopp; | 312 | struct operand *memopp; |
287 | struct fetch_cache fetch; | 313 | struct fetch_cache fetch; |
288 | struct read_cache io_read; | 314 | struct read_cache io_read; |
@@ -293,17 +319,6 @@ struct x86_emulate_ctxt { | |||
293 | #define REPE_PREFIX 0xf3 | 319 | #define REPE_PREFIX 0xf3 |
294 | #define REPNE_PREFIX 0xf2 | 320 | #define REPNE_PREFIX 0xf2 |
295 | 321 | ||
296 | /* Execution mode, passed to the emulator. */ | ||
297 | #define X86EMUL_MODE_REAL 0 /* Real mode. */ | ||
298 | #define X86EMUL_MODE_VM86 1 /* Virtual 8086 mode. */ | ||
299 | #define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */ | ||
300 | #define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */ | ||
301 | #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ | ||
302 | |||
303 | /* any protected mode */ | ||
304 | #define X86EMUL_MODE_PROT (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \ | ||
305 | X86EMUL_MODE_PROT64) | ||
306 | |||
307 | /* CPUID vendors */ | 322 | /* CPUID vendors */ |
308 | #define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 | 323 | #define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 |
309 | #define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 | 324 | #define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 |
@@ -394,4 +409,7 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt, | |||
394 | u16 tss_selector, int idt_index, int reason, | 409 | u16 tss_selector, int idt_index, int reason, |
395 | bool has_error_code, u32 error_code); | 410 | bool has_error_code, u32 error_code); |
396 | int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq); | 411 | int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq); |
412 | void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt); | ||
413 | void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt); | ||
414 | |||
397 | #endif /* _ASM_X86_KVM_X86_EMULATE_H */ | 415 | #endif /* _ASM_X86_KVM_X86_EMULATE_H */ |
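The new read_gpr/write_gpr callbacks together with the regs_valid/regs_dirty bitmaps point at a lazy register cache: _regs[] is filled from the vcpu only on first use and flushed back only for entries actually written. A sketch of what the read side could look like (illustrative only; the real code is in arch/x86/kvm/emulate.c):

static ulong example_reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;	/* mark as cached */
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

/*
 * emulator_writeback_register_cache() would then push back only the
 * registers whose bit is set in ctxt->regs_dirty, via ops->write_gpr().
 */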
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1eaa6b056670..b2e11f452435 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -271,10 +271,24 @@ struct kvm_mmu { | |||
271 | union kvm_mmu_page_role base_role; | 271 | union kvm_mmu_page_role base_role; |
272 | bool direct_map; | 272 | bool direct_map; |
273 | 273 | ||
274 | /* | ||
275 | * Bitmap; bit set = permission fault | ||
276 | * Byte index: page fault error code [4:1] | ||
277 | * Bit index: pte permissions in ACC_* format | ||
278 | */ | ||
279 | u8 permissions[16]; | ||
280 | |||
274 | u64 *pae_root; | 281 | u64 *pae_root; |
275 | u64 *lm_root; | 282 | u64 *lm_root; |
276 | u64 rsvd_bits_mask[2][4]; | 283 | u64 rsvd_bits_mask[2][4]; |
277 | 284 | ||
285 | /* | ||
286 | * Bitmap: bit set = last pte in walk | ||
287 | * index[0:1]: level (zero-based) | ||
288 | * index[2]: pte.ps | ||
289 | */ | ||
290 | u8 last_pte_bitmap; | ||
291 | |||
278 | bool nx; | 292 | bool nx; |
279 | 293 | ||
280 | u64 pdptrs[4]; /* pae */ | 294 | u64 pdptrs[4]; /* pae */ |
@@ -398,12 +412,15 @@ struct kvm_vcpu_arch { | |||
398 | struct x86_emulate_ctxt emulate_ctxt; | 412 | struct x86_emulate_ctxt emulate_ctxt; |
399 | bool emulate_regs_need_sync_to_vcpu; | 413 | bool emulate_regs_need_sync_to_vcpu; |
400 | bool emulate_regs_need_sync_from_vcpu; | 414 | bool emulate_regs_need_sync_from_vcpu; |
415 | int (*complete_userspace_io)(struct kvm_vcpu *vcpu); | ||
401 | 416 | ||
402 | gpa_t time; | 417 | gpa_t time; |
403 | struct pvclock_vcpu_time_info hv_clock; | 418 | struct pvclock_vcpu_time_info hv_clock; |
404 | unsigned int hw_tsc_khz; | 419 | unsigned int hw_tsc_khz; |
405 | unsigned int time_offset; | 420 | unsigned int time_offset; |
406 | struct page *time_page; | 421 | struct page *time_page; |
422 | /* set guest stopped flag in pvclock flags field */ | ||
423 | bool pvclock_set_guest_stopped_request; | ||
407 | 424 | ||
408 | struct { | 425 | struct { |
409 | u64 msr_val; | 426 | u64 msr_val; |
@@ -438,6 +455,7 @@ struct kvm_vcpu_arch { | |||
438 | unsigned long dr6; | 455 | unsigned long dr6; |
439 | unsigned long dr7; | 456 | unsigned long dr7; |
440 | unsigned long eff_db[KVM_NR_DB_REGS]; | 457 | unsigned long eff_db[KVM_NR_DB_REGS]; |
458 | unsigned long guest_debug_dr7; | ||
441 | 459 | ||
442 | u64 mcg_cap; | 460 | u64 mcg_cap; |
443 | u64 mcg_status; | 461 | u64 mcg_status; |
@@ -484,14 +502,24 @@ struct kvm_vcpu_arch { | |||
484 | }; | 502 | }; |
485 | 503 | ||
486 | struct kvm_lpage_info { | 504 | struct kvm_lpage_info { |
487 | unsigned long rmap_pde; | ||
488 | int write_count; | 505 | int write_count; |
489 | }; | 506 | }; |
490 | 507 | ||
491 | struct kvm_arch_memory_slot { | 508 | struct kvm_arch_memory_slot { |
509 | unsigned long *rmap[KVM_NR_PAGE_SIZES]; | ||
492 | struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1]; | 510 | struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1]; |
493 | }; | 511 | }; |
494 | 512 | ||
513 | struct kvm_apic_map { | ||
514 | struct rcu_head rcu; | ||
515 | u8 ldr_bits; | ||
516 | /* fields bellow are used to decode ldr values in different modes */ | ||
517 | u32 cid_shift, cid_mask, lid_mask; | ||
518 | struct kvm_lapic *phys_map[256]; | ||
519 | /* first index is cluster id second is cpu id in a cluster */ | ||
520 | struct kvm_lapic *logical_map[16][16]; | ||
521 | }; | ||
522 | |||
495 | struct kvm_arch { | 523 | struct kvm_arch { |
496 | unsigned int n_used_mmu_pages; | 524 | unsigned int n_used_mmu_pages; |
497 | unsigned int n_requested_mmu_pages; | 525 | unsigned int n_requested_mmu_pages; |
@@ -509,6 +537,8 @@ struct kvm_arch { | |||
509 | struct kvm_ioapic *vioapic; | 537 | struct kvm_ioapic *vioapic; |
510 | struct kvm_pit *vpit; | 538 | struct kvm_pit *vpit; |
511 | int vapics_in_nmi_mode; | 539 | int vapics_in_nmi_mode; |
540 | struct mutex apic_map_lock; | ||
541 | struct kvm_apic_map *apic_map; | ||
512 | 542 | ||
513 | unsigned int tss_addr; | 543 | unsigned int tss_addr; |
514 | struct page *apic_access_page; | 544 | struct page *apic_access_page; |
@@ -602,8 +632,7 @@ struct kvm_x86_ops { | |||
602 | void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); | 632 | void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); |
603 | void (*vcpu_put)(struct kvm_vcpu *vcpu); | 633 | void (*vcpu_put)(struct kvm_vcpu *vcpu); |
604 | 634 | ||
605 | void (*set_guest_debug)(struct kvm_vcpu *vcpu, | 635 | void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu); |
606 | struct kvm_guest_debug *dbg); | ||
607 | int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); | 636 | int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); |
608 | int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); | 637 | int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); |
609 | u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); | 638 | u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); |
@@ -941,6 +970,7 @@ extern bool kvm_rebooting; | |||
941 | 970 | ||
942 | #define KVM_ARCH_WANT_MMU_NOTIFIER | 971 | #define KVM_ARCH_WANT_MMU_NOTIFIER |
943 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | 972 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); |
973 | int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); | ||
944 | int kvm_age_hva(struct kvm *kvm, unsigned long hva); | 974 | int kvm_age_hva(struct kvm *kvm, unsigned long hva); |
945 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); | 975 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); |
946 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | 976 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); |
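The permissions[] bitmap above turns the MMU permission check into a table lookup: page-fault error-code bits [4:1] select the byte and the pte's ACC_* access mask selects the bit, with a set bit meaning a permission fault. A worked sketch of the check this layout implies (example_permission_fault is illustrative; the real helper lives in the KVM MMU code):

static inline bool example_permission_fault(struct kvm_mmu *mmu,
					    unsigned pte_access, unsigned pfec)
{
	/* pfec >> 1 maps error-code bits [4:1] onto the byte index */
	return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
}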
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 2f7712e08b1e..eb3e9d85e1f1 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -102,21 +102,21 @@ struct kvm_vcpu_pv_apf_data { | |||
102 | extern void kvmclock_init(void); | 102 | extern void kvmclock_init(void); |
103 | extern int kvm_register_clock(char *txt); | 103 | extern int kvm_register_clock(char *txt); |
104 | 104 | ||
105 | #ifdef CONFIG_KVM_CLOCK | 105 | #ifdef CONFIG_KVM_GUEST |
106 | bool kvm_check_and_clear_guest_paused(void); | 106 | bool kvm_check_and_clear_guest_paused(void); |
107 | #else | 107 | #else |
108 | static inline bool kvm_check_and_clear_guest_paused(void) | 108 | static inline bool kvm_check_and_clear_guest_paused(void) |
109 | { | 109 | { |
110 | return false; | 110 | return false; |
111 | } | 111 | } |
112 | #endif /* CONFIG_KVMCLOCK */ | 112 | #endif /* CONFIG_KVM_GUEST */ |
113 | 113 | ||
114 | /* This instruction is vmcall. On non-VT architectures, it will generate a | 114 | /* This instruction is vmcall. On non-VT architectures, it will generate a |
115 | * trap that we will then rewrite to the appropriate instruction. | 115 | * trap that we will then rewrite to the appropriate instruction. |
116 | */ | 116 | */ |
117 | #define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1" | 117 | #define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1" |
118 | 118 | ||
119 | /* For KVM hypercalls, a three-byte sequence of either the vmrun or the vmmrun | 119 | /* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall |
120 | * instruction. The hypervisor may replace it with something else but only the | 120 | * instruction. The hypervisor may replace it with something else but only the |
121 | * instructions are guaranteed to be supported. | 121 | * instructions are guaranteed to be supported. |
122 | * | 122 | * |
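KVM_HYPERCALL emits the vmcall byte sequence directly; the hypercall number goes in rax, arguments in rbx, rcx, rdx and rsi, and the result comes back in rax. A one-argument sketch modeled on the existing kvm_hypercall*() helpers (example_hypercall is hypothetical):

static inline long example_hypercall(unsigned int nr, unsigned long p1)
{
	long ret;

	asm volatile(KVM_HYPERCALL
		     : "=a" (ret)
		     : "a" (nr), "b" (p1)
		     : "memory");
	return ret;
}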
diff --git a/arch/x86/include/asm/mmzone.h b/arch/x86/include/asm/mmzone.h
index 64217ea16a36..d497bc425cae 100644
--- a/arch/x86/include/asm/mmzone.h
+++ b/arch/x86/include/asm/mmzone.h
@@ -1,5 +1,5 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifdef CONFIG_X86_32 |
2 | # include "mmzone_32.h" | 2 | # include <asm/mmzone_32.h> |
3 | #else | 3 | #else |
4 | # include "mmzone_64.h" | 4 | # include <asm/mmzone_64.h> |
5 | #endif | 5 | #endif |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 07f96cb5cdb9..7f0edceb7563 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -253,6 +253,9 @@ | |||
253 | 253 | ||
254 | #define MSR_IA32_PERF_STATUS 0x00000198 | 254 | #define MSR_IA32_PERF_STATUS 0x00000198 |
255 | #define MSR_IA32_PERF_CTL 0x00000199 | 255 | #define MSR_IA32_PERF_CTL 0x00000199 |
256 | #define MSR_AMD_PSTATE_DEF_BASE 0xc0010064 | ||
257 | #define MSR_AMD_PERF_STATUS 0xc0010063 | ||
258 | #define MSR_AMD_PERF_CTL 0xc0010062 | ||
256 | 259 | ||
257 | #define MSR_IA32_MPERF 0x000000e7 | 260 | #define MSR_IA32_MPERF 0x000000e7 |
258 | #define MSR_IA32_APERF 0x000000e8 | 261 | #define MSR_IA32_APERF 0x000000e8 |
diff --git a/arch/x86/include/asm/mutex.h b/arch/x86/include/asm/mutex.h
index a731b9c573a6..7d3a48275394 100644
--- a/arch/x86/include/asm/mutex.h
+++ b/arch/x86/include/asm/mutex.h
@@ -1,5 +1,5 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifdef CONFIG_X86_32 |
2 | # include "mutex_32.h" | 2 | # include <asm/mutex_32.h> |
3 | #else | 3 | #else |
4 | # include "mutex_64.h" | 4 | # include <asm/mutex_64.h> |
5 | #endif | 5 | #endif |
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index bfacd2ccf651..49119fcea2dc 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -53,9 +53,9 @@ static inline int numa_cpu_node(int cpu) | |||
53 | #endif /* CONFIG_NUMA */ | 53 | #endif /* CONFIG_NUMA */ |
54 | 54 | ||
55 | #ifdef CONFIG_X86_32 | 55 | #ifdef CONFIG_X86_32 |
56 | # include "numa_32.h" | 56 | # include <asm/numa_32.h> |
57 | #else | 57 | #else |
58 | # include "numa_64.h" | 58 | # include <asm/numa_64.h> |
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | #ifdef CONFIG_NUMA | 61 | #ifdef CONFIG_NUMA |
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index df75d07571ce..6e41b9343928 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -141,7 +141,7 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq); | |||
141 | #endif /* __KERNEL__ */ | 141 | #endif /* __KERNEL__ */ |
142 | 142 | ||
143 | #ifdef CONFIG_X86_64 | 143 | #ifdef CONFIG_X86_64 |
144 | #include "pci_64.h" | 144 | #include <asm/pci_64.h> |
145 | #endif | 145 | #endif |
146 | 146 | ||
147 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ | 147 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 49afb3f41eb6..a1f780d45f76 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -146,8 +146,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd) | |||
146 | 146 | ||
147 | static inline int pmd_large(pmd_t pte) | 147 | static inline int pmd_large(pmd_t pte) |
148 | { | 148 | { |
149 | return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == | 149 | return pmd_flags(pte) & _PAGE_PSE; |
150 | (_PAGE_PSE | _PAGE_PRESENT); | ||
151 | } | 150 | } |
152 | 151 | ||
153 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 152 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
@@ -384,9 +383,9 @@ pte_t *populate_extra_pte(unsigned long vaddr); | |||
384 | #endif /* __ASSEMBLY__ */ | 383 | #endif /* __ASSEMBLY__ */ |
385 | 384 | ||
386 | #ifdef CONFIG_X86_32 | 385 | #ifdef CONFIG_X86_32 |
387 | # include "pgtable_32.h" | 386 | # include <asm/pgtable_32.h> |
388 | #else | 387 | #else |
389 | # include "pgtable_64.h" | 388 | # include <asm/pgtable_64.h> |
390 | #endif | 389 | #endif |
391 | 390 | ||
392 | #ifndef __ASSEMBLY__ | 391 | #ifndef __ASSEMBLY__ |
@@ -415,7 +414,13 @@ static inline int pte_hidden(pte_t pte) | |||
415 | 414 | ||
416 | static inline int pmd_present(pmd_t pmd) | 415 | static inline int pmd_present(pmd_t pmd) |
417 | { | 416 | { |
418 | return pmd_flags(pmd) & _PAGE_PRESENT; | 417 | /* |
418 | * Checking for _PAGE_PSE is needed too because | ||
419 | * split_huge_page will temporarily clear the present bit (but | ||
420 | * the _PAGE_PSE flag will remain set at all times while the | ||
421 | * _PAGE_PRESENT bit is clear). | ||
422 | */ | ||
423 | return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE); | ||
419 | } | 424 | } |
420 | 425 | ||
421 | static inline int pmd_none(pmd_t pmd) | 426 | static inline int pmd_none(pmd_t pmd) |
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 0c92113c4cb6..8faa215a503e 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -71,6 +71,7 @@ do { \ | |||
71 | * tables contain all the necessary information. | 71 | * tables contain all the necessary information. |
72 | */ | 72 | */ |
73 | #define update_mmu_cache(vma, address, ptep) do { } while (0) | 73 | #define update_mmu_cache(vma, address, ptep) do { } while (0) |
74 | #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) | ||
74 | 75 | ||
75 | #endif /* !__ASSEMBLY__ */ | 76 | #endif /* !__ASSEMBLY__ */ |
76 | 77 | ||
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 8251be02301e..47356f9df82e 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -143,6 +143,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; } | |||
143 | #define pte_unmap(pte) ((void)(pte))/* NOP */ | 143 | #define pte_unmap(pte) ((void)(pte))/* NOP */ |
144 | 144 | ||
145 | #define update_mmu_cache(vma, address, ptep) do { } while (0) | 145 | #define update_mmu_cache(vma, address, ptep) do { } while (0) |
146 | #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) | ||
146 | 147 | ||
147 | /* Encode and de-code a swap entry */ | 148 | /* Encode and de-code a swap entry */ |
148 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE | 149 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE |
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index db8fec6d2953..ec8a1fc9505d 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -174,9 +174,9 @@ | |||
174 | #endif | 174 | #endif |
175 | 175 | ||
176 | #ifdef CONFIG_X86_32 | 176 | #ifdef CONFIG_X86_32 |
177 | # include "pgtable_32_types.h" | 177 | # include <asm/pgtable_32_types.h> |
178 | #else | 178 | #else |
179 | # include "pgtable_64_types.h" | 179 | # include <asm/pgtable_64_types.h> |
180 | #endif | 180 | #endif |
181 | 181 | ||
182 | #ifndef __ASSEMBLY__ | 182 | #ifndef __ASSEMBLY__ |
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
index 7ef7c3020e5c..bad3665c25fc 100644
--- a/arch/x86/include/asm/posix_types.h
+++ b/arch/x86/include/asm/posix_types.h
@@ -1,15 +1,15 @@ | |||
1 | #ifdef __KERNEL__ | 1 | #ifdef __KERNEL__ |
2 | # ifdef CONFIG_X86_32 | 2 | # ifdef CONFIG_X86_32 |
3 | # include "posix_types_32.h" | 3 | # include <asm/posix_types_32.h> |
4 | # else | 4 | # else |
5 | # include "posix_types_64.h" | 5 | # include <asm/posix_types_64.h> |
6 | # endif | 6 | # endif |
7 | #else | 7 | #else |
8 | # ifdef __i386__ | 8 | # ifdef __i386__ |
9 | # include "posix_types_32.h" | 9 | # include <asm/posix_types_32.h> |
10 | # elif defined(__ILP32__) | 10 | # elif defined(__ILP32__) |
11 | # include "posix_types_x32.h" | 11 | # include <asm/posix_types_x32.h> |
12 | # else | 12 | # else |
13 | # include "posix_types_64.h" | 13 | # include <asm/posix_types_64.h> |
14 | # endif | 14 | # endif |
15 | #endif | 15 | #endif |
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index aea1d1d848c7..680cf09ed100 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -65,6 +65,7 @@ | |||
65 | #define X86_CR4_PCIDE 0x00020000 /* enable PCID support */ | 65 | #define X86_CR4_PCIDE 0x00020000 /* enable PCID support */ |
66 | #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */ | 66 | #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */ |
67 | #define X86_CR4_SMEP 0x00100000 /* enable SMEP support */ | 67 | #define X86_CR4_SMEP 0x00100000 /* enable SMEP support */ |
68 | #define X86_CR4_SMAP 0x00200000 /* enable SMAP support */ | ||
68 | 69 | ||
69 | /* | 70 | /* |
70 | * x86-64 Task Priority Register, CR8 | 71 | * x86-64 Task Priority Register, CR8 |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index b98c0d958ebb..ad1fc8511674 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -588,11 +588,6 @@ typedef struct { | |||
588 | } mm_segment_t; | 588 | } mm_segment_t; |
589 | 589 | ||
590 | 590 | ||
591 | /* | ||
592 | * create a kernel thread without removing it from tasklists | ||
593 | */ | ||
594 | extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | ||
595 | |||
596 | /* Free all resources held by a thread. */ | 591 | /* Free all resources held by a thread. */ |
597 | extern void release_thread(struct task_struct *); | 592 | extern void release_thread(struct task_struct *); |
598 | 593 | ||
diff --git a/arch/x86/include/asm/seccomp.h b/arch/x86/include/asm/seccomp.h
index c62e58a5a90d..0f3d7f099224 100644
--- a/arch/x86/include/asm/seccomp.h
+++ b/arch/x86/include/asm/seccomp.h
@@ -1,5 +1,5 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifdef CONFIG_X86_32 |
2 | # include "seccomp_32.h" | 2 | # include <asm/seccomp_32.h> |
3 | #else | 3 | #else |
4 | # include "seccomp_64.h" | 4 | # include <asm/seccomp_64.h> |
5 | #endif | 5 | #endif |
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
new file mode 100644
index 000000000000..8d3120f4e270
--- /dev/null
+++ b/arch/x86/include/asm/smap.h
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * Supervisor Mode Access Prevention support | ||
3 | * | ||
4 | * Copyright (C) 2012 Intel Corporation | ||
5 | * Author: H. Peter Anvin <hpa@linux.intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; version 2 | ||
10 | * of the License. | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_X86_SMAP_H | ||
14 | #define _ASM_X86_SMAP_H | ||
15 | |||
16 | #include <linux/stringify.h> | ||
17 | #include <asm/nops.h> | ||
18 | #include <asm/cpufeature.h> | ||
19 | |||
20 | /* "Raw" instruction opcodes */ | ||
21 | #define __ASM_CLAC .byte 0x0f,0x01,0xca | ||
22 | #define __ASM_STAC .byte 0x0f,0x01,0xcb | ||
23 | |||
24 | #ifdef __ASSEMBLY__ | ||
25 | |||
26 | #include <asm/alternative-asm.h> | ||
27 | |||
28 | #ifdef CONFIG_X86_SMAP | ||
29 | |||
30 | #define ASM_CLAC \ | ||
31 | 661: ASM_NOP3 ; \ | ||
32 | .pushsection .altinstr_replacement, "ax" ; \ | ||
33 | 662: __ASM_CLAC ; \ | ||
34 | .popsection ; \ | ||
35 | .pushsection .altinstructions, "a" ; \ | ||
36 | altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \ | ||
37 | .popsection | ||
38 | |||
39 | #define ASM_STAC \ | ||
40 | 661: ASM_NOP3 ; \ | ||
41 | .pushsection .altinstr_replacement, "ax" ; \ | ||
42 | 662: __ASM_STAC ; \ | ||
43 | .popsection ; \ | ||
44 | .pushsection .altinstructions, "a" ; \ | ||
45 | altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \ | ||
46 | .popsection | ||
47 | |||
48 | #else /* CONFIG_X86_SMAP */ | ||
49 | |||
50 | #define ASM_CLAC | ||
51 | #define ASM_STAC | ||
52 | |||
53 | #endif /* CONFIG_X86_SMAP */ | ||
54 | |||
55 | #else /* __ASSEMBLY__ */ | ||
56 | |||
57 | #include <asm/alternative.h> | ||
58 | |||
59 | #ifdef CONFIG_X86_SMAP | ||
60 | |||
61 | static __always_inline void clac(void) | ||
62 | { | ||
63 | /* Note: a barrier is implicit in alternative() */ | ||
64 | alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP); | ||
65 | } | ||
66 | |||
67 | static __always_inline void stac(void) | ||
68 | { | ||
69 | /* Note: a barrier is implicit in alternative() */ | ||
70 | alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP); | ||
71 | } | ||
72 | |||
73 | /* These macros can be used in asm() statements */ | ||
74 | #define ASM_CLAC \ | ||
75 | ALTERNATIVE(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP) | ||
76 | #define ASM_STAC \ | ||
77 | ALTERNATIVE(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP) | ||
78 | |||
79 | #else /* CONFIG_X86_SMAP */ | ||
80 | |||
81 | static inline void clac(void) { } | ||
82 | static inline void stac(void) { } | ||
83 | |||
84 | #define ASM_CLAC | ||
85 | #define ASM_STAC | ||
86 | |||
87 | #endif /* CONFIG_X86_SMAP */ | ||
88 | |||
89 | #endif /* __ASSEMBLY__ */ | ||
90 | |||
91 | #endif /* _ASM_X86_SMAP_H */ | ||
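With CONFIG_X86_SMAP enabled, stac()/clac() and ASM_STAC/ASM_CLAC are patched into real STAC/CLAC instructions through the alternatives machinery, and cost only a 3-byte nop otherwise. A hedged sketch of the intended bracketing of a user-space access, mirroring the pattern the futex.h and uaccess.h hunks in this series apply (example_get_user_u32 is hypothetical):

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/asm.h>		/* _ASM_EXTABLE */
#include <asm/smap.h>

static inline int example_get_user_u32(u32 *dst, const u32 __user *src)
{
	int err = 0;

	asm volatile(ASM_STAC "\n"			/* open the user-access window  */
		     "1:	movl %2,%1\n"		/* may fault                    */
		     "2:	" ASM_CLAC "\n"		/* close the user-access window */
		     ".section .fixup,\"ax\"\n"
		     "3:	movl %3,%0\n"
		     "	xorl %1,%1\n"
		     "	jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : "=r" (err), "=r" (*dst)
		     : "m" (*src), "i" (-EFAULT), "0" (err));
	return err;
}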
diff --git a/arch/x86/include/asm/string.h b/arch/x86/include/asm/string.h
index 6dfd6d9373a0..09224d7a5862 100644
--- a/arch/x86/include/asm/string.h
+++ b/arch/x86/include/asm/string.h
@@ -1,5 +1,5 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifdef CONFIG_X86_32 |
2 | # include "string_32.h" | 2 | # include <asm/string_32.h> |
3 | #else | 3 | #else |
4 | # include "string_64.h" | 4 | # include <asm/string_64.h> |
5 | #endif | 5 | #endif |
diff --git a/arch/x86/include/asm/suspend.h b/arch/x86/include/asm/suspend.h
index 9bd521fe4570..2fab6c2c3575 100644
--- a/arch/x86/include/asm/suspend.h
+++ b/arch/x86/include/asm/suspend.h
@@ -1,5 +1,5 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifdef CONFIG_X86_32 |
2 | # include "suspend_32.h" | 2 | # include <asm/suspend_32.h> |
3 | #else | 3 | #else |
4 | # include "suspend_64.h" | 4 | # include <asm/suspend_64.h> |
5 | #endif | 5 | #endif |
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 4ca1c611b552..a9a8cf3da49d 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -54,8 +54,6 @@ asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32); | |||
54 | asmlinkage long sys32_personality(unsigned long); | 54 | asmlinkage long sys32_personality(unsigned long); |
55 | asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); | 55 | asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); |
56 | 56 | ||
57 | asmlinkage long sys32_execve(const char __user *, compat_uptr_t __user *, | ||
58 | compat_uptr_t __user *, struct pt_regs *); | ||
59 | asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); | 57 | asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); |
60 | 58 | ||
61 | long sys32_lseek(unsigned int, int, unsigned int); | 59 | long sys32_lseek(unsigned int, int, unsigned int); |
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index f1d8b441fc77..2be0b880417e 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -25,7 +25,7 @@ int sys_fork(struct pt_regs *); | |||
25 | int sys_vfork(struct pt_regs *); | 25 | int sys_vfork(struct pt_regs *); |
26 | long sys_execve(const char __user *, | 26 | long sys_execve(const char __user *, |
27 | const char __user *const __user *, | 27 | const char __user *const __user *, |
28 | const char __user *const __user *, struct pt_regs *); | 28 | const char __user *const __user *); |
29 | long sys_clone(unsigned long, unsigned long, void __user *, | 29 | long sys_clone(unsigned long, unsigned long, void __user *, |
30 | void __user *, struct pt_regs *); | 30 | void __user *, struct pt_regs *); |
31 | 31 | ||
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index c535d847e3b5..2d946e63ee82 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -79,7 +79,6 @@ struct thread_info { | |||
79 | #define TIF_SIGPENDING 2 /* signal pending */ | 79 | #define TIF_SIGPENDING 2 /* signal pending */ |
80 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ | 80 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ |
81 | #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ | 81 | #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ |
82 | #define TIF_IRET 5 /* force IRET */ | ||
83 | #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ | 82 | #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ |
84 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | 83 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
85 | #define TIF_SECCOMP 8 /* secure computing */ | 84 | #define TIF_SECCOMP 8 /* secure computing */ |
@@ -105,7 +104,6 @@ struct thread_info { | |||
105 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 104 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
106 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | 105 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
107 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | 106 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
108 | #define _TIF_IRET (1 << TIF_IRET) | ||
109 | #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) | 107 | #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) |
110 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | 108 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
111 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | 109 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index e1f3a17034fc..7ccf8d131535 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/string.h> | 9 | #include <linux/string.h> |
10 | #include <asm/asm.h> | 10 | #include <asm/asm.h> |
11 | #include <asm/page.h> | 11 | #include <asm/page.h> |
12 | #include <asm/smap.h> | ||
12 | 13 | ||
13 | #define VERIFY_READ 0 | 14 | #define VERIFY_READ 0 |
14 | #define VERIFY_WRITE 1 | 15 | #define VERIFY_WRITE 1 |
@@ -192,9 +193,10 @@ extern int __get_user_bad(void); | |||
192 | 193 | ||
193 | #ifdef CONFIG_X86_32 | 194 | #ifdef CONFIG_X86_32 |
194 | #define __put_user_asm_u64(x, addr, err, errret) \ | 195 | #define __put_user_asm_u64(x, addr, err, errret) \ |
195 | asm volatile("1: movl %%eax,0(%2)\n" \ | 196 | asm volatile(ASM_STAC "\n" \ |
197 | "1: movl %%eax,0(%2)\n" \ | ||
196 | "2: movl %%edx,4(%2)\n" \ | 198 | "2: movl %%edx,4(%2)\n" \ |
197 | "3:\n" \ | 199 | "3: " ASM_CLAC "\n" \ |
198 | ".section .fixup,\"ax\"\n" \ | 200 | ".section .fixup,\"ax\"\n" \ |
199 | "4: movl %3,%0\n" \ | 201 | "4: movl %3,%0\n" \ |
200 | " jmp 3b\n" \ | 202 | " jmp 3b\n" \ |
@@ -205,9 +207,10 @@ extern int __get_user_bad(void); | |||
205 | : "A" (x), "r" (addr), "i" (errret), "0" (err)) | 207 | : "A" (x), "r" (addr), "i" (errret), "0" (err)) |
206 | 208 | ||
207 | #define __put_user_asm_ex_u64(x, addr) \ | 209 | #define __put_user_asm_ex_u64(x, addr) \ |
208 | asm volatile("1: movl %%eax,0(%1)\n" \ | 210 | asm volatile(ASM_STAC "\n" \ |
211 | "1: movl %%eax,0(%1)\n" \ | ||
209 | "2: movl %%edx,4(%1)\n" \ | 212 | "2: movl %%edx,4(%1)\n" \ |
210 | "3:\n" \ | 213 | "3: " ASM_CLAC "\n" \ |
211 | _ASM_EXTABLE_EX(1b, 2b) \ | 214 | _ASM_EXTABLE_EX(1b, 2b) \ |
212 | _ASM_EXTABLE_EX(2b, 3b) \ | 215 | _ASM_EXTABLE_EX(2b, 3b) \ |
213 | : : "A" (x), "r" (addr)) | 216 | : : "A" (x), "r" (addr)) |
@@ -379,8 +382,9 @@ do { \ | |||
379 | } while (0) | 382 | } while (0) |
380 | 383 | ||
381 | #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ | 384 | #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ |
382 | asm volatile("1: mov"itype" %2,%"rtype"1\n" \ | 385 | asm volatile(ASM_STAC "\n" \ |
383 | "2:\n" \ | 386 | "1: mov"itype" %2,%"rtype"1\n" \ |
387 | "2: " ASM_CLAC "\n" \ | ||
384 | ".section .fixup,\"ax\"\n" \ | 388 | ".section .fixup,\"ax\"\n" \ |
385 | "3: mov %3,%0\n" \ | 389 | "3: mov %3,%0\n" \ |
386 | " xor"itype" %"rtype"1,%"rtype"1\n" \ | 390 | " xor"itype" %"rtype"1,%"rtype"1\n" \ |
@@ -443,8 +447,9 @@ struct __large_struct { unsigned long buf[100]; }; | |||
443 | * aliasing issues. | 447 | * aliasing issues. |
444 | */ | 448 | */ |
445 | #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ | 449 | #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ |
446 | asm volatile("1: mov"itype" %"rtype"1,%2\n" \ | 450 | asm volatile(ASM_STAC "\n" \ |
447 | "2:\n" \ | 451 | "1: mov"itype" %"rtype"1,%2\n" \ |
452 | "2: " ASM_CLAC "\n" \ | ||
448 | ".section .fixup,\"ax\"\n" \ | 453 | ".section .fixup,\"ax\"\n" \ |
449 | "3: mov %3,%0\n" \ | 454 | "3: mov %3,%0\n" \ |
450 | " jmp 2b\n" \ | 455 | " jmp 2b\n" \ |
@@ -463,13 +468,13 @@ struct __large_struct { unsigned long buf[100]; }; | |||
463 | * uaccess_try and catch | 468 | * uaccess_try and catch |
464 | */ | 469 | */ |
465 | #define uaccess_try do { \ | 470 | #define uaccess_try do { \ |
466 | int prev_err = current_thread_info()->uaccess_err; \ | ||
467 | current_thread_info()->uaccess_err = 0; \ | 471 | current_thread_info()->uaccess_err = 0; \ |
472 | stac(); \ | ||
468 | barrier(); | 473 | barrier(); |
469 | 474 | ||
470 | #define uaccess_catch(err) \ | 475 | #define uaccess_catch(err) \ |
476 | clac(); \ | ||
471 | (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \ | 477 | (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \ |
472 | current_thread_info()->uaccess_err = prev_err; \ | ||
473 | } while (0) | 478 | } while (0) |
474 | 479 | ||
475 | /** | 480 | /** |
@@ -569,6 +574,9 @@ strncpy_from_user(char *dst, const char __user *src, long count); | |||
569 | extern __must_check long strlen_user(const char __user *str); | 574 | extern __must_check long strlen_user(const char __user *str); |
570 | extern __must_check long strnlen_user(const char __user *str, long n); | 575 | extern __must_check long strnlen_user(const char __user *str, long n); |
571 | 576 | ||
577 | unsigned long __must_check clear_user(void __user *mem, unsigned long len); | ||
578 | unsigned long __must_check __clear_user(void __user *mem, unsigned long len); | ||
579 | |||
572 | /* | 580 | /* |
573 | * movsl can be slow when source and dest are not both 8-byte aligned | 581 | * movsl can be slow when source and dest are not both 8-byte aligned |
574 | */ | 582 | */ |
@@ -581,9 +589,9 @@ extern struct movsl_mask { | |||
581 | #define ARCH_HAS_NOCACHE_UACCESS 1 | 589 | #define ARCH_HAS_NOCACHE_UACCESS 1 |
582 | 590 | ||
583 | #ifdef CONFIG_X86_32 | 591 | #ifdef CONFIG_X86_32 |
584 | # include "uaccess_32.h" | 592 | # include <asm/uaccess_32.h> |
585 | #else | 593 | #else |
586 | # include "uaccess_64.h" | 594 | # include <asm/uaccess_64.h> |
587 | #endif | 595 | #endif |
588 | 596 | ||
589 | #endif /* _ASM_X86_UACCESS_H */ | 597 | #endif /* _ASM_X86_UACCESS_H */ |
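Note: three related changes land in this header. First, the user-access asm blocks gain SMAP bracketing: ASM_STAC opens the user-access window before the faulting instruction and ASM_CLAC closes it on the normal exit path (the new <asm/smap.h> include supplies both; on CPUs without SMAP they presumably expand to nothing, though that is not shown in this hunk). Second, uaccess_try/uaccess_catch no longer save and restore a prev_err copy of uaccess_err. The post-image of the pair, reconstructed from the + column with short comments added:

    #define uaccess_try    do {                                             \
            current_thread_info()->uaccess_err = 0;  /* clear fault flag */ \
            stac();                                  /* open user access */ \
            barrier();

    #define uaccess_catch(err)                                              \
            clac();                                  /* close user access */ \
            (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);    \
    } while (0)

Third, the clear_user()/__clear_user() prototypes are declared here once, replacing the duplicate declarations removed from uaccess_32.h and uaccess_64.h below, and the quoted sub-arch includes switch to the <asm/...> form.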
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 576e39bca6ad..7f760a9f1f61 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h | |||
@@ -213,7 +213,4 @@ static inline unsigned long __must_check copy_from_user(void *to, | |||
213 | return n; | 213 | return n; |
214 | } | 214 | } |
215 | 215 | ||
216 | unsigned long __must_check clear_user(void __user *mem, unsigned long len); | ||
217 | unsigned long __must_check __clear_user(void __user *mem, unsigned long len); | ||
218 | |||
219 | #endif /* _ASM_X86_UACCESS_32_H */ | 216 | #endif /* _ASM_X86_UACCESS_32_H */ |
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index d8def8b3dba0..142810c457dc 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -217,9 +217,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) | |||
217 | } | 217 | } |
218 | } | 218 | } |
219 | 219 | ||
220 | __must_check unsigned long clear_user(void __user *mem, unsigned long len); | ||
221 | __must_check unsigned long __clear_user(void __user *mem, unsigned long len); | ||
222 | |||
223 | static __must_check __always_inline int | 220 | static __must_check __always_inline int |
224 | __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) | 221 | __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) |
225 | { | 222 | { |
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h index 0d9776e9e2dc..16f3fc6ebf2e 100644 --- a/arch/x86/include/asm/unistd.h +++ b/arch/x86/include/asm/unistd.h | |||
@@ -50,6 +50,7 @@ | |||
50 | # define __ARCH_WANT_SYS_TIME | 50 | # define __ARCH_WANT_SYS_TIME |
51 | # define __ARCH_WANT_SYS_UTIME | 51 | # define __ARCH_WANT_SYS_UTIME |
52 | # define __ARCH_WANT_SYS_WAITPID | 52 | # define __ARCH_WANT_SYS_WAITPID |
53 | # define __ARCH_WANT_SYS_EXECVE | ||
53 | 54 | ||
54 | /* | 55 | /* |
55 | * "Conditional" syscalls | 56 | * "Conditional" syscalls |
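Note: __ARCH_WANT_SYS_EXECVE joins the other __ARCH_WANT_* opt-ins; it is the switch that the pt_regs-free sys_execve prototype earlier in this diff depends on. The consumer side is outside this diff, but such gates are conventionally tested with a plain ifdef, along the lines of this hypothetical sketch:

    #ifdef __ARCH_WANT_SYS_EXECVE
    /* build the generic execve entry point for this architecture */
    #endif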
diff --git a/arch/x86/include/asm/user.h b/arch/x86/include/asm/user.h index 24532c7da3d6..ccab4af1646d 100644 --- a/arch/x86/include/asm/user.h +++ b/arch/x86/include/asm/user.h | |||
@@ -2,9 +2,9 @@ | |||
2 | #define _ASM_X86_USER_H | 2 | #define _ASM_X86_USER_H |
3 | 3 | ||
4 | #ifdef CONFIG_X86_32 | 4 | #ifdef CONFIG_X86_32 |
5 | # include "user_32.h" | 5 | # include <asm/user_32.h> |
6 | #else | 6 | #else |
7 | # include "user_64.h" | 7 | # include <asm/user_64.h> |
8 | #endif | 8 | #endif |
9 | 9 | ||
10 | #include <asm/types.h> | 10 | #include <asm/types.h> |
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index 8b38be2de9e1..46e24d36b7da 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h | |||
@@ -17,8 +17,8 @@ struct vsyscall_gtod_data { | |||
17 | 17 | ||
18 | /* open coded 'struct timespec' */ | 18 | /* open coded 'struct timespec' */ |
19 | time_t wall_time_sec; | 19 | time_t wall_time_sec; |
20 | u32 wall_time_nsec; | 20 | u64 wall_time_snsec; |
21 | u32 monotonic_time_nsec; | 21 | u64 monotonic_time_snsec; |
22 | time_t monotonic_time_sec; | 22 | time_t monotonic_time_sec; |
23 | 23 | ||
24 | struct timezone sys_tz; | 24 | struct timezone sys_tz; |
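Note: the gtod fields change from 32-bit plain nanoseconds (wall_time_nsec, monotonic_time_nsec) to 64-bit shifted nanoseconds (wall_time_snsec, monotonic_time_snsec). Keeping the value pre-scaled by the clocksource shift saves work in the vDSO reader, but the scaled quantity no longer fits in 32 bits, hence u64. A hedged illustration of the idea (helper name and calling convention are not from this diff):

    /* Convert a shift-scaled nanosecond count back to plain nanoseconds. */
    static inline unsigned long long snsec_to_nsec(unsigned long long snsec,
                                                   unsigned int shift)
    {
            return snsec >> shift;
    }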
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h index cbf0c9d50b92..6d2f75a82a14 100644 --- a/arch/x86/include/asm/xen/interface.h +++ b/arch/x86/include/asm/xen/interface.h | |||
@@ -47,6 +47,11 @@ | |||
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | #ifndef __ASSEMBLY__ | 49 | #ifndef __ASSEMBLY__ |
50 | /* Explicitly size integers that represent pfns in the public interface | ||
51 | * with Xen so that on ARM we can have one ABI that works for 32 and 64 | ||
52 | * bit guests. */ | ||
53 | typedef unsigned long xen_pfn_t; | ||
54 | typedef unsigned long xen_ulong_t; | ||
50 | /* Guest handles for primitive C types. */ | 55 | /* Guest handles for primitive C types. */ |
51 | __DEFINE_GUEST_HANDLE(uchar, unsigned char); | 56 | __DEFINE_GUEST_HANDLE(uchar, unsigned char); |
52 | __DEFINE_GUEST_HANDLE(uint, unsigned int); | 57 | __DEFINE_GUEST_HANDLE(uint, unsigned int); |
@@ -57,6 +62,7 @@ DEFINE_GUEST_HANDLE(long); | |||
57 | DEFINE_GUEST_HANDLE(void); | 62 | DEFINE_GUEST_HANDLE(void); |
58 | DEFINE_GUEST_HANDLE(uint64_t); | 63 | DEFINE_GUEST_HANDLE(uint64_t); |
59 | DEFINE_GUEST_HANDLE(uint32_t); | 64 | DEFINE_GUEST_HANDLE(uint32_t); |
65 | DEFINE_GUEST_HANDLE(xen_pfn_t); | ||
60 | #endif | 66 | #endif |
61 | 67 | ||
62 | #ifndef HYPERVISOR_VIRT_START | 68 | #ifndef HYPERVISOR_VIRT_START |
@@ -116,11 +122,13 @@ struct arch_shared_info { | |||
116 | #endif /* !__ASSEMBLY__ */ | 122 | #endif /* !__ASSEMBLY__ */ |
117 | 123 | ||
118 | #ifdef CONFIG_X86_32 | 124 | #ifdef CONFIG_X86_32 |
119 | #include "interface_32.h" | 125 | #include <asm/xen/interface_32.h> |
120 | #else | 126 | #else |
121 | #include "interface_64.h" | 127 | #include <asm/xen/interface_64.h> |
122 | #endif | 128 | #endif |
123 | 129 | ||
130 | #include <asm/pvclock-abi.h> | ||
131 | |||
124 | #ifndef __ASSEMBLY__ | 132 | #ifndef __ASSEMBLY__ |
125 | /* | 133 | /* |
126 | * The following is all CPU context. Note that the fpu_ctxt block is filled | 134 | * The following is all CPU context. Note that the fpu_ctxt block is filled |
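Note: xen_pfn_t and xen_ulong_t get explicit typedefs (still unsigned long on x86) so the public Xen interface can describe pfns with a fixed width usable by both 32- and 64-bit ARM guests, as the new comment states, and a guest handle is registered for the new type so hypercall structures can carry typed pfn arrays. The <asm/pvclock-abi.h> include is pulled in after the 32/64-bit interface headers, which themselves move from quoted to <asm/xen/...> includes. As a rough illustration only (the real __DEFINE_GUEST_HANDLE macro lives earlier in this header and is not part of the hunk), a guest handle amounts to a typed wrapper around a guest pointer:

    /* Hypothetical expansion sketch for DEFINE_GUEST_HANDLE(xen_pfn_t). */
    typedef struct { xen_pfn_t *p; } __guest_handle_xen_pfn_t;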
diff --git a/arch/x86/include/asm/xen/swiotlb-xen.h b/arch/x86/include/asm/xen/swiotlb-xen.h index 1be1ab7d6a41..ee52fcac6f72 100644 --- a/arch/x86/include/asm/xen/swiotlb-xen.h +++ b/arch/x86/include/asm/xen/swiotlb-xen.h | |||
@@ -5,10 +5,12 @@ | |||
5 | extern int xen_swiotlb; | 5 | extern int xen_swiotlb; |
6 | extern int __init pci_xen_swiotlb_detect(void); | 6 | extern int __init pci_xen_swiotlb_detect(void); |
7 | extern void __init pci_xen_swiotlb_init(void); | 7 | extern void __init pci_xen_swiotlb_init(void); |
8 | extern int pci_xen_swiotlb_init_late(void); | ||
8 | #else | 9 | #else |
9 | #define xen_swiotlb (0) | 10 | #define xen_swiotlb (0) |
10 | static inline int __init pci_xen_swiotlb_detect(void) { return 0; } | 11 | static inline int __init pci_xen_swiotlb_detect(void) { return 0; } |
11 | static inline void __init pci_xen_swiotlb_init(void) { } | 12 | static inline void __init pci_xen_swiotlb_init(void) { } |
13 | static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; } | ||
12 | #endif | 14 | #endif |
13 | 15 | ||
14 | #endif /* _ASM_X86_SWIOTLB_XEN_H */ | 16 | #endif /* _ASM_X86_SWIOTLB_XEN_H */ |
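Note: pci_xen_swiotlb_init_late() appears intended for callers that only discover after boot that they need the Xen software IOTLB (it is not marked __init, unlike the existing detect/init pair), and the stub in the non-Xen-swiotlb configuration returns -ENXIO so such callers can fail cleanly. A hypothetical caller sketch (everything except pci_xen_swiotlb_init_late() is illustrative):

    static int maybe_enable_xen_bounce_buffers(void)
    {
            int rc = pci_xen_swiotlb_init_late();
            if (rc)
                    return rc;      /* -ENXIO when support is compiled out */
            /* proceed with normal DMA setup from here */
            return 0;
    }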
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h index 7fcf6f3dbcc3..f8fde90bc45e 100644 --- a/arch/x86/include/asm/xor.h +++ b/arch/x86/include/asm/xor.h | |||
@@ -3,8 +3,8 @@ | |||
3 | # include <asm-generic/xor.h> | 3 | # include <asm-generic/xor.h> |
4 | #else | 4 | #else |
5 | #ifdef CONFIG_X86_32 | 5 | #ifdef CONFIG_X86_32 |
6 | # include "xor_32.h" | 6 | # include <asm/xor_32.h> |
7 | #else | 7 | #else |
8 | # include "xor_64.h" | 8 | # include <asm/xor_64.h> |
9 | #endif | 9 | #endif |
10 | #endif | 10 | #endif |
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h index aabd5850bdb9..f79cb7ec0e06 100644 --- a/arch/x86/include/asm/xor_32.h +++ b/arch/x86/include/asm/xor_32.h | |||
@@ -822,7 +822,7 @@ static struct xor_block_template xor_block_pIII_sse = { | |||
822 | }; | 822 | }; |
823 | 823 | ||
824 | /* Also try the AVX routines */ | 824 | /* Also try the AVX routines */ |
825 | #include "xor_avx.h" | 825 | #include <asm/xor_avx.h> |
826 | 826 | ||
827 | /* Also try the generic routines. */ | 827 | /* Also try the generic routines. */ |
828 | #include <asm-generic/xor.h> | 828 | #include <asm-generic/xor.h> |
diff --git a/arch/x86/include/asm/xor_64.h b/arch/x86/include/asm/xor_64.h index 5fc06d0b7eb5..87ac522c4af5 100644 --- a/arch/x86/include/asm/xor_64.h +++ b/arch/x86/include/asm/xor_64.h | |||
@@ -306,7 +306,7 @@ static struct xor_block_template xor_block_sse = { | |||
306 | 306 | ||
307 | 307 | ||
308 | /* Also try the AVX routines */ | 308 | /* Also try the AVX routines */ |
309 | #include "xor_avx.h" | 309 | #include <asm/xor_avx.h> |
310 | 310 | ||
311 | #undef XOR_TRY_TEMPLATES | 311 | #undef XOR_TRY_TEMPLATES |
312 | #define XOR_TRY_TEMPLATES \ | 312 | #define XOR_TRY_TEMPLATES \ |
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 2ddee1b87793..0415cdabb5a6 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h | |||
@@ -70,8 +70,9 @@ static inline int xsave_user(struct xsave_struct __user *buf) | |||
70 | if (unlikely(err)) | 70 | if (unlikely(err)) |
71 | return -EFAULT; | 71 | return -EFAULT; |
72 | 72 | ||
73 | __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" | 73 | __asm__ __volatile__(ASM_STAC "\n" |
74 | "2:\n" | 74 | "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" |
75 | "2: " ASM_CLAC "\n" | ||
75 | ".section .fixup,\"ax\"\n" | 76 | ".section .fixup,\"ax\"\n" |
76 | "3: movl $-1,%[err]\n" | 77 | "3: movl $-1,%[err]\n" |
77 | " jmp 2b\n" | 78 | " jmp 2b\n" |
@@ -90,8 +91,9 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) | |||
90 | u32 lmask = mask; | 91 | u32 lmask = mask; |
91 | u32 hmask = mask >> 32; | 92 | u32 hmask = mask >> 32; |
92 | 93 | ||
93 | __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" | 94 | __asm__ __volatile__(ASM_STAC "\n" |
94 | "2:\n" | 95 | "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" |
96 | "2: " ASM_CLAC "\n" | ||
95 | ".section .fixup,\"ax\"\n" | 97 | ".section .fixup,\"ax\"\n" |
96 | "3: movl $-1,%[err]\n" | 98 | "3: movl $-1,%[err]\n" |
97 | " jmp 2b\n" | 99 | " jmp 2b\n" |
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild new file mode 100644 index 000000000000..83b6e9a0dce4 --- /dev/null +++ b/arch/x86/include/uapi/asm/Kbuild | |||
@@ -0,0 +1,6 @@ | |||
1 | # UAPI Header export list | ||
2 | include include/uapi/asm-generic/Kbuild.asm | ||
3 | |||
4 | genhdr-y += unistd_32.h | ||
5 | genhdr-y += unistd_64.h | ||
6 | genhdr-y += unistd_x32.h | ||