author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-01 16:59:17 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-01 16:59:17 -0400
commit		15385dfe7e0fa6866b204dd0d14aec2cc48fc0a7 (patch)
tree		3ddcb000ec3b82f672fa892e8e44b1be4a5ebb33
parent		a57d985e378ca69f430b85852e4187db3698a89e (diff)
parent		b2cc2a074de75671bbed5e2dda67a9252ef353ea (diff)
Merge branch 'x86-smap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86/smap support from Ingo Molnar:
"This adds support for the SMAP (Supervisor Mode Access Prevention) CPU
feature on Intel CPUs: a hardware feature that prevents unintended
user-space data access from kernel privileged code.
It's turned on automatically when possible.
This, in combination with SMEP, makes it even harder to exploit kernel
bugs such as NULL pointer dereferences."
Fix up trivial conflict in arch/x86/kernel/entry_64.S due to newly added
includes right next to each other.
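
In hardware terms, SMAP keys off the EFLAGS.AC bit: with CR4.SMAP set, a kernel-mode access to a user-space page faults unless AC is set, and the new STAC/CLAC instructions exist solely to flip that bit. The pattern this series applies throughout the tree looks roughly like the following (an illustrative kernel-style sketch, not a hunk from this merge; stac()/clac() are the helpers it adds in <asm/smap.h>, and do_the_user_access() is a hypothetical stand-in for whatever bounded copy the caller intends):

	stac();				/* EFLAGS.AC = 1: user access permitted */
	err = do_the_user_access();	/* the one deliberate user-space access */
	clac();				/* EFLAGS.AC = 0: stray accesses fault again */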
* 'x86-smap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86, smep, smap: Make the switching functions one-way
x86, suspend: On wakeup always initialize cr4 and EFER
x86-32: Start out eflags and cr4 clean
x86, smap: Do not abuse the [f][x]rstor_checking() functions for user space
x86-32, smap: Add STAC/CLAC instructions to 32-bit kernel entry
x86, smap: Reduce the SMAP overhead for signal handling
x86, smap: A page fault due to SMAP is an oops
x86, smap: Turn on Supervisor Mode Access Prevention
x86, smap: Add STAC and CLAC instructions to control user space access
x86, uaccess: Merge prototypes for clear_user/__clear_user
x86, smap: Add a header file with macros for STAC/CLAC
x86, alternative: Add header guards to <asm/alternative-asm.h>
x86, alternative: Use .pushsection/.popsection
x86, smap: Add CR4 bit for SMAP
x86-32, mm: The WP test should be done on a kernel page
31 files changed, 410 insertions(+), 116 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index df551dfa8e52..df43807bb5da 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1812,8 +1812,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			noexec=on: enable non-executable mappings (default)
 			noexec=off: disable non-executable mappings
 
+	nosmap		[X86]
+			Disable SMAP (Supervisor Mode Access Prevention)
+			even if it is supported by processor.
+
 	nosmep		[X86]
-			Disable SMEP (Supervisor Mode Execution Protection)
+			Disable SMEP (Supervisor Mode Execution Prevention)
 			even if it is supported by processor.
 
 	noexec32	[X86-64]
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 943667050dae..7f9a395c5254 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1493,6 +1493,17 @@ config ARCH_RANDOM
 	  If supported, this is a high bandwidth, cryptographically
 	  secure hardware random number generator.
 
+config X86_SMAP
+	def_bool y
+	prompt "Supervisor Mode Access Prevention" if EXPERT
+	---help---
+	  Supervisor Mode Access Prevention (SMAP) is a security
+	  feature in newer Intel processors.  There is a small
+	  performance cost if this enabled and turned on; there is
+	  also a small increase in the kernel size if this is enabled.
+
+	  If unsure, say Y.
+
 config EFI
 	bool "EFI runtime service support"
 	depends on ACPI
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 8c77c64fbd27..efc6a958b71d 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -32,6 +32,7 @@
 #include <asm/sigframe.h>
 #include <asm/sighandling.h>
 #include <asm/sys_ia32.h>
+#include <asm/smap.h>
 
 #define FIX_EFLAGS	__FIX_EFLAGS
 
@@ -251,11 +252,12 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 
 		get_user_ex(tmp, &sc->fpstate);
 		buf = compat_ptr(tmp);
-		err |= restore_xstate_sig(buf, 1);
 
 		get_user_ex(*pax, &sc->ax);
 	} get_user_catch(err);
 
+	err |= restore_xstate_sig(buf, 1);
+
 	return err;
 }
 
@@ -506,7 +508,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(sig, &frame->sig);
 		put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
 		put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
-		err |= copy_siginfo_to_user32(&frame->info, info);
 
 		/* Create the ucontext.  */
 		if (cpu_has_xsave)
@@ -518,9 +519,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(sas_ss_flags(regs->sp),
 			    &frame->uc.uc_stack.ss_flags);
 		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-		err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
-					     regs, set->sig[0]);
-		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
 		if (ka->sa.sa_flags & SA_RESTORER)
 			restorer = ka->sa.sa_restorer;
@@ -536,6 +534,11 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
 	} put_user_catch(err);
 
+	err |= copy_siginfo_to_user32(&frame->info, info);
+	err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+				     regs, set->sig[0]);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
 	if (err)
 		return -EFAULT;
 
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 20e5f7ba0e6b..9c289504e680 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -14,6 +14,7 @@
 #include <asm/segment.h>
 #include <asm/irqflags.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 #include <linux/linkage.h>
 #include <linux/err.h>
 
@@ -146,8 +147,10 @@ ENTRY(ia32_sysenter_target)
 	SAVE_ARGS 0,1,0
 	/* no need to do an access_ok check here because rbp has been
 	   32bit zero extended */
+	ASM_STAC
 1:	movl	(%rbp),%ebp
 	_ASM_EXTABLE(1b,ia32_badarg)
+	ASM_CLAC
 	orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	CFI_REMEMBER_STATE
@@ -301,8 +304,10 @@ ENTRY(ia32_cstar_target)
 	/* no need to do an access_ok check here because r8 has been
 	   32bit zero extended */
 	/* hardware stack frame is complete now */
+	ASM_STAC
1:	movl	(%r8),%r9d
 	_ASM_EXTABLE(1b,ia32_badarg)
+	ASM_CLAC
 	orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	CFI_REMEMBER_STATE
@@ -365,6 +370,7 @@ cstar_tracesys:
 END(ia32_cstar_target)
 
 ia32_badarg:
+	ASM_CLAC
 	movq $-EFAULT,%rax
 	jmp ia32_sysret
 	CFI_ENDPROC
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 952bd0100c5c..372231c22a47 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -1,3 +1,6 @@
+#ifndef _ASM_X86_ALTERNATIVE_ASM_H
+#define _ASM_X86_ALTERNATIVE_ASM_H
+
 #ifdef __ASSEMBLY__
 
 #include <asm/asm.h>
@@ -5,10 +8,10 @@
 #ifdef CONFIG_SMP
 	.macro LOCK_PREFIX
 672:	lock
-	.section .smp_locks,"a"
+	.pushsection .smp_locks,"a"
 	.balign 4
 	.long 672b - .
-	.previous
+	.popsection
 	.endm
 #else
 	.macro LOCK_PREFIX
@@ -24,3 +27,5 @@
 .endm
 
 #endif  /*  __ASSEMBLY__  */
+
+#endif /* _ASM_X86_ALTERNATIVE_ASM_H */
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 444704c8e186..58ed6d96a6ac 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -29,10 +29,10 @@
 
 #ifdef CONFIG_SMP
 #define LOCK_PREFIX_HERE \
-		".section .smp_locks,\"a\"\n"	\
+		".pushsection .smp_locks,\"a\"\n"	\
 		".balign 4\n"			\
 		".long 671f - .\n" /* offset */	\
-		".previous\n"			\
+		".popsection\n"			\
 		"671:"
 
 #define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
@@ -99,30 +99,30 @@ static inline int alternatives_text_reserved(void *start, void *end)
 /* alternative assembly primitive: */
 #define ALTERNATIVE(oldinstr, newinstr, feature)			\
 	OLDINSTR(oldinstr)						\
-	".section .altinstructions,\"a\"\n"				\
+	".pushsection .altinstructions,\"a\"\n"				\
 	ALTINSTR_ENTRY(feature, 1)					\
-	".previous\n"							\
-	".section .discard,\"aw\",@progbits\n"				\
+	".popsection\n"							\
+	".pushsection .discard,\"aw\",@progbits\n"			\
 	DISCARD_ENTRY(1)						\
-	".previous\n"							\
-	".section .altinstr_replacement, \"ax\"\n"			\
+	".popsection\n"							\
+	".pushsection .altinstr_replacement, \"ax\"\n"			\
 	ALTINSTR_REPLACEMENT(newinstr, feature, 1)			\
-	".previous"
+	".popsection"
 
 #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
 	OLDINSTR(oldinstr)						\
-	".section .altinstructions,\"a\"\n"				\
+	".pushsection .altinstructions,\"a\"\n"				\
 	ALTINSTR_ENTRY(feature1, 1)					\
 	ALTINSTR_ENTRY(feature2, 2)					\
-	".previous\n"							\
-	".section .discard,\"aw\",@progbits\n"				\
+	".popsection\n"							\
+	".pushsection .discard,\"aw\",@progbits\n"			\
 	DISCARD_ENTRY(1)						\
 	DISCARD_ENTRY(2)						\
-	".previous\n"							\
-	".section .altinstr_replacement, \"ax\"\n"			\
+	".popsection\n"							\
+	".pushsection .altinstr_replacement, \"ax\"\n"			\
 	ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)			\
 	ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)			\
-	".previous"
+	".popsection"
 
 /*
  * This must be included *after* the definition of ALTERNATIVE due to
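
The move away from .section/.previous matters because .previous only switches back to the most recently used section rather than maintaining a stack, so these macros misbehave when expanded inside code that has itself switched sections; .pushsection/.popsection nest properly. A minimal standalone GAS illustration of the nesting (not from the patch):

	.pushsection .text.unlikely, "ax"	# outer section switch
672:	lock; incl (%eax)			# the annotated instruction
	.pushsection .smp_locks, "a"		# nested switch, as LOCK_PREFIX does
	.balign 4
	.long 672b - .				# record the lock's location
	.popsection				# back to .text.unlikely ...
	.popsection				# ... and back to the original section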
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 92f3c6ed817f..831dbb9c6c02 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -21,6 +21,7 @@
 #include <asm/user.h>
 #include <asm/uaccess.h>
 #include <asm/xsave.h>
+#include <asm/smap.h>
 
 #ifdef CONFIG_X86_64
 # include <asm/sigcontext32.h>
@@ -121,6 +122,22 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
 	__sanitize_i387_state(tsk);
 }
 
+#define user_insn(insn, output, input...)				\
+({									\
+	int err;							\
+	asm volatile(ASM_STAC "\n"					\
+		     "1:" #insn "\n\t"					\
+		     "2: " ASM_CLAC "\n"				\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:  movl $-1,%[err]\n"				\
+		     "    jmp  2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err), output				\
+		     : "0"(0), input);					\
+	err;								\
+})
+
 #define check_insn(insn, output, input...)				\
 ({									\
 	int err;							\
@@ -138,18 +155,18 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
 
 static inline int fsave_user(struct i387_fsave_struct __user *fx)
 {
-	return check_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
+	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
 }
 
 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
 	if (config_enabled(CONFIG_X86_32))
-		return check_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
+		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
 	else if (config_enabled(CONFIG_AS_FXSAVEQ))
-		return check_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
+		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
 
 	/* See comment in fpu_fxsave() below. */
-	return check_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
+	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
 }
 
 static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
@@ -164,11 +181,28 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 			  "m" (*fx));
 }
 
+static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
+{
+	if (config_enabled(CONFIG_X86_32))
+		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+
+	/* See comment in fpu_fxsave() below. */
+	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
+			  "m" (*fx));
+}
+
 static inline int frstor_checking(struct i387_fsave_struct *fx)
 {
 	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
+static inline int frstor_user(struct i387_fsave_struct __user *fx)
+{
+	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
 static inline void fpu_fxsave(struct fpu *fpu)
 {
 	if (config_enabled(CONFIG_X86_32))
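
The new user_insn() differs from check_insn() only in the ASM_STAC/ASM_CLAC bracket, so each FPU instruction that touches a user buffer runs inside its own minimal user-access window. Hand-expanding frstor_user(fx), for example, gives roughly the following (a sketch of the macro expansion, not compiler output):

	int err;
	asm volatile(ASM_STAC "\n"		/* open the user-access window */
		     "1: frstor %[fx]\n\t"	/* may fault on a bad user pointer */
		     "2: " ASM_CLAC "\n"	/* close the window again */
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"	/* fault path: flag the failure */
		     "    jmp  2b\n"		/* and resume at the CLAC */
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)	/* route faults at 1: to 3: */
		     : [err] "=r" (err), "=m" (*fx)
		     : "0"(0), [fx] "m" (*fx));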
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index 71ecbcba1a4e..f373046e63ec 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -9,10 +9,13 @@
 #include <asm/asm.h>
 #include <asm/errno.h>
 #include <asm/processor.h>
+#include <asm/smap.h>
 
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)	\
-	asm volatile("1:\t" insn "\n"				\
-		     "2:\t.section .fixup,\"ax\"\n"		\
+	asm volatile("\t" ASM_STAC "\n"				\
+		     "1:\t" insn "\n"				\
+		     "2:\t" ASM_CLAC "\n"			\
+		     "\t.section .fixup,\"ax\"\n"		\
 		     "3:\tmov\t%3, %1\n"			\
 		     "\tjmp\t2b\n"				\
 		     "\t.previous\n"				\
@@ -21,12 +24,14 @@
 		     : "i" (-EFAULT), "0" (oparg), "1" (0))
 
 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg)	\
-	asm volatile("1:\tmovl	%2, %0\n"			\
+	asm volatile("\t" ASM_STAC "\n"				\
+		     "1:\tmovl	%2, %0\n"			\
 		     "\tmovl\t%0, %3\n"				\
 		     "\t" insn "\n"				\
 		     "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"	\
 		     "\tjnz\t1b\n"				\
-		     "3:\t.section .fixup,\"ax\"\n"		\
+		     "3:\t" ASM_CLAC "\n"			\
+		     "\t.section .fixup,\"ax\"\n"		\
 		     "4:\tmov\t%5, %1\n"			\
 		     "\tjmp\t3b\n"				\
 		     "\t.previous\n"				\
@@ -122,8 +127,10 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
-		     "2:\t.section .fixup, \"ax\"\n"
+	asm volatile("\t" ASM_STAC "\n"
+		     "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+		     "2:\t" ASM_CLAC "\n"
+		     "\t.section .fixup, \"ax\"\n"
 		     "3:\tmov     %3, %0\n"
 		     "\tjmp     2b\n"
 		     "\t.previous\n"
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index aea1d1d848c7..680cf09ed100 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -65,6 +65,7 @@
 #define X86_CR4_PCIDE	0x00020000 /* enable PCID support */
 #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
 #define X86_CR4_SMEP	0x00100000 /* enable SMEP support */
+#define X86_CR4_SMAP	0x00200000 /* enable SMAP support */
 
 /*
  * x86-64 Task Priority Register, CR8
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
new file mode 100644
index 000000000000..8d3120f4e270
--- /dev/null
+++ b/arch/x86/include/asm/smap.h
@@ -0,0 +1,91 @@
+/*
+ * Supervisor Mode Access Prevention support
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: H. Peter Anvin <hpa@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifndef _ASM_X86_SMAP_H
+#define _ASM_X86_SMAP_H
+
+#include <linux/stringify.h>
+#include <asm/nops.h>
+#include <asm/cpufeature.h>
+
+/* "Raw" instruction opcodes */
+#define __ASM_CLAC	.byte 0x0f,0x01,0xca
+#define __ASM_STAC	.byte 0x0f,0x01,0xcb
+
+#ifdef __ASSEMBLY__
+
+#include <asm/alternative-asm.h>
+
+#ifdef CONFIG_X86_SMAP
+
+#define ASM_CLAC							\
+	661: ASM_NOP3 ;							\
+	.pushsection .altinstr_replacement, "ax" ;			\
+	662: __ASM_CLAC ;						\
+	.popsection ;							\
+	.pushsection .altinstructions, "a" ;				\
+	altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ;	\
+	.popsection
+
+#define ASM_STAC							\
+	661: ASM_NOP3 ;							\
+	.pushsection .altinstr_replacement, "ax" ;			\
+	662: __ASM_STAC ;						\
+	.popsection ;							\
+	.pushsection .altinstructions, "a" ;				\
+	altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ;	\
+	.popsection
+
+#else /* CONFIG_X86_SMAP */
+
+#define ASM_CLAC
+#define ASM_STAC
+
+#endif /* CONFIG_X86_SMAP */
+
+#else /* __ASSEMBLY__ */
+
+#include <asm/alternative.h>
+
+#ifdef CONFIG_X86_SMAP
+
+static __always_inline void clac(void)
+{
+	/* Note: a barrier is implicit in alternative() */
+	alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+}
+
+static __always_inline void stac(void)
+{
+	/* Note: a barrier is implicit in alternative() */
+	alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+}
+
+/* These macros can be used in asm() statements */
+#define ASM_CLAC							\
+	ALTERNATIVE(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
+#define ASM_STAC							\
+	ALTERNATIVE(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP)
+
+#else /* CONFIG_X86_SMAP */
+
+static inline void clac(void) { }
+static inline void stac(void) { }
+
+#define ASM_CLAC
+#define ASM_STAC
+
+#endif /* CONFIG_X86_SMAP */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_SMAP_H */
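
Both the assembler and the C variants above lean on the alternatives framework: each call site is assembled as a three-byte NOP and patched in place at boot when X86_FEATURE_SMAP is present, which works because CLAC and STAC are also exactly three bytes. A summary of the patched slot (the CLAC/STAC encodings are the ones defined above; the exact NOP bytes depend on the NOP family chosen in <asm/nops.h>):

	/*
	 * Patched instruction slot, three bytes either way:
	 *
	 *   no SMAP:  ASM_NOP3            (a three-byte no-op)
	 *   SMAP:     0f 01 ca  (clac)    clear EFLAGS.AC
	 *             0f 01 cb  (stac)    set EFLAGS.AC
	 *
	 * CPUs without SMAP therefore pay only a NOP per call site.
	 */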
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index e1f3a17034fc..a91acfbb1a98 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -9,6 +9,7 @@
 #include <linux/string.h>
 #include <asm/asm.h>
 #include <asm/page.h>
+#include <asm/smap.h>
 
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
@@ -192,9 +193,10 @@ extern int __get_user_bad(void);
 
 #ifdef CONFIG_X86_32
 #define __put_user_asm_u64(x, addr, err, errret)			\
-	asm volatile("1:	movl %%eax,0(%2)\n"			\
+	asm volatile(ASM_STAC "\n"					\
+		     "1:	movl %%eax,0(%2)\n"			\
 		     "2:	movl %%edx,4(%2)\n"			\
-		     "3:\n"						\
+		     "3: " ASM_CLAC "\n"				\
 		     ".section .fixup,\"ax\"\n"				\
 		     "4:	movl %3,%0\n"				\
 		     "	jmp 3b\n"					\
@@ -205,9 +207,10 @@ extern int __get_user_bad(void);
 		     : "A" (x), "r" (addr), "i" (errret), "0" (err))
 
 #define __put_user_asm_ex_u64(x, addr)					\
-	asm volatile("1:	movl %%eax,0(%1)\n"			\
+	asm volatile(ASM_STAC "\n"					\
+		     "1:	movl %%eax,0(%1)\n"			\
 		     "2:	movl %%edx,4(%1)\n"			\
-		     "3:\n"						\
+		     "3: " ASM_CLAC "\n"				\
 		     _ASM_EXTABLE_EX(1b, 2b)				\
 		     _ASM_EXTABLE_EX(2b, 3b)				\
 		     : : "A" (x), "r" (addr))
@@ -379,8 +382,9 @@ do {									\
 } while (0)
 
 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
-	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
-		     "2:\n"						\
+	asm volatile(ASM_STAC "\n"					\
+		     "1:	mov"itype" %2,%"rtype"1\n"		\
+		     "2: " ASM_CLAC "\n"				\
 		     ".section .fixup,\"ax\"\n"				\
 		     "3:	mov %3,%0\n"				\
 		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
@@ -443,8 +447,9 @@ struct __large_struct { unsigned long buf[100]; };
 * aliasing issues.
 */
 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
-	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
-		     "2:\n"						\
+	asm volatile(ASM_STAC "\n"					\
+		     "1:	mov"itype" %"rtype"1,%2\n"		\
+		     "2: " ASM_CLAC "\n"				\
 		     ".section .fixup,\"ax\"\n"				\
 		     "3:	mov %3,%0\n"				\
 		     "	jmp 2b\n"					\
@@ -463,13 +468,13 @@ struct __large_struct { unsigned long buf[100]; };
 * uaccess_try and catch
 */
 #define uaccess_try	do {						\
-	int prev_err = current_thread_info()->uaccess_err;		\
 	current_thread_info()->uaccess_err = 0;				\
+	stac();								\
 	barrier();
 
 #define uaccess_catch(err)						\
+	clac();								\
 	(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);	\
-	current_thread_info()->uaccess_err = prev_err;			\
 } while (0)
 
 /**
@@ -569,6 +574,9 @@ strncpy_from_user(char *dst, const char __user *src, long count);
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strnlen_user(const char __user *str, long n);
 
+unsigned long __must_check clear_user(void __user *mem, unsigned long len);
+unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
+
 /*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
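
uaccess_try/uaccess_catch underlie the get_user_try/get_user_ex/get_user_catch (and put_user_*) blocks used by the signal code in this merge, so one stac()/clac() pair now covers a whole block of unchecked accesses instead of one pair per access. A condensed sketch of the pattern, modeled on ia32_restore_sigcontext() above:

	int err = 0;

	get_user_try {				/* uaccess_try: stac(), AC = 1 */
		get_user_ex(tmp, &sc->fpstate);	/* unchecked user loads; faults */
		buf = compat_ptr(tmp);		/* are caught via the ex-table  */
		get_user_ex(*pax, &sc->ax);
	} get_user_catch(err);			/* uaccess_catch: clac(), AC = 0 */

	err |= restore_xstate_sig(buf, 1);	/* moved out: does its own STAC/CLAC */

This is also why the restore_xstate_sig(), copy_siginfo_to_user*() and setup_sigcontext() calls migrate out of the try/catch blocks in the signal patches: those functions perform their own user accesses and should not run inside an already-open user-access section.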
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 576e39bca6ad..7f760a9f1f61 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -213,7 +213,4 @@ static inline unsigned long __must_check copy_from_user(void *to,
 	return n;
 }
 
-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
-
 #endif /* _ASM_X86_UACCESS_32_H */
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index d8def8b3dba0..142810c457dc 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -217,9 +217,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 	}
 }
 
-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
-
 static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 2ddee1b87793..0415cdabb5a6 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -70,8 +70,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
 	if (unlikely(err))
 		return -EFAULT;
 
-	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
-			     "2:\n"
+	__asm__ __volatile__(ASM_STAC "\n"
+			     "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
+			     "2: " ASM_CLAC "\n"
 			     ".section .fixup,\"ax\"\n"
 			     "3:  movl $-1,%[err]\n"
 			     "    jmp  2b\n"
@@ -90,8 +91,9 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
 
-	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
-			     "2:\n"
+	__asm__ __volatile__(ASM_STAC "\n"
+			     "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+			     "2: " ASM_CLAC "\n"
 			     ".section .fixup,\"ax\"\n"
 			     "3:  movl $-1,%[err]\n"
 			     "    jmp  2b\n"
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 1b8e5a03d942..11676cf65aee 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -43,17 +43,22 @@ int acpi_suspend_lowlevel(void)
 
 	header->video_mode = saved_video_mode;
 
+	header->pmode_behavior = 0;
+
 #ifndef CONFIG_64BIT
 	store_gdt((struct desc_ptr *)&header->pmode_gdt);
 
-	if (rdmsr_safe(MSR_EFER, &header->pmode_efer_low,
-		       &header->pmode_efer_high))
-		header->pmode_efer_low = header->pmode_efer_high = 0;
+	if (!rdmsr_safe(MSR_EFER,
+			&header->pmode_efer_low,
+			&header->pmode_efer_high))
+		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
 #endif /* !CONFIG_64BIT */
 
 	header->pmode_cr0 = read_cr0();
-	header->pmode_cr4 = read_cr4_safe();
-	header->pmode_behavior = 0;
+	if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
+		header->pmode_cr4 = read_cr4();
+		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
+	}
 	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
 			&header->pmode_misc_en_low,
 			&header->pmode_misc_en_high))
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 532691b6c8fe..7505f7b13e71 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -259,23 +259,36 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 }
 #endif
 
-static int disable_smep __cpuinitdata;
 static __init int setup_disable_smep(char *arg)
 {
-	disable_smep = 1;
+	setup_clear_cpu_cap(X86_FEATURE_SMEP);
 	return 1;
 }
 __setup("nosmep", setup_disable_smep);
 
-static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
+static __always_inline void setup_smep(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_SMEP)) {
-		if (unlikely(disable_smep)) {
-			setup_clear_cpu_cap(X86_FEATURE_SMEP);
-			clear_in_cr4(X86_CR4_SMEP);
-		} else
-			set_in_cr4(X86_CR4_SMEP);
-	}
+	if (cpu_has(c, X86_FEATURE_SMEP))
+		set_in_cr4(X86_CR4_SMEP);
+}
+
+static __init int setup_disable_smap(char *arg)
+{
+	setup_clear_cpu_cap(X86_FEATURE_SMAP);
+	return 1;
+}
+__setup("nosmap", setup_disable_smap);
+
+static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+{
+	unsigned long eflags;
+
+	/* This should have been cleared long ago */
+	raw_local_save_flags(eflags);
+	BUG_ON(eflags & X86_EFLAGS_AC);
+
+	if (cpu_has(c, X86_FEATURE_SMAP))
+		set_in_cr4(X86_CR4_SMAP);
 }
 
 /*
@@ -712,8 +725,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	c->cpu_index = 0;
 	filter_cpuid_features(c, false);
 
-	setup_smep(c);
-
 	if (this_cpu->c_bsp_init)
 		this_cpu->c_bsp_init(c);
 }
@@ -798,8 +809,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 		c->phys_proc_id = c->initial_apicid;
 	}
 
-	setup_smep(c);
-
 	get_model_name(c); /* Default name */
 
 	detect_nopl(c);
@@ -864,6 +873,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	/* Disable the PN if appropriate */
 	squash_the_stupid_serial_number(c);
 
+	/* Set up SMEP/SMAP */
+	setup_smep(c);
+	setup_smap(c);
+
 	/*
 	 * The vendor-specific functions might have changed features.
 	 * Now we do "generic changes."
@@ -1114,7 +1127,8 @@ void syscall_init(void)
 
 	/* Flags to clear on syscall */
 	wrmsrl(MSR_SYSCALL_MASK,
-	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
+	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
+	       X86_EFLAGS_IOPL|X86_EFLAGS_AC);
 }
 
 /*
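
The BUG_ON(eflags & X86_EFLAGS_AC) in setup_smap() and the new X86_EFLAGS_AC bit in MSR_SYSCALL_MASK close the same hole from two sides: ring 3 can set EFLAGS.AC at will, and since SMAP keys off that bit, every kernel entry path must clear it -- SYSCALL via the mask MSR above, interrupts and exceptions via the ASM_CLAC instructions added to the entry stubs below. An illustrative user-space program (not from this series) showing that AC is freely writable from ring 3:

	#include <stdio.h>

	int main(void)			/* x86-64 Linux */
	{
		unsigned long before, after;

		asm volatile("pushf; pop %0" : "=r" (before));
		asm volatile("pushf\n\t"
			     "xorl $0x40000, (%%rsp)\n\t"	/* flip bit 18 (AC) */
			     "popf\n\t"
			     "pushf\n\t"
			     "pop %0\n\t"
			     "push %1\n\t"			/* restore old flags */
			     "popf"
			     : "=&r" (after) : "r" (before));

		printf("AC: %lu -> %lu\n",
		       (before >> 18) & 1, (after >> 18) & 1);
		return 0;
	}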
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f438a44bf8f9..0750e3ba87c0 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -57,6 +57,7 @@
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -407,7 +408,9 @@ sysenter_past_esp:
 	 */
 	cmpl $__PAGE_OFFSET-3,%ebp
 	jae syscall_fault
+	ASM_STAC
 1:	movl (%ebp),%ebp
+	ASM_CLAC
 	movl %ebp,PT_EBP(%esp)
 	_ASM_EXTABLE(1b,syscall_fault)
 
@@ -488,6 +491,7 @@ ENDPROC(ia32_sysenter_target)
 	# system call handler stub
 ENTRY(system_call)
 	RING0_INT_FRAME			# can't unwind into user space anyway
+	ASM_CLAC
 	pushl_cfi %eax			# save orig_eax
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
@@ -670,6 +674,7 @@ END(syscall_exit_work)
 
 	RING0_INT_FRAME			# can't unwind into user space anyway
 syscall_fault:
+	ASM_CLAC
 	GET_THREAD_INFO(%ebp)
 	movl $-EFAULT,PT_EAX(%esp)
 	jmp resume_userspace
@@ -825,6 +830,7 @@ END(interrupt)
  */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
+	ASM_CLAC
 	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
 	SAVE_ALL
 	TRACE_IRQS_OFF
@@ -841,6 +847,7 @@ ENDPROC(common_interrupt)
 #define BUILD_INTERRUPT3(name, nr, fn)	\
 ENTRY(name)				\
 	RING0_INT_FRAME;		\
+	ASM_CLAC;			\
 	pushl_cfi $~(nr);		\
 	SAVE_ALL;			\
 	TRACE_IRQS_OFF			\
@@ -857,6 +864,7 @@ ENDPROC(name)
 
 ENTRY(coprocessor_error)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_coprocessor_error
 	jmp error_code
@@ -865,6 +873,7 @@ END(coprocessor_error)
 
 ENTRY(simd_coprocessor_error)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 #ifdef CONFIG_X86_INVD_BUG
 	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
@@ -886,6 +895,7 @@ END(simd_coprocessor_error)
 
 ENTRY(device_not_available)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $-1			# mark this as an int
 	pushl_cfi $do_device_not_available
 	jmp error_code
@@ -906,6 +916,7 @@ END(native_irq_enable_sysexit)
 
 ENTRY(overflow)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_overflow
 	jmp error_code
@@ -914,6 +925,7 @@ END(overflow)
 
 ENTRY(bounds)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_bounds
 	jmp error_code
@@ -922,6 +934,7 @@ END(bounds)
 
 ENTRY(invalid_op)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_invalid_op
 	jmp error_code
@@ -930,6 +943,7 @@ END(invalid_op)
 
 ENTRY(coprocessor_segment_overrun)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_coprocessor_segment_overrun
 	jmp error_code
@@ -938,6 +952,7 @@ END(coprocessor_segment_overrun)
 
 ENTRY(invalid_TSS)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_invalid_TSS
 	jmp error_code
 	CFI_ENDPROC
@@ -945,6 +960,7 @@ END(invalid_TSS)
 
 ENTRY(segment_not_present)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_segment_not_present
 	jmp error_code
 	CFI_ENDPROC
@@ -952,6 +968,7 @@ END(segment_not_present)
 
 ENTRY(stack_segment)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_stack_segment
 	jmp error_code
 	CFI_ENDPROC
@@ -959,6 +976,7 @@ END(stack_segment)
 
 ENTRY(alignment_check)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_alignment_check
 	jmp error_code
 	CFI_ENDPROC
@@ -966,6 +984,7 @@ END(alignment_check)
 
 ENTRY(divide_error)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0			# no error code
 	pushl_cfi $do_divide_error
 	jmp error_code
@@ -975,6 +994,7 @@ END(divide_error)
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi machine_check_vector
 	jmp error_code
@@ -984,6 +1004,7 @@ END(machine_check)
 
 ENTRY(spurious_interrupt_bug)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_spurious_interrupt_bug
 	jmp error_code
@@ -1273,6 +1294,7 @@ return_to_handler:
 
 ENTRY(page_fault)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_page_fault
 	ALIGN
 error_code:
@@ -1345,6 +1367,7 @@ END(page_fault)
 
 ENTRY(debug)
 	RING0_INT_FRAME
+	ASM_CLAC
 	cmpl $ia32_sysenter_target,(%esp)
 	jne debug_stack_correct
 	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
@@ -1369,6 +1392,7 @@ END(debug)
 */
 ENTRY(nmi)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi %eax
 	movl %ss, %eax
 	cmpw $__ESPFIX_SS, %ax
@@ -1439,6 +1463,7 @@ END(nmi)
 
 ENTRY(int3)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $-1			# mark this as an int
 	SAVE_ALL
 	TRACE_IRQS_OFF
@@ -1459,6 +1484,7 @@ END(general_protection)
 #ifdef CONFIG_KVM_GUEST
 ENTRY(async_page_fault)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_async_page_fault
 	jmp error_code
 	CFI_ENDPROC
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 066334be7b74..44531acd9a81 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -57,6 +57,7 @@
 #include <asm/percpu.h>
 #include <asm/asm.h>
 #include <asm/rcu.h>
+#include <asm/smap.h>
 #include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
@@ -568,7 +569,8 @@ END(ret_from_fork)
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
- * stack pointer.
+ * stack pointer.  However, it does mask the flags register for us, so
+ * CLD and CLAC are not needed.
 */
 
 /*
@@ -987,6 +989,7 @@ END(interrupt)
 */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
+	ASM_CLAC
 	XCPT_FRAME
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
@@ -1126,6 +1129,7 @@ END(common_interrupt)
 */
 .macro apicinterrupt num sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	pushq_cfi $~(\num)
 .Lcommon_\sym:
@@ -1180,6 +1184,7 @@ apicinterrupt IRQ_WORK_VECTOR \
 */
 .macro zeroentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1197,6 +1202,7 @@ END(\sym)
 
 .macro paranoidzeroentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1215,6 +1221,7 @@ END(\sym)
 #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1234,6 +1241,7 @@ END(\sym)
 
 .macro errorentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	XCPT_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
@@ -1252,6 +1260,7 @@ END(\sym)
 	/* error code is on the stack already */
 .macro paranoiderrorentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	XCPT_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index d42ab17b7397..957a47aec64e 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -287,27 +287,28 @@ ENTRY(startup_32_smp)
 	leal -__PAGE_OFFSET(%ecx),%esp
 
 default_entry:
-
 /*
 *	New page tables may be in 4Mbyte page mode and may
 *	be using the global pages.
 *
 *	NOTE! If we are on a 486 we may have no cr4 at all!
- *	So we do not try to touch it unless we really have
- *	some bits in it to set.  This won't work if the BSP
- *	implements cr4 but this AP does not -- very unlikely
- *	but be warned!  The same applies to the pse feature
- *	if not equally supported. --macro
- *
- *	NOTE! We have to correct for the fact that we're
- *	not yet offset PAGE_OFFSET..
+ *	Specifically, cr4 exists if and only if CPUID exists,
+ *	which in turn exists if and only if EFLAGS.ID exists.
 */
-#define cr4_bits pa(mmu_cr4_features)
-	movl cr4_bits,%edx
-	andl %edx,%edx
-	jz 6f
-	movl %cr4,%eax		# Turn on paging options (PSE,PAE,..)
-	orl %edx,%eax
+	movl $X86_EFLAGS_ID,%ecx
+	pushl %ecx
+	popfl
+	pushfl
+	popl %eax
+	pushl $0
+	popfl
+	pushfl
+	popl %edx
+	xorl %edx,%eax
+	testl %ecx,%eax
+	jz 6f			# No ID flag = no CPUID = no CR4
+
+	movl pa(mmu_cr4_features),%eax
 	movl %eax,%cr4
 
 	testb $X86_CR4_PAE, %al		# check if PAE is enabled
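
The replacement sequence probes for CPUID by testing whether EFLAGS.ID (bit 21) can be toggled; only CPUs that implement CPUID -- and therefore CR4 -- let that bit change. The same algorithm written out as 32-bit C with inline assembly (a sketch of the technique, not kernel code; build with -m32):

	/* Nonzero iff CPUID, and hence CR4, exists. */
	static int has_cpuid(void)
	{
		unsigned int id_set, id_clear;

		asm volatile("pushl $0x00200000; popfl; pushfl; popl %0"
			     : "=r" (id_set) : : "cc");	/* try EFLAGS.ID = 1 */
		asm volatile("pushl $0; popfl; pushfl; popl %0"
			     : "=r" (id_clear) : : "cc");	/* try EFLAGS.ID = 0 */
		return (id_set ^ id_clear) & 0x00200000;	/* did ID toggle? */
	}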
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 3160c26db5e7..b33144c8b309 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -114,11 +114,12 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | |||
114 | regs->orig_ax = -1; /* disable syscall checks */ | 114 | regs->orig_ax = -1; /* disable syscall checks */ |
115 | 115 | ||
116 | get_user_ex(buf, &sc->fpstate); | 116 | get_user_ex(buf, &sc->fpstate); |
117 | err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32)); | ||
118 | 117 | ||
119 | get_user_ex(*pax, &sc->ax); | 118 | get_user_ex(*pax, &sc->ax); |
120 | } get_user_catch(err); | 119 | } get_user_catch(err); |
121 | 120 | ||
121 | err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32)); | ||
122 | |||
122 | return err; | 123 | return err; |
123 | } | 124 | } |
124 | 125 | ||
@@ -355,7 +356,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
355 | put_user_ex(sig, &frame->sig); | 356 | put_user_ex(sig, &frame->sig); |
356 | put_user_ex(&frame->info, &frame->pinfo); | 357 | put_user_ex(&frame->info, &frame->pinfo); |
357 | put_user_ex(&frame->uc, &frame->puc); | 358 | put_user_ex(&frame->uc, &frame->puc); |
358 | err |= copy_siginfo_to_user(&frame->info, info); | ||
359 | 359 | ||
360 | /* Create the ucontext. */ | 360 | /* Create the ucontext. */ |
361 | if (cpu_has_xsave) | 361 | if (cpu_has_xsave) |
@@ -367,9 +367,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
367 | put_user_ex(sas_ss_flags(regs->sp), | 367 | put_user_ex(sas_ss_flags(regs->sp), |
368 | &frame->uc.uc_stack.ss_flags); | 368 | &frame->uc.uc_stack.ss_flags); |
369 | put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | 369 | put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); |
370 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, | ||
371 | regs, set->sig[0]); | ||
372 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
373 | 370 | ||
374 | /* Set up to return from userspace. */ | 371 | /* Set up to return from userspace. */ |
375 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); | 372 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); |
@@ -386,6 +383,11 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
386 | */ | 383 | */ |
387 | put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode); | 384 | put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode); |
388 | } put_user_catch(err); | 385 | } put_user_catch(err); |
386 | |||
387 | err |= copy_siginfo_to_user(&frame->info, info); | ||
388 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, | ||
389 | regs, set->sig[0]); | ||
390 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
389 | 391 | ||
390 | if (err) | 392 | if (err) |
391 | return -EFAULT; | 393 | return -EFAULT; |
@@ -434,8 +436,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
434 | put_user_ex(sas_ss_flags(regs->sp), | 436 | put_user_ex(sas_ss_flags(regs->sp), |
435 | &frame->uc.uc_stack.ss_flags); | 437 | &frame->uc.uc_stack.ss_flags); |
436 | put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size); | 438 | put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size); |
437 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]); | ||
438 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
439 | 439 | ||
440 | /* Set up to return from userspace. If provided, use a stub | 440 | /* Set up to return from userspace. If provided, use a stub |
441 | already in userspace. */ | 441 | already in userspace. */ |
@@ -448,6 +448,9 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
448 | } | 448 | } |
449 | } put_user_catch(err); | 449 | } put_user_catch(err); |
450 | 450 | ||
451 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]); | ||
452 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
453 | |||
451 | if (err) | 454 | if (err) |
452 | return -EFAULT; | 455 | return -EFAULT; |
453 | 456 | ||
@@ -504,9 +507,6 @@ static int x32_setup_rt_frame(int sig, struct k_sigaction *ka, | |||
504 | &frame->uc.uc_stack.ss_flags); | 507 | &frame->uc.uc_stack.ss_flags); |
505 | put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | 508 | put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); |
506 | put_user_ex(0, &frame->uc.uc__pad0); | 509 | put_user_ex(0, &frame->uc.uc__pad0); |
507 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, | ||
508 | regs, set->sig[0]); | ||
509 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
510 | 510 | ||
511 | if (ka->sa.sa_flags & SA_RESTORER) { | 511 | if (ka->sa.sa_flags & SA_RESTORER) { |
512 | restorer = ka->sa.sa_restorer; | 512 | restorer = ka->sa.sa_restorer; |
@@ -518,6 +518,10 @@ static int x32_setup_rt_frame(int sig, struct k_sigaction *ka, | |||
518 | put_user_ex(restorer, &frame->pretcode); | 518 | put_user_ex(restorer, &frame->pretcode); |
519 | } put_user_catch(err); | 519 | } put_user_catch(err); |
520 | 520 | ||
521 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, | ||
522 | regs, set->sig[0]); | ||
523 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
524 | |||
521 | if (err) | 525 | if (err) |
522 | return -EFAULT; | 526 | return -EFAULT; |
523 | 527 | ||
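All three signal.c hunks above perform the same move: helpers that do their own user accesses (copy_siginfo_to_user(), setup_sigcontext(), __copy_to_user()) migrate out of the put_user_try/put_user_catch block. With SMAP, that block becomes a single STAC..CLAC window around the raw put_user_ex() stores, so a callee that executes its own CLAC must not run inside it, and batching the stores avoids toggling EFLAGS.AC per access. A freestanding toy model of the hazard (stac()/clac() here only set a flag; they stand in for the real instructions):

	#include <stdio.h>

	static int ac;				/* models EFLAGS.AC */

	static void stac(void) { ac = 1; }	/* models STAC */
	static void clac(void) { ac = 0; }	/* models CLAC */

	/* A helper that brackets its own user access, like __copy_to_user(). */
	static void helper(void) { stac(); /* ...copy... */ clac(); }

	int main(void)
	{
		/* Old shape: helper called inside the open window; its
		 * trailing clac() closes the window prematurely. */
		stac();
		helper();
		printf("next store would run with AC=%d -> SMAP fault\n", ac);
		clac();

		/* New shape: one window for the raw stores, helpers after. */
		stac();
		/* put_user_ex() stores run here with AC == 1 throughout */
		clac();
		helper();
		return 0;
	}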
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 4e89b3dd408d..ada87a329edc 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c | |||
@@ -315,7 +315,7 @@ static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only) | |||
315 | if ((unsigned long)buf % 64 || fx_only) { | 315 | if ((unsigned long)buf % 64 || fx_only) { |
316 | u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE; | 316 | u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE; |
317 | xrstor_state(init_xstate_buf, init_bv); | 317 | xrstor_state(init_xstate_buf, init_bv); |
318 | return fxrstor_checking((__force void *) buf); | 318 | return fxrstor_user(buf); |
319 | } else { | 319 | } else { |
320 | u64 init_bv = pcntxt_mask & ~xbv; | 320 | u64 init_bv = pcntxt_mask & ~xbv; |
321 | if (unlikely(init_bv)) | 321 | if (unlikely(init_bv)) |
@@ -323,9 +323,9 @@ static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only) | |||
323 | return xrestore_user(buf, xbv); | 323 | return xrestore_user(buf, xbv); |
324 | } | 324 | } |
325 | } else if (use_fxsr()) { | 325 | } else if (use_fxsr()) { |
326 | return fxrstor_checking((__force void *) buf); | 326 | return fxrstor_user(buf); |
327 | } else | 327 | } else |
328 | return frstor_checking((__force void *) buf); | 328 | return frstor_user(buf); |
329 | } | 329 | } |
330 | 330 | ||
331 | int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) | 331 | int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) |
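The _checking() restore helpers assume a kernel pointer and run with EFLAGS.AC clear, so handing them a __user pointer would fault the moment SMAP is enforced; the *_user() variants this series substitutes wrap the one restore instruction in a user-access window. The helper they are built on has roughly this shape (the 64-bit fxrstorq case and other variants are elided):

	#define user_insn(insn, output, input...)			\
	({								\
		int err;						\
		asm volatile(ASM_STAC "\n"				\
			     "1:" #insn "\n2: " ASM_CLAC "\n"		\
			     ".section .fixup,\"ax\"\n"			\
			     "3:  movl $-1,%[err]\n"			\
			     "    jmp  2b\n"				\
			     ".previous\n"				\
			     _ASM_EXTABLE(1b, 3b)			\
			     : [err] "=r" (err), output			\
			     : "0"(0), input);				\
		err;							\
	})

	static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
	{
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	}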
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 5b2995f4557a..a30ca15be21c 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/cpufeature.h> | 17 | #include <asm/cpufeature.h> |
18 | #include <asm/alternative-asm.h> | 18 | #include <asm/alternative-asm.h> |
19 | #include <asm/asm.h> | 19 | #include <asm/asm.h> |
20 | #include <asm/smap.h> | ||
20 | 21 | ||
21 | /* | 22 | /* |
22 | * By placing feature2 after feature1 in altinstructions section, we logically | 23 | * By placing feature2 after feature1 in altinstructions section, we logically |
@@ -130,6 +131,7 @@ ENDPROC(bad_from_user) | |||
130 | */ | 131 | */ |
131 | ENTRY(copy_user_generic_unrolled) | 132 | ENTRY(copy_user_generic_unrolled) |
132 | CFI_STARTPROC | 133 | CFI_STARTPROC |
134 | ASM_STAC | ||
133 | cmpl $8,%edx | 135 | cmpl $8,%edx |
134 | jb 20f /* less than 8 bytes, go to byte copy loop */ | 136 |
135 | ALIGN_DESTINATION | 137 | ALIGN_DESTINATION |
@@ -177,6 +179,7 @@ ENTRY(copy_user_generic_unrolled) | |||
177 | decl %ecx | 179 | decl %ecx |
178 | jnz 21b | 180 | jnz 21b |
179 | 23: xor %eax,%eax | 181 | 23: xor %eax,%eax |
182 | ASM_CLAC | ||
180 | ret | 183 | ret |
181 | 184 | ||
182 | .section .fixup,"ax" | 185 | .section .fixup,"ax" |
@@ -232,6 +235,7 @@ ENDPROC(copy_user_generic_unrolled) | |||
232 | */ | 235 | */ |
233 | ENTRY(copy_user_generic_string) | 236 | ENTRY(copy_user_generic_string) |
234 | CFI_STARTPROC | 237 | CFI_STARTPROC |
238 | ASM_STAC | ||
235 | andl %edx,%edx | 239 | andl %edx,%edx |
236 | jz 4f | 240 | jz 4f |
237 | cmpl $8,%edx | 241 | cmpl $8,%edx |
@@ -246,6 +250,7 @@ ENTRY(copy_user_generic_string) | |||
246 | 3: rep | 250 | 3: rep |
247 | movsb | 251 | movsb |
248 | 4: xorl %eax,%eax | 252 | 4: xorl %eax,%eax |
253 | ASM_CLAC | ||
249 | ret | 254 | ret |
250 | 255 | ||
251 | .section .fixup,"ax" | 256 | .section .fixup,"ax" |
@@ -273,12 +278,14 @@ ENDPROC(copy_user_generic_string) | |||
273 | */ | 278 | */ |
274 | ENTRY(copy_user_enhanced_fast_string) | 279 | ENTRY(copy_user_enhanced_fast_string) |
275 | CFI_STARTPROC | 280 | CFI_STARTPROC |
281 | ASM_STAC | ||
276 | andl %edx,%edx | 282 | andl %edx,%edx |
277 | jz 2f | 283 | jz 2f |
278 | movl %edx,%ecx | 284 | movl %edx,%ecx |
279 | 1: rep | 285 | 1: rep |
280 | movsb | 286 | movsb |
281 | 2: xorl %eax,%eax | 287 | 2: xorl %eax,%eax |
288 | ASM_CLAC | ||
282 | ret | 289 | ret |
283 | 290 | ||
284 | .section .fixup,"ax" | 291 | .section .fixup,"ax" |
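Every 64-bit copy primitive gets the same bracketing: ASM_STAC as the first real instruction, ASM_CLAC before each successful return. Both go through the alternatives machinery, so on CPUs without SMAP they assemble as 3-byte NOPs and cost nothing; on SMAP hardware they are patched at boot into the 3-byte stac/clac opcodes (0f 01 cb / 0f 01 ca). The assembly-side macro in the new <asm/smap.h> is approximately:

	#ifdef CONFIG_X86_SMAP

	#define ASM_STAC						\
		661: ASM_NOP3 ;						\
		.pushsection .altinstr_replacement, "ax" ;		\
		662: __ASM_STAC ;					\
		.popsection ;						\
		.pushsection .altinstructions, "a" ;			\
		altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
		.popsection

	#else
	#define ASM_STAC	/* empty: SMAP support compiled out */
	#endif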
diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S index cacddc7163eb..6a4f43c2d9e6 100644 --- a/arch/x86/lib/copy_user_nocache_64.S +++ b/arch/x86/lib/copy_user_nocache_64.S | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/asm-offsets.h> | 15 | #include <asm/asm-offsets.h> |
16 | #include <asm/thread_info.h> | 16 | #include <asm/thread_info.h> |
17 | #include <asm/asm.h> | 17 | #include <asm/asm.h> |
18 | #include <asm/smap.h> | ||
18 | 19 | ||
19 | .macro ALIGN_DESTINATION | 20 | .macro ALIGN_DESTINATION |
20 | #ifdef FIX_ALIGNMENT | 21 | #ifdef FIX_ALIGNMENT |
@@ -48,6 +49,7 @@ | |||
48 | */ | 49 | */ |
49 | ENTRY(__copy_user_nocache) | 50 | ENTRY(__copy_user_nocache) |
50 | CFI_STARTPROC | 51 | CFI_STARTPROC |
52 | ASM_STAC | ||
51 | cmpl $8,%edx | 53 | cmpl $8,%edx |
52 | jb 20f /* less than 8 bytes, go to byte copy loop */ | 54 |
53 | ALIGN_DESTINATION | 55 | ALIGN_DESTINATION |
@@ -95,6 +97,7 @@ ENTRY(__copy_user_nocache) | |||
95 | decl %ecx | 97 | decl %ecx |
96 | jnz 21b | 98 | jnz 21b |
97 | 23: xorl %eax,%eax | 99 | 23: xorl %eax,%eax |
100 | ASM_CLAC | ||
98 | sfence | 101 | sfence |
99 | ret | 102 | ret |
100 | 103 | ||
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index b33b1fb1e6d4..156b9c804670 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <asm/asm-offsets.h> | 33 | #include <asm/asm-offsets.h> |
34 | #include <asm/thread_info.h> | 34 | #include <asm/thread_info.h> |
35 | #include <asm/asm.h> | 35 | #include <asm/asm.h> |
36 | #include <asm/smap.h> | ||
36 | 37 | ||
37 | .text | 38 | .text |
38 | ENTRY(__get_user_1) | 39 | ENTRY(__get_user_1) |
@@ -40,8 +41,10 @@ ENTRY(__get_user_1) | |||
40 | GET_THREAD_INFO(%_ASM_DX) | 41 | GET_THREAD_INFO(%_ASM_DX) |
41 | cmp TI_addr_limit(%_ASM_DX),%_ASM_AX | 42 | cmp TI_addr_limit(%_ASM_DX),%_ASM_AX |
42 | jae bad_get_user | 43 | jae bad_get_user |
44 | ASM_STAC | ||
43 | 1: movzb (%_ASM_AX),%edx | 45 | 1: movzb (%_ASM_AX),%edx |
44 | xor %eax,%eax | 46 | xor %eax,%eax |
47 | ASM_CLAC | ||
45 | ret | 48 | ret |
46 | CFI_ENDPROC | 49 | CFI_ENDPROC |
47 | ENDPROC(__get_user_1) | 50 | ENDPROC(__get_user_1) |
@@ -53,8 +56,10 @@ ENTRY(__get_user_2) | |||
53 | GET_THREAD_INFO(%_ASM_DX) | 56 | GET_THREAD_INFO(%_ASM_DX) |
54 | cmp TI_addr_limit(%_ASM_DX),%_ASM_AX | 57 | cmp TI_addr_limit(%_ASM_DX),%_ASM_AX |
55 | jae bad_get_user | 58 | jae bad_get_user |
59 | ASM_STAC | ||
56 | 2: movzwl -1(%_ASM_AX),%edx | 60 | 2: movzwl -1(%_ASM_AX),%edx |
57 | xor %eax,%eax | 61 | xor %eax,%eax |
62 | ASM_CLAC | ||
58 | ret | 63 | ret |
59 | CFI_ENDPROC | 64 | CFI_ENDPROC |
60 | ENDPROC(__get_user_2) | 65 | ENDPROC(__get_user_2) |
@@ -66,8 +71,10 @@ ENTRY(__get_user_4) | |||
66 | GET_THREAD_INFO(%_ASM_DX) | 71 | GET_THREAD_INFO(%_ASM_DX) |
67 | cmp TI_addr_limit(%_ASM_DX),%_ASM_AX | 72 | cmp TI_addr_limit(%_ASM_DX),%_ASM_AX |
68 | jae bad_get_user | 73 | jae bad_get_user |
74 | ASM_STAC | ||
69 | 3: mov -3(%_ASM_AX),%edx | 75 | 3: mov -3(%_ASM_AX),%edx |
70 | xor %eax,%eax | 76 | xor %eax,%eax |
77 | ASM_CLAC | ||
71 | ret | 78 | ret |
72 | CFI_ENDPROC | 79 | CFI_ENDPROC |
73 | ENDPROC(__get_user_4) | 80 | ENDPROC(__get_user_4) |
@@ -80,8 +87,10 @@ ENTRY(__get_user_8) | |||
80 | GET_THREAD_INFO(%_ASM_DX) | 87 | GET_THREAD_INFO(%_ASM_DX) |
81 | cmp TI_addr_limit(%_ASM_DX),%_ASM_AX | 88 | cmp TI_addr_limit(%_ASM_DX),%_ASM_AX |
82 | jae bad_get_user | 89 | jae bad_get_user |
90 | ASM_STAC | ||
83 | 4: movq -7(%_ASM_AX),%_ASM_DX | 91 | 4: movq -7(%_ASM_AX),%_ASM_DX |
84 | xor %eax,%eax | 92 | xor %eax,%eax |
93 | ASM_CLAC | ||
85 | ret | 94 | ret |
86 | CFI_ENDPROC | 95 | CFI_ENDPROC |
87 | ENDPROC(__get_user_8) | 96 | ENDPROC(__get_user_8) |
@@ -91,6 +100,7 @@ bad_get_user: | |||
91 | CFI_STARTPROC | 100 | CFI_STARTPROC |
92 | xor %edx,%edx | 101 | xor %edx,%edx |
93 | mov $(-EFAULT),%_ASM_AX | 102 | mov $(-EFAULT),%_ASM_AX |
103 | ASM_CLAC | ||
94 | ret | 104 | ret |
95 | CFI_ENDPROC | 105 | CFI_ENDPROC |
96 | END(bad_get_user) | 106 | END(bad_get_user) |
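In getuser.S the window opens only after the TI_addr_limit check, covers exactly one user load, and closes before the return. bad_get_user ends in ASM_CLAC as well: a faulting load reaches it through the exception table with AC still set, so the error path must clear the flag too. The control flow, as a C model reusing the toy stac()/clac() from the signal.c sketch above (names are illustrative):

	#include <errno.h>

	static long get_user_1_model(const unsigned char *uaddr,
				     unsigned long limit, unsigned char *val)
	{
		if ((unsigned long)uaddr >= limit)
			return -EFAULT;	/* bad_get_user runs CLAC too */
		stac();			/* open the user-access window */
		*val = *uaddr;		/* the single user-space load */
		clac();			/* close it on the success path */
		return 0;
	}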
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index 7f951c8f76c4..fc6ba17a7eec 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/thread_info.h> | 15 | #include <asm/thread_info.h> |
16 | #include <asm/errno.h> | 16 | #include <asm/errno.h> |
17 | #include <asm/asm.h> | 17 | #include <asm/asm.h> |
18 | #include <asm/smap.h> | ||
18 | 19 | ||
19 | 20 | ||
20 | /* | 21 | /* |
@@ -31,7 +32,8 @@ | |||
31 | 32 | ||
32 | #define ENTER CFI_STARTPROC ; \ | 33 | #define ENTER CFI_STARTPROC ; \ |
33 | GET_THREAD_INFO(%_ASM_BX) | 34 | GET_THREAD_INFO(%_ASM_BX) |
34 | #define EXIT ret ; \ | 35 | #define EXIT ASM_CLAC ; \ |
36 | ret ; \ | ||
35 | CFI_ENDPROC | 37 | CFI_ENDPROC |
36 | 38 | ||
37 | .text | 39 | .text |
@@ -39,6 +41,7 @@ ENTRY(__put_user_1) | |||
39 | ENTER | 41 | ENTER |
40 | cmp TI_addr_limit(%_ASM_BX),%_ASM_CX | 42 | cmp TI_addr_limit(%_ASM_BX),%_ASM_CX |
41 | jae bad_put_user | 43 | jae bad_put_user |
44 | ASM_STAC | ||
42 | 1: movb %al,(%_ASM_CX) | 45 | 1: movb %al,(%_ASM_CX) |
43 | xor %eax,%eax | 46 | xor %eax,%eax |
44 | EXIT | 47 | EXIT |
@@ -50,6 +53,7 @@ ENTRY(__put_user_2) | |||
50 | sub $1,%_ASM_BX | 53 | sub $1,%_ASM_BX |
51 | cmp %_ASM_BX,%_ASM_CX | 54 | cmp %_ASM_BX,%_ASM_CX |
52 | jae bad_put_user | 55 | jae bad_put_user |
56 | ASM_STAC | ||
53 | 2: movw %ax,(%_ASM_CX) | 57 | 2: movw %ax,(%_ASM_CX) |
54 | xor %eax,%eax | 58 | xor %eax,%eax |
55 | EXIT | 59 | EXIT |
@@ -61,6 +65,7 @@ ENTRY(__put_user_4) | |||
61 | sub $3,%_ASM_BX | 65 | sub $3,%_ASM_BX |
62 | cmp %_ASM_BX,%_ASM_CX | 66 | cmp %_ASM_BX,%_ASM_CX |
63 | jae bad_put_user | 67 | jae bad_put_user |
68 | ASM_STAC | ||
64 | 3: movl %eax,(%_ASM_CX) | 69 | 3: movl %eax,(%_ASM_CX) |
65 | xor %eax,%eax | 70 | xor %eax,%eax |
66 | EXIT | 71 | EXIT |
@@ -72,6 +77,7 @@ ENTRY(__put_user_8) | |||
72 | sub $7,%_ASM_BX | 77 | sub $7,%_ASM_BX |
73 | cmp %_ASM_BX,%_ASM_CX | 78 | cmp %_ASM_BX,%_ASM_CX |
74 | jae bad_put_user | 79 | jae bad_put_user |
80 | ASM_STAC | ||
75 | 4: mov %_ASM_AX,(%_ASM_CX) | 81 | 4: mov %_ASM_AX,(%_ASM_CX) |
76 | #ifdef CONFIG_X86_32 | 82 | #ifdef CONFIG_X86_32 |
77 | 5: movl %edx,4(%_ASM_CX) | 83 | 5: movl %edx,4(%_ASM_CX) |
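putuser.S centralizes the same rule in its EXIT macro, so every return path -- including bad_put_user, which also exits through EXIT -- clears AC first. As the hunk above shows, the macro is now simply:

	#define EXIT	ASM_CLAC ;	\
			ret ;		\
			CFI_ENDPROC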
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 1781b2f950e2..98f6d6b68f5a 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c | |||
@@ -42,10 +42,11 @@ do { \ | |||
42 | int __d0; \ | 42 | int __d0; \ |
43 | might_fault(); \ | 43 | might_fault(); \ |
44 | __asm__ __volatile__( \ | 44 | __asm__ __volatile__( \ |
45 | ASM_STAC "\n" \ | ||
45 | "0: rep; stosl\n" \ | 46 | "0: rep; stosl\n" \ |
46 | " movl %2,%0\n" \ | 47 | " movl %2,%0\n" \ |
47 | "1: rep; stosb\n" \ | 48 | "1: rep; stosb\n" \ |
48 | "2:\n" \ | 49 | "2: " ASM_CLAC "\n" \ |
49 | ".section .fixup,\"ax\"\n" \ | 50 | ".section .fixup,\"ax\"\n" \ |
50 | "3: lea 0(%2,%0,4),%0\n" \ | 51 | "3: lea 0(%2,%0,4),%0\n" \ |
51 | " jmp 2b\n" \ | 52 | " jmp 2b\n" \ |
@@ -626,10 +627,12 @@ survive: | |||
626 | return n; | 627 | return n; |
627 | } | 628 | } |
628 | #endif | 629 | #endif |
630 | stac(); | ||
629 | if (movsl_is_ok(to, from, n)) | 631 | if (movsl_is_ok(to, from, n)) |
630 | __copy_user(to, from, n); | 632 | __copy_user(to, from, n); |
631 | else | 633 | else |
632 | n = __copy_user_intel(to, from, n); | 634 | n = __copy_user_intel(to, from, n); |
635 | clac(); | ||
633 | return n; | 636 | return n; |
634 | } | 637 | } |
635 | EXPORT_SYMBOL(__copy_to_user_ll); | 638 | EXPORT_SYMBOL(__copy_to_user_ll); |
@@ -637,10 +640,12 @@ EXPORT_SYMBOL(__copy_to_user_ll); | |||
637 | unsigned long __copy_from_user_ll(void *to, const void __user *from, | 640 | unsigned long __copy_from_user_ll(void *to, const void __user *from, |
638 | unsigned long n) | 641 | unsigned long n) |
639 | { | 642 | { |
643 | stac(); | ||
640 | if (movsl_is_ok(to, from, n)) | 644 | if (movsl_is_ok(to, from, n)) |
641 | __copy_user_zeroing(to, from, n); | 645 | __copy_user_zeroing(to, from, n); |
642 | else | 646 | else |
643 | n = __copy_user_zeroing_intel(to, from, n); | 647 | n = __copy_user_zeroing_intel(to, from, n); |
648 | clac(); | ||
644 | return n; | 649 | return n; |
645 | } | 650 | } |
646 | EXPORT_SYMBOL(__copy_from_user_ll); | 651 | EXPORT_SYMBOL(__copy_from_user_ll); |
@@ -648,11 +653,13 @@ EXPORT_SYMBOL(__copy_from_user_ll); | |||
648 | unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from, | 653 | unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from, |
649 | unsigned long n) | 654 | unsigned long n) |
650 | { | 655 | { |
656 | stac(); | ||
651 | if (movsl_is_ok(to, from, n)) | 657 | if (movsl_is_ok(to, from, n)) |
652 | __copy_user(to, from, n); | 658 | __copy_user(to, from, n); |
653 | else | 659 | else |
654 | n = __copy_user_intel((void __user *)to, | 660 | n = __copy_user_intel((void __user *)to, |
655 | (const void *)from, n); | 661 | (const void *)from, n); |
662 | clac(); | ||
656 | return n; | 663 | return n; |
657 | } | 664 | } |
658 | EXPORT_SYMBOL(__copy_from_user_ll_nozero); | 665 | EXPORT_SYMBOL(__copy_from_user_ll_nozero); |
@@ -660,6 +667,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nozero); | |||
660 | unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from, | 667 | unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from, |
661 | unsigned long n) | 668 | unsigned long n) |
662 | { | 669 | { |
670 | stac(); | ||
663 | #ifdef CONFIG_X86_INTEL_USERCOPY | 671 | #ifdef CONFIG_X86_INTEL_USERCOPY |
664 | if (n > 64 && cpu_has_xmm2) | 672 | if (n > 64 && cpu_has_xmm2) |
665 | n = __copy_user_zeroing_intel_nocache(to, from, n); | 673 | n = __copy_user_zeroing_intel_nocache(to, from, n); |
@@ -668,6 +676,7 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from, | |||
668 | #else | 676 | #else |
669 | __copy_user_zeroing(to, from, n); | 677 | __copy_user_zeroing(to, from, n); |
670 | #endif | 678 | #endif |
679 | clac(); | ||
671 | return n; | 680 | return n; |
672 | } | 681 | } |
673 | EXPORT_SYMBOL(__copy_from_user_ll_nocache); | 682 | EXPORT_SYMBOL(__copy_from_user_ll_nocache); |
@@ -675,6 +684,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache); | |||
675 | unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, | 684 | unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, |
676 | unsigned long n) | 685 | unsigned long n) |
677 | { | 686 | { |
687 | stac(); | ||
678 | #ifdef CONFIG_X86_INTEL_USERCOPY | 688 | #ifdef CONFIG_X86_INTEL_USERCOPY |
679 | if (n > 64 && cpu_has_xmm2) | 689 | if (n > 64 && cpu_has_xmm2) |
680 | n = __copy_user_intel_nocache(to, from, n); | 690 | n = __copy_user_intel_nocache(to, from, n); |
@@ -683,6 +693,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr | |||
683 | #else | 693 | #else |
684 | __copy_user(to, from, n); | 694 | __copy_user(to, from, n); |
685 | #endif | 695 | #endif |
696 | clac(); | ||
686 | return n; | 697 | return n; |
687 | } | 698 | } |
688 | EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); | 699 | EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); |
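The C routines in usercopy_32.c bracket their bodies with stac()/clac() function calls instead of the assembler macros. These come from the same <asm/smap.h>, compile away entirely without CONFIG_X86_SMAP, and are alternatives-patched just like ASM_STAC/ASM_CLAC; their shape is roughly:

	static __always_inline void stac(void)
	{
		/* Note: a barrier is also required! */
		alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
	}

	static __always_inline void clac(void)
	{
		/* Note: a barrier is also required! */
		alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
	}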
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index e5b130bc2d0e..05928aae911e 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c | |||
@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size) | |||
18 | might_fault(); | 18 | might_fault(); |
19 | /* no memory constraint because it doesn't change any memory gcc knows | 19 | /* no memory constraint because it doesn't change any memory gcc knows |
20 | about */ | 20 | about */ |
21 | stac(); | ||
21 | asm volatile( | 22 | asm volatile( |
22 | " testq %[size8],%[size8]\n" | 23 | " testq %[size8],%[size8]\n" |
23 | " jz 4f\n" | 24 | " jz 4f\n" |
@@ -40,6 +41,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size) | |||
40 | : [size8] "=&c"(size), [dst] "=&D" (__d0) | 41 | : [size8] "=&c"(size), [dst] "=&D" (__d0) |
41 | : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr), | 42 | : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr), |
42 | [zero] "r" (0UL), [eight] "r" (8UL)); | 43 | [zero] "r" (0UL), [eight] "r" (8UL)); |
44 | clac(); | ||
43 | return size; | 45 | return size; |
44 | } | 46 | } |
45 | EXPORT_SYMBOL(__clear_user); | 47 | EXPORT_SYMBOL(__clear_user); |
@@ -82,5 +84,6 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest) | |||
82 | for (c = 0, zero_len = len; zerorest && zero_len; --zero_len) | 84 | for (c = 0, zero_len = len; zerorest && zero_len; --zero_len) |
83 | if (__put_user_nocheck(c, to++, sizeof(char))) | 85 | if (__put_user_nocheck(c, to++, sizeof(char))) |
84 | break; | 86 | break; |
87 | clac(); | ||
85 | return len; | 88 | return len; |
86 | } | 89 | } |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 7dde46d68a25..a530b230e7d7 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -996,6 +996,17 @@ static int fault_in_kernel_space(unsigned long address) | |||
996 | return address >= TASK_SIZE_MAX; | 996 | return address >= TASK_SIZE_MAX; |
997 | } | 997 | } |
998 | 998 | ||
999 | static inline bool smap_violation(int error_code, struct pt_regs *regs) | ||
1000 | { | ||
1001 | if (error_code & PF_USER) | ||
1002 | return false; | ||
1003 | |||
1004 | if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC)) | ||
1005 | return false; | ||
1006 | |||
1007 | return true; | ||
1008 | } | ||
1009 | |||
999 | /* | 1010 | /* |
1000 | * This routine handles page faults. It determines the address, | 1011 | * This routine handles page faults. It determines the address, |
1001 | * and the problem, and then passes it off to one of the appropriate | 1012 | * and the problem, and then passes it off to one of the appropriate |
@@ -1089,6 +1100,13 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
1089 | if (unlikely(error_code & PF_RSVD)) | 1100 | if (unlikely(error_code & PF_RSVD)) |
1090 | pgtable_bad(regs, error_code, address); | 1101 | pgtable_bad(regs, error_code, address); |
1091 | 1102 | ||
1103 | if (static_cpu_has(X86_FEATURE_SMAP)) { | ||
1104 | if (unlikely(smap_violation(error_code, regs))) { | ||
1105 | bad_area_nosemaphore(regs, error_code, address); | ||
1106 | return; | ||
1107 | } | ||
1108 | } | ||
1109 | |||
1092 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | 1110 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
1093 | 1111 | ||
1094 | /* | 1112 | /* |
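smap_violation() is consulted only when static_cpu_has(X86_FEATURE_SMAP), and only for faults at user-space addresses (kernel-address faults take the earlier fault_in_kernel_space() path visible above). Its verdict reduces to a small table (a summary of the code above, not new logic):

	/*
	 *   PF_USER  EFLAGS.AC  result
	 *   -------  ---------  -----------------------------------------
	 *   set      any        fault came from user mode: not a violation
	 *   clear    set        kernel inside a STAC window: legitimate
	 *   clear    clear      unannounced kernel touch of user memory:
	 *                       SMAP violation, treated as an oops
	 */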
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 4f04db150027..11a58001b4ce 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -709,7 +709,7 @@ static void __init test_wp_bit(void) | |||
709 | "Checking if this processor honours the WP bit even in supervisor mode..."); | 709 | "Checking if this processor honours the WP bit even in supervisor mode..."); |
710 | 710 | ||
711 | /* Any page-aligned address will do, the test is non-destructive */ | 711 | /* Any page-aligned address will do, the test is non-destructive */ |
712 | __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY); | 712 | __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_KERNEL_RO); |
713 | boot_cpu_data.wp_works_ok = do_test_wp_bit(); | 713 | boot_cpu_data.wp_works_ok = do_test_wp_bit(); |
714 | clear_fixmap(FIX_WP_TEST); | 714 | clear_fixmap(FIX_WP_TEST); |
715 | 715 | ||
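The one-line change in test_wp_bit() keeps the WP probe meaningful once SMAP is on: PAGE_READONLY includes _PAGE_USER, so the supervisor-mode test write would fault because of SMAP whether or not the CPU honours WP. PAGE_KERNEL_RO maps the fixmap slot as a supervisor page, leaving WP as the only thing that can fault the write. In pgprot terms (flag lists abbreviated):

	/* PAGE_READONLY  ~ _PAGE_PRESENT | _PAGE_USER | ...
	 *                  -> a user page; SMAP intercepts supervisor writes
	 * PAGE_KERNEL_RO ~ _PAGE_PRESENT | ... (no _PAGE_USER)
	 *                  -> a supervisor page; only WP can fault the write
	 */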
diff --git a/arch/x86/realmode/rm/wakeup.h b/arch/x86/realmode/rm/wakeup.h index 9317e0042f24..7dd86a419f5d 100644 --- a/arch/x86/realmode/rm/wakeup.h +++ b/arch/x86/realmode/rm/wakeup.h | |||
@@ -36,5 +36,7 @@ extern struct wakeup_header wakeup_header; | |||
36 | 36 | ||
37 | /* Wakeup behavior bits */ | 37 | /* Wakeup behavior bits */ |
38 | #define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE 0 | 38 | #define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE 0 |
39 | #define WAKEUP_BEHAVIOR_RESTORE_CR4 1 | ||
40 | #define WAKEUP_BEHAVIOR_RESTORE_EFER 2 | ||
39 | 41 | ||
40 | #endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */ | 42 | #endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */ |
diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S index 8905166b0bbb..e56479e58053 100644 --- a/arch/x86/realmode/rm/wakeup_asm.S +++ b/arch/x86/realmode/rm/wakeup_asm.S | |||
@@ -74,9 +74,18 @@ ENTRY(wakeup_start) | |||
74 | 74 | ||
75 | lidtl wakeup_idt | 75 | lidtl wakeup_idt |
76 | 76 | ||
77 | /* Clear the EFLAGS */ | 77 | /* Clear the EFLAGS but remember if we have EFLAGS.ID */ |
78 | pushl $0 | 78 | movl $X86_EFLAGS_ID, %ecx |
79 | pushl %ecx | ||
79 | popfl | 80 | popfl |
81 | pushfl | ||
82 | popl %edi | ||
83 | pushl $0 | ||
84 | popfl | ||
85 | pushfl | ||
86 | popl %edx | ||
87 | xorl %edx, %edi | ||
88 | andl %ecx, %edi /* %edi is zero iff CPUID & %cr4 are missing */ | ||
80 | 89 | ||
81 | /* Check header signature... */ | 90 | /* Check header signature... */ |
82 | movl signature, %eax | 91 | movl signature, %eax |
@@ -93,8 +102,8 @@ ENTRY(wakeup_start) | |||
93 | 102 | ||
94 | /* Restore MISC_ENABLE before entering protected mode, in case | 103 | /* Restore MISC_ENABLE before entering protected mode, in case |
95 | BIOS decided to clear XD_DISABLE during S3. */ | 104 | BIOS decided to clear XD_DISABLE during S3. */ |
96 | movl pmode_behavior, %eax | 105 | movl pmode_behavior, %edi |
97 | btl $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %eax | 106 | btl $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %edi |
98 | jnc 1f | 107 | jnc 1f |
99 | 108 | ||
100 | movl pmode_misc_en, %eax | 109 | movl pmode_misc_en, %eax |
@@ -110,15 +119,15 @@ ENTRY(wakeup_start) | |||
110 | movl pmode_cr3, %eax | 119 | movl pmode_cr3, %eax |
111 | movl %eax, %cr3 | 120 | movl %eax, %cr3 |
112 | 121 | ||
113 | movl pmode_cr4, %ecx | 122 | btl $WAKEUP_BEHAVIOR_RESTORE_CR4, %edi |
114 | jecxz 1f | 123 | jnc 1f |
115 | movl %ecx, %cr4 | 124 | movl pmode_cr4, %eax |
125 | movl %eax, %cr4 | ||
116 | 1: | 126 | 1: |
127 | btl $WAKEUP_BEHAVIOR_RESTORE_EFER, %edi | ||
128 | jnc 1f | ||
117 | movl pmode_efer, %eax | 129 | movl pmode_efer, %eax |
118 | movl pmode_efer + 4, %edx | 130 | movl pmode_efer + 4, %edx |
119 | movl %eax, %ecx | ||
120 | orl %edx, %ecx | ||
121 | jz 1f | ||
122 | movl $MSR_EFER, %ecx | 131 | movl $MSR_EFER, %ecx |
123 | wrmsr | 132 | wrmsr |
124 | 1: | 133 | 1: |
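The wakeup stub thus stops inferring "restore CR4/EFER" from the saved values themselves and instead trusts behavior bits (defined in wakeup.h above) that the suspend side sets only when it actually sampled those registers. A sketch of the save-side contract (the exact sleep.c code is elided; cpu_has_cpuid() is a hypothetical stand-in):

	header->pmode_behavior = 0;

	if (cpu_has_cpuid()) {		/* hypothetical helper */
		header->pmode_cr4 = read_cr4();
		header->pmode_behavior |= 1 << WAKEUP_BEHAVIOR_RESTORE_CR4;
	}

	/* rdmsr_safe() returns nonzero if the MSR faults (no EFER). */
	if (!rdmsr_safe(MSR_EFER, &header->pmode_efer_low,
			&header->pmode_efer_high))
		header->pmode_behavior |= 1 << WAKEUP_BEHAVIOR_RESTORE_EFER;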