author      H. Peter Anvin <hpa@linux.intel.com>    2012-09-26 21:02:28 -0400
committer   H. Peter Anvin <hpa@linux.intel.com>    2012-09-27 12:52:38 -0400
commit      b2cc2a074de75671bbed5e2dda67a9252ef353ea (patch)
tree        8e01fcacb8bad82c4351fa82b351ecc185fe8056 /arch/x86/kernel
parent      73201dbec64aebf6b0dca855b523f437972dc7bb (diff)
x86, smep, smap: Make the switching functions one-way
There is no fundamental reason why we should switch SMEP and SMAP on during early cpu initialization just to switch them off again. Now with %eflags and %cr4 forced to be initialized to a clean state, we only need the one-way enable. Also, make the functions inline to make them (somewhat) harder to abuse.

This does mean that SMEP and SMAP do not get initialized anywhere near as early. Even using early_param() instead of __setup() doesn't give us control early enough to do this during the early cpu initialization phase. This seems reasonable to me, because SMEP and SMAP should not matter until we have userspace to protect ourselves from, but it does potentially make it possible for a bug involving a "leak of permissions to userspace" to get uncaught.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
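In short, each "nosmep"/"nosmap" boot parameter handler now only clears the CPUID capability bit, and the per-CPU setup path becomes a pure one-way enable. A condensed C sketch of the new shape, lifted from the SMEP half of the diff below (the SMAP variant additionally sanity-checks EFLAGS.AC, see the first hunk):

static __init int setup_disable_smep(char *arg)
{
        /* Clearing the capability is enough: the feature then never gets enabled */
        setup_clear_cpu_cap(X86_FEATURE_SMEP);
        return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
        /* One-way: set CR4.SMEP if the feature bit survived, never clear it */
        if (cpu_has(c, X86_FEATURE_SMEP))
                set_in_cr4(X86_CR4_SMEP);
}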
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--    arch/x86/kernel/cpu/common.c    49
1 file changed, 18 insertions(+), 31 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 44aec5d4dfaf..fefd9b7e93e1 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -259,48 +259,36 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 }
 #endif
 
-static int disable_smep __cpuinitdata;
 static __init int setup_disable_smep(char *arg)
 {
-        disable_smep = 1;
+        setup_clear_cpu_cap(X86_FEATURE_SMEP);
         return 1;
 }
 __setup("nosmep", setup_disable_smep);
 
-static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
+static __always_inline void setup_smep(struct cpuinfo_x86 *c)
 {
-        if (cpu_has(c, X86_FEATURE_SMEP)) {
-                if (unlikely(disable_smep)) {
-                        setup_clear_cpu_cap(X86_FEATURE_SMEP);
-                        clear_in_cr4(X86_CR4_SMEP);
-                } else
-                        set_in_cr4(X86_CR4_SMEP);
-        }
+        if (cpu_has(c, X86_FEATURE_SMEP))
+                set_in_cr4(X86_CR4_SMEP);
 }
 
-static int disable_smap __cpuinitdata;
 static __init int setup_disable_smap(char *arg)
 {
-        disable_smap = 1;
+        setup_clear_cpu_cap(X86_FEATURE_SMAP);
         return 1;
 }
 __setup("nosmap", setup_disable_smap);
 
-static __cpuinit void setup_smap(struct cpuinfo_x86 *c)
+static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 {
-        if (cpu_has(c, X86_FEATURE_SMAP)) {
-                if (unlikely(disable_smap)) {
-                        setup_clear_cpu_cap(X86_FEATURE_SMAP);
-                        clear_in_cr4(X86_CR4_SMAP);
-                } else {
-                        set_in_cr4(X86_CR4_SMAP);
-                        /*
-                         * Don't use clac() here since alternatives
-                         * haven't run yet...
-                         */
-                        asm volatile(__stringify(__ASM_CLAC) ::: "memory");
-                }
-        }
+        unsigned long eflags;
+
+        /* This should have been cleared long ago */
+        raw_local_save_flags(eflags);
+        BUG_ON(eflags & X86_EFLAGS_AC);
+
+        if (cpu_has(c, X86_FEATURE_SMAP))
+                set_in_cr4(X86_CR4_SMAP);
 }
 
 /*
@@ -737,9 +725,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
         c->cpu_index = 0;
         filter_cpuid_features(c, false);
 
-        setup_smep(c);
-        setup_smap(c);
-
         if (this_cpu->c_bsp_init)
                 this_cpu->c_bsp_init(c);
 }
@@ -824,8 +809,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
                 c->phys_proc_id = c->initial_apicid;
         }
 
-        setup_smep(c);
-
         get_model_name(c); /* Default name */
 
         detect_nopl(c);
@@ -890,6 +873,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
         /* Disable the PN if appropriate */
         squash_the_stupid_serial_number(c);
 
+        /* Set up SMEP/SMAP */
+        setup_smep(c);
+        setup_smap(c);
+
         /*
          * The vendor-specific functions might have changed features.
          * Now we do "generic changes."
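For context on where the one-way enables now happen: the last hunk places them in identify_cpu(), right after squash_the_stupid_serial_number() and just before the generic feature fix-ups. A condensed sketch of the resulting flow, based only on the hunk above (everything not touched by this patch is elided):

static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        /* ... earlier identification and vendor-specific init elided ... */

        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);

        /* Set up SMEP/SMAP (one-way enables, see above) */
        setup_smep(c);
        setup_smap(c);

        /*
         * The vendor-specific functions might have changed features.
         * Now we do "generic changes."
         */
        /* ... remainder elided ... */
}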