path: root/arch/x86/kernel/cpu/common.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2015-02-16 17:58:12 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-02-16 17:58:12 -0500
commit    37507717de51a8332a34ee07fd88700be88df5bf (patch)
tree      d6eb5d00a798a4b1ce40c8c4c8ca74b0d22fe1df /arch/x86/kernel/cpu/common.c
parent    a68fb48380bb993306dd62a58cbd946b4348222a (diff)
parent    a66734297f78707ce39d756b656bfae861d53f62 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 perf updates from Ingo Molnar:
 "This series tightens up RDPMC permissions: currently even highly
  sandboxed x86 execution environments (such as seccomp) have permission
  to execute RDPMC, which may leak various perf events / PMU state such
  as timing information and other CPU execution details.

  This 'all is allowed' RDPMC mode is still preserved as the
  (non-default) /sys/devices/cpu/rdpmc=2 setting.  The new default is
  that RDPMC access is only allowed if a perf event is mmap-ed (which is
  needed to correctly interpret RDPMC counter values in any case).

  As a side effect of these changes CR4 handling is cleaned up in the
  x86 code and a shadow copy of the CR4 value is added.

  The extra CR4 manipulation adds ~ <50ns to the context switch cost
  between rdpmc-capable and rdpmc-non-capable mms"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86: Add /sys/devices/cpu/rdpmc=2 to allow rdpmc for all tasks
  perf/x86: Only allow rdpmc if a perf_event is mapped
  perf: Pass the event to arch_perf_update_userpage()
  perf: Add pmu callbacks to track event mapping and unmapping
  x86: Add a comment clarifying LDT context switching
  x86: Store a per-cpu shadow copy of CR4
  x86: Clean up cr4 manipulation
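For context on the new default, a self-monitoring task that wants RDPMC access under rdpmc=1 follows roughly the sketch below: open a perf event, mmap it (the mmap is what grants the task's mm RDPMC permission under this series), then read the counter directly. This is a simplified illustration, not kernel code: error handling is minimal, the workload loop is arbitrary, and the seqlock/offset/pmc_width handling documented in struct perf_event_mmap_page is reduced to its essentials.

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static inline uint64_t rdpmc(uint32_t counter)
{
	uint32_t lo, hi;

	/* RDPMC takes the counter number in ECX, returns EDX:EAX. */
	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return (uint64_t)hi << 32 | lo;
}

int main(void)
{
	struct perf_event_attr attr;
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.exclude_kernel = 1;

	/* Count instructions of this task, on any CPU. */
	int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/*
	 * mmap-ing the event is what enables RDPMC for this mm under
	 * the new default (/sys/devices/cpu/rdpmc == 1); the mapped
	 * page also carries the counter index RDPMC needs.
	 */
	struct perf_event_mmap_page *pc =
		mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	if (!pc->cap_user_rdpmc || pc->index == 0) {
		fprintf(stderr, "RDPMC not available for this event\n");
		return 1;
	}

	/* pc->index is the hardware counter number plus one. */
	uint64_t before = rdpmc(pc->index - 1);
	for (volatile int i = 0; i < 1000000; i++)
		;	/* workload being measured */
	uint64_t after = rdpmc(pc->index - 1);

	printf("~%llu instructions retired\n",
	       (unsigned long long)(after - before));
	close(fd);
	return 0;
}

A robust reader additionally retries under the pc->lock seqlock, adds pc->offset to the raw value, and sign-extends to pc->pmc_width bits, since hardware counters are narrower than 64 bits.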
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--  arch/x86/kernel/cpu/common.c  17
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index b15bffcaba6d..b5c8ff5e9dfc 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -19,6 +19,7 @@
 #include <asm/archrandom.h>
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
+#include <asm/tlbflush.h>
 #include <asm/debugreg.h>
 #include <asm/sections.h>
 #include <asm/vsyscall.h>
@@ -278,7 +279,7 @@ __setup("nosmep", setup_disable_smep);
278static __always_inline void setup_smep(struct cpuinfo_x86 *c) 279static __always_inline void setup_smep(struct cpuinfo_x86 *c)
279{ 280{
280 if (cpu_has(c, X86_FEATURE_SMEP)) 281 if (cpu_has(c, X86_FEATURE_SMEP))
281 set_in_cr4(X86_CR4_SMEP); 282 cr4_set_bits(X86_CR4_SMEP);
282} 283}
283 284
284static __init int setup_disable_smap(char *arg) 285static __init int setup_disable_smap(char *arg)
@@ -298,9 +299,9 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 
 	if (cpu_has(c, X86_FEATURE_SMAP)) {
 #ifdef CONFIG_X86_SMAP
-		set_in_cr4(X86_CR4_SMAP);
+		cr4_set_bits(X86_CR4_SMAP);
 #else
-		clear_in_cr4(X86_CR4_SMAP);
+		cr4_clear_bits(X86_CR4_SMAP);
 #endif
 	}
 }
@@ -1295,6 +1296,12 @@ void cpu_init(void)
 	wait_for_master_cpu(cpu);
 
 	/*
+	 * Initialize the CR4 shadow before doing anything that could
+	 * try to read it.
+	 */
+	cr4_init_shadow();
+
+	/*
 	 * Load microcode on this cpu if a valid microcode is available.
 	 * This is early microcode loading procedure.
 	 */
@@ -1313,7 +1320,7 @@ void cpu_init(void)
1313 1320
1314 pr_debug("Initializing CPU#%d\n", cpu); 1321 pr_debug("Initializing CPU#%d\n", cpu);
1315 1322
1316 clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 1323 cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1317 1324
1318 /* 1325 /*
1319 * Initialize the per-CPU GDT with the boot GDT, 1326 * Initialize the per-CPU GDT with the boot GDT,
@@ -1394,7 +1401,7 @@ void cpu_init(void)
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
 
 	if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
-		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
 	load_current_idt();
 	switch_to_new_gdt(cpu);
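The CR4 cleanup visible in the hunks above replaces direct set_in_cr4()/clear_in_cr4() calls with cr4_set_bits()/cr4_clear_bits(), which go through a per-cpu shadow copy: reads never touch the register, and redundant writes are skipped. The toy model below illustrates that pattern in plain C. It is a sketch of the idea only; the names mimic the new helpers, but the real implementations live in <asm/tlbflush.h> and use per-cpu storage and the actual mov-to-CR4 instruction.

#include <stdio.h>

static unsigned long hw_cr4;      /* stands in for the real CR4 register */
static unsigned long cr4_shadow;  /* per-cpu variable in the real kernel */

static void write_hw_cr4(unsigned long val)
{
	/* In the kernel this is the expensive mov-to-CR4 instruction. */
	hw_cr4 = val;
}

static void cr4_init_shadow(void)
{
	/* Read the register once, before anyone consults the shadow. */
	cr4_shadow = hw_cr4;
}

static unsigned long cr4_read_shadow(void)
{
	return cr4_shadow;	/* no register access needed */
}

static void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4 = cr4_shadow;

	if ((cr4 | mask) != cr4) {	/* skip redundant writes */
		cr4 |= mask;
		cr4_shadow = cr4;
		write_hw_cr4(cr4);
	}
}

static void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4 = cr4_shadow;

	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		cr4_shadow = cr4;
		write_hw_cr4(cr4);
	}
}

int main(void)
{
	cr4_init_shadow();
	cr4_set_bits(1UL << 20);	/* e.g. X86_CR4_SMEP */
	cr4_clear_bits(1UL << 2);	/* e.g. X86_CR4_TSD */
	printf("shadow=%#lx hw=%#lx\n", cr4_read_shadow(), hw_cr4);
	return 0;
}

The shadow is what keeps the context-switch overhead quoted in the changelog small: toggling X86_CR4_PCE between rdpmc-capable and rdpmc-non-capable mms only hits the register when the bit actually changes.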