author		Linus Torvalds <torvalds@linux-foundation.org>	2015-02-16 17:58:12 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-16 17:58:12 -0500
commit		37507717de51a8332a34ee07fd88700be88df5bf (patch)
tree		d6eb5d00a798a4b1ce40c8c4c8ca74b0d22fe1df /arch/x86/mm
parent		a68fb48380bb993306dd62a58cbd946b4348222a (diff)
parent		a66734297f78707ce39d756b656bfae861d53f62 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 perf updates from Ingo Molnar:
 "This series tightens up RDPMC permissions: currently even highly
  sandboxed x86 execution environments (such as seccomp) have permission
  to execute RDPMC, which may leak various perf events / PMU state such
  as timing information and other CPU execution details.

  This 'all is allowed' RDPMC mode is still preserved as the
  (non-default) /sys/devices/cpu/rdpmc=2 setting.  The new default is
  that RDPMC access is only allowed if a perf event is mmap-ed (which is
  needed to correctly interpret RDPMC counter values in any case).

  As a side effect of these changes CR4 handling is cleaned up in the
  x86 code and a shadow copy of the CR4 value is added.

  The extra CR4 manipulation adds ~ <50ns to the context switch cost
  between rdpmc-capable and rdpmc-non-capable mms"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86: Add /sys/devices/cpu/rdpmc=2 to allow rdpmc for all tasks
  perf/x86: Only allow rdpmc if a perf_event is mapped
  perf: Pass the event to arch_perf_update_userpage()
  perf: Add pmu callbacks to track event mapping and unmapping
  x86: Add a comment clarifying LDT context switching
  x86: Store a per-cpu shadow copy of CR4
  x86: Clean up cr4 manipulation
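The new default ties RDPMC use to having the event's user page mmap-ed, which is also how a counter is meant to be read from user space in the first place. Below is a minimal, hedged sketch of that self-monitoring read sequence using the documented perf_event_mmap_page fields; the event choice, exclude_kernel setting and error handling are illustrative only, not part of the series:

/* Hedged sketch: user-space self-monitoring read via RDPMC + mmap-ed perf page. */
#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static inline uint64_t rdpmc(unsigned int counter)
{
	unsigned int lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return lo | ((uint64_t)hi << 32);
}

int main(void)
{
	struct perf_event_attr attr;
	struct perf_event_mmap_page *pc;
	uint64_t count = 0;
	uint32_t seq;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;		/* illustrative event choice */
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.exclude_kernel = 1;

	fd = (int)perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* Mapping the event is what grants this mm RDPMC access under the new default. */
	pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Lockless read: retry if the kernel updated the page underneath us. */
	do {
		seq = pc->lock;
		__sync_synchronize();
		if (!pc->cap_user_rdpmc || pc->index == 0)
			break;			/* counter not readable from user space */
		count = pc->offset + rdpmc(pc->index - 1);
		__sync_synchronize();
	} while (pc->lock != seq);

	printf("instructions counted so far: %llu\n", (unsigned long long)count);
	return 0;
}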
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/fault.c   2
-rw-r--r--  arch/x86/mm/init.c   13
-rw-r--r--  arch/x86/mm/tlb.c     3
3 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e3ff27a5b634..ede025fb46f1 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -600,7 +600,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
 		if (pte && pte_present(*pte) && pte_exec(*pte) &&
 				(pgd_flags(*pgd) & _PAGE_USER) &&
-				(read_cr4() & X86_CR4_SMEP))
+				(__read_cr4() & X86_CR4_SMEP))
 			printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
 	}
 
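The only change in this hunk is read_cr4() becoming __read_cr4(). As I read the series (hedged), the double-underscore names are the raw register accessors kept for the few places that really want the hardware value, while most other callers move to the new per-cpu shadow; roughly:

/* Hedged sketch -- approximate shape of the renamed raw accessor (non-paravirt case). */
static inline unsigned long __read_cr4(void)
{
	return native_read_cr4();	/* direct CR4 read, not the shadow copy */
}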
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 649da47d3827..553c094b9cd7 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -173,11 +173,11 @@ static void __init probe_page_size_mask(void)
 
 	/* Enable PSE if available */
 	if (cpu_has_pse)
-		set_in_cr4(X86_CR4_PSE);
+		cr4_set_bits_and_update_boot(X86_CR4_PSE);
 
 	/* Enable PGE if available */
 	if (cpu_has_pge) {
-		set_in_cr4(X86_CR4_PGE);
+		cr4_set_bits_and_update_boot(X86_CR4_PGE);
 		__supported_pte_mask |= _PAGE_GLOBAL;
 	}
 }
@@ -713,6 +713,15 @@ void __init zone_sizes_init(void)
 	free_area_init_nodes(max_zone_pfns);
 }
 
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+#ifdef CONFIG_SMP
+	.active_mm = &init_mm,
+	.state = 0,
+#endif
+	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
+};
+EXPORT_SYMBOL_GPL(cpu_tlbstate);
+
 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
 {
 	/* entry 0 MUST be WB (hardwired to speed up translations) */
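The cpu_tlbstate definition added above grows a cr4 field: the per-cpu shadow copy mentioned in the merge message. The cr4_set_bits_and_update_boot() calls in the earlier probe_page_size_mask() hunk go through helpers that keep this shadow in sync with the register. A rough, hedged sketch follows; the names come from the hunks, the bodies are an approximation, and the mmu_cr4_features bookkeeping is an assumption carried over from the old set_in_cr4():

/* Hedged sketch -- shadowed CR4 bit manipulation, approximated. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4 = this_cpu_read(cpu_tlbstate.cr4);

	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);	/* keep the shadow current */
		__write_cr4(cr4);			/* touch the register only on change */
	}
}

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;	/* assumption: remembered for secondary CPUs/resume */
	cr4_set_bits(mask);
}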
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index ee61c36d64f8..3250f2371aea 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -14,9 +14,6 @@
 #include <asm/uv/uv.h>
 #include <linux/debugfs.h>
 
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
-			= { &init_mm, 0, };
-
 /*
  * Smarter SMP flushing macros.
  *	c/o Linus Torvalds.
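The tlb.c definition removed here is the one that moved into init.c above, now with the extra .cr4 member. Initializing that member to ~0UL is a deliberate poison: any consumer of the shadow that runs before it has been seeded sees an impossible CR4 value and fails loudly. A hedged sketch of the per-CPU seeding step the series is expected to call early in CPU bringup:

/* Hedged sketch -- replace the ~0UL poison with this CPU's real CR4 value. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}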