diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-16 17:58:12 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-16 17:58:12 -0500 |
commit | 37507717de51a8332a34ee07fd88700be88df5bf (patch) | |
tree | d6eb5d00a798a4b1ce40c8c4c8ca74b0d22fe1df /arch/x86/include/asm/processor.h | |
parent | a68fb48380bb993306dd62a58cbd946b4348222a (diff) | |
parent | a66734297f78707ce39d756b656bfae861d53f62 (diff) |
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 perf updates from Ingo Molnar:
"This series tightens up RDPMC permissions: currently even highly
sandboxed x86 execution environments (such as seccomp) have permission
to execute RDPMC, which may leak various perf events / PMU state such
as timing information and other CPU execution details.
This 'all is allowed' RDPMC mode is still preserved as the
(non-default) /sys/devices/cpu/rdpmc=2 setting. The new default is
that RDPMC access is only allowed if a perf event is mmap-ed (which is
needed to correctly interpret RDPMC counter values in any case).
As a side effect of these changes CR4 handling is cleaned up in the
x86 code and a shadow copy of the CR4 value is added.
The extra CR4 manipulation adds ~ <50ns to the context switch cost
between rdpmc-capable and rdpmc-non-capable mms"
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/x86: Add /sys/devices/cpu/rdpmc=2 to allow rdpmc for all tasks
perf/x86: Only allow rdpmc if a perf_event is mapped
perf: Pass the event to arch_perf_update_userpage()
perf: Add pmu callbacks to track event mapping and unmapping
x86: Add a comment clarifying LDT context switching
x86: Store a per-cpu shadow copy of CR4
x86: Clean up cr4 manipulation
Diffstat (limited to 'arch/x86/include/asm/processor.h')
-rw-r--r-- | arch/x86/include/asm/processor.h | 33 |
1 file changed, 0 insertions, 33 deletions
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index a092a0cce0b7..ec1c93588cef 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -579,39 +579,6 @@ static inline void load_sp0(struct tss_struct *tss, | |||
579 | #define set_iopl_mask native_set_iopl_mask | 579 | #define set_iopl_mask native_set_iopl_mask |
580 | #endif /* CONFIG_PARAVIRT */ | 580 | #endif /* CONFIG_PARAVIRT */ |
581 | 581 | ||
582 | /* | ||
583 | * Save the cr4 feature set we're using (ie | ||
584 | * Pentium 4MB enable and PPro Global page | ||
585 | * enable), so that any CPU's that boot up | ||
586 | * after us can get the correct flags. | ||
587 | */ | ||
588 | extern unsigned long mmu_cr4_features; | ||
589 | extern u32 *trampoline_cr4_features; | ||
590 | |||
591 | static inline void set_in_cr4(unsigned long mask) | ||
592 | { | ||
593 | unsigned long cr4; | ||
594 | |||
595 | mmu_cr4_features |= mask; | ||
596 | if (trampoline_cr4_features) | ||
597 | *trampoline_cr4_features = mmu_cr4_features; | ||
598 | cr4 = read_cr4(); | ||
599 | cr4 |= mask; | ||
600 | write_cr4(cr4); | ||
601 | } | ||
602 | |||
603 | static inline void clear_in_cr4(unsigned long mask) | ||
604 | { | ||
605 | unsigned long cr4; | ||
606 | |||
607 | mmu_cr4_features &= ~mask; | ||
608 | if (trampoline_cr4_features) | ||
609 | *trampoline_cr4_features = mmu_cr4_features; | ||
610 | cr4 = read_cr4(); | ||
611 | cr4 &= ~mask; | ||
612 | write_cr4(cr4); | ||
613 | } | ||
614 | |||
615 | typedef struct { | 582 | typedef struct { |
616 | unsigned long seg; | 583 | unsigned long seg; |
617 | } mm_segment_t; | 584 | } mm_segment_t; |