path: root/arch/x86/kvm/vmx.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2015-02-16 17:58:12 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-16 17:58:12 -0500
commit		37507717de51a8332a34ee07fd88700be88df5bf (patch)
tree		d6eb5d00a798a4b1ce40c8c4c8ca74b0d22fe1df /arch/x86/kvm/vmx.c
parent		a68fb48380bb993306dd62a58cbd946b4348222a (diff)
parent		a66734297f78707ce39d756b656bfae861d53f62 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 perf updates from Ingo Molnar:
 "This series tightens up RDPMC permissions: currently even highly
  sandboxed x86 execution environments (such as seccomp) have permission
  to execute RDPMC, which may leak various perf events / PMU state such
  as timing information and other CPU execution details.

  This 'all is allowed' RDPMC mode is still preserved as the
  (non-default) /sys/devices/cpu/rdpmc=2 setting.  The new default is
  that RDPMC access is only allowed if a perf event is mmap-ed (which is
  needed to correctly interpret RDPMC counter values in any case).

  As a side effect of these changes CR4 handling is cleaned up in the
  x86 code and a shadow copy of the CR4 value is added.

  The extra CR4 manipulation adds ~ <50ns to the context switch cost
  between rdpmc-capable and rdpmc-non-capable mms"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86: Add /sys/devices/cpu/rdpmc=2 to allow rdpmc for all tasks
  perf/x86: Only allow rdpmc if a perf_event is mapped
  perf: Pass the event to arch_perf_update_userpage()
  perf: Add pmu callbacks to track event mapping and unmapping
  x86: Add a comment clarifying LDT context switching
  x86: Store a per-cpu shadow copy of CR4
  x86: Clean up cr4 manipulation
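For context on the cr4_* helpers that replace read_cr4()/write_cr4() in the diff below: the "x86: Store a per-cpu shadow copy of CR4" patch in this series keeps a per-cpu shadow of CR4, so reads come from fast per-cpu memory instead of the register, and writes touch the real register only when a bit actually changes. The following is a simplified sketch of that pattern, modeled on the helpers this series adds to arch/x86/include/asm/tlbflush.h; the shadow's storage location and the name of the raw register write are abridged here and may differ from the kernel source.

	/*
	 * Sketch of the per-cpu CR4 shadow (not the verbatim kernel code;
	 * the real shadow lives in cpu_tlbstate).
	 */
	static DEFINE_PER_CPU(unsigned long, cr4_shadow);

	/* Read this CPU's CR4 from the shadow -- no register access. */
	static inline unsigned long cr4_read_shadow(void)
	{
		return this_cpu_read(cr4_shadow);
	}

	/* Set bits in this CPU's CR4; hit hardware only on change. */
	static inline void cr4_set_bits(unsigned long mask)
	{
		unsigned long cr4 = this_cpu_read(cr4_shadow);

		if ((cr4 | mask) != cr4) {
			cr4 |= mask;
			this_cpu_write(cr4_shadow, cr4);
			__write_cr4(cr4);	/* raw CR4 write */
		}
	}

	/* Clear bits in this CPU's CR4; hit hardware only on change. */
	static inline void cr4_clear_bits(unsigned long mask)
	{
		unsigned long cr4 = this_cpu_read(cr4_shadow);

		if ((cr4 & ~mask) != cr4) {
			cr4 &= ~mask;
			this_cpu_write(cr4_shadow, cr4);
			__write_cr4(cr4);
		}
	}

This is why the vmx.c hunks below become simpler: hardware_enable() and hardware_disable() flip X86_CR4_VMXE through the shadowed helpers instead of open-coding a read-modify-write of CR4, and the hot vmx_vcpu_run() path compares against cr4_read_shadow() rather than executing a mov from %cr4 on every VM entry.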
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3f73bfad0349..14c1a18d206a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2871,7 +2871,7 @@ static int hardware_enable(void)
 	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
 	u64 old, test_bits;
 
-	if (read_cr4() & X86_CR4_VMXE)
+	if (cr4_read_shadow() & X86_CR4_VMXE)
 		return -EBUSY;
 
 	INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
@@ -2898,7 +2898,7 @@ static int hardware_enable(void)
 		/* enable and lock */
 		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
 	}
-	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
+	cr4_set_bits(X86_CR4_VMXE);
 
 	if (vmm_exclusive) {
 		kvm_cpu_vmxon(phys_addr);
@@ -2935,7 +2935,7 @@ static void hardware_disable(void)
 		vmclear_local_loaded_vmcss();
 		kvm_cpu_vmxoff();
 	}
-	write_cr4(read_cr4() & ~X86_CR4_VMXE);
+	cr4_clear_bits(X86_CR4_VMXE);
 }
 
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
@@ -4450,7 +4450,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
 
 	/* Save the most likely value for this task's CR4 in the VMCS. */
-	cr4 = read_cr4();
+	cr4 = cr4_read_shadow();
 	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
 	vmx->host_state.vmcs_host_cr4 = cr4;
 
@@ -8146,7 +8146,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
 
-	cr4 = read_cr4();
+	cr4 = cr4_read_shadow();
 	if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
 		vmcs_writel(HOST_CR4, cr4);
 		vmx->host_state.vmcs_host_cr4 = cr4;