author		Paolo Bonzini <pbonzini@redhat.com>	2018-02-22 10:43:17 -0500
committer	Ingo Molnar <mingo@kernel.org>		2018-02-23 02:24:35 -0500
commit		ecb586bd29c99fb4de599dec388658e74388daad
tree		5c309f49fee88e9690ead6beac4c9b3e70aa7d3c
parent		d5028ba8ee5a18c9d0bb926d883c28b370f89009
KVM/x86: Remove indirect MSR op calls from SPEC_CTRL
Having a paravirt indirect call in the IBRS restore path is not a good idea,
since we are trying to protect from speculative execution of bogus indirect
branch targets. It is also slower, so use native_wrmsrl() on the vmentry
path too.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: KarimAllah Ahmed <karahmed@amazon.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm@vger.kernel.org
Cc: stable@vger.kernel.org
Fixes: d28b387fb74da95d69d2615732f50cceb38e9a4d
Link: http://lkml.kernel.org/r/20180222154318.20361-2-pbonzini@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/kvm/svm.c	7
-rw-r--r--	arch/x86/kvm/vmx.c	7
2 files changed, 8 insertions(+), 6 deletions(-)
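For context, a minimal sketch of the distinction the patch relies on. These are not the kernel's real definitions: the paravirt wrmsrl() dispatches through an ops table, i.e. an indirect call the CPU can speculate through, while native_wrmsrl() emits the WRMSR instruction inline with no indirect branch. The diff below swaps the former for the latter on the vmentry/vmexit paths.

	/*
	 * Illustrative sketch only -- not the kernel's actual definitions.
	 */
	#include <stdint.h>

	/* Direct write: WRMSR is emitted inline, no branch to mispredict. */
	static inline void sketch_native_wrmsrl(uint32_t msr, uint64_t val)
	{
		asm volatile("wrmsr"
			     : /* no outputs */
			     : "c"(msr), "a"((uint32_t)val), "d"((uint32_t)(val >> 32))
			     : "memory");
	}

	/* Hypothetical paravirt-style ops table holding a function pointer. */
	struct sketch_pv_msr_ops {
		void (*write_msr)(uint32_t msr, uint32_t lo, uint32_t hi);
	};

	/* Hypothetical "native" backend the pointer would normally hold. */
	static void sketch_pv_native_write_msr(uint32_t msr, uint32_t lo, uint32_t hi)
	{
		asm volatile("wrmsr" : : "c"(msr), "a"(lo), "d"(hi) : "memory");
	}

	static struct sketch_pv_msr_ops sketch_pv_ops = {
		.write_msr = sketch_pv_native_write_msr,
	};

	static inline void sketch_paravirt_wrmsrl(uint32_t msr, uint64_t val)
	{
		/* Indirect call: the kind of branch IBRS is meant to guard. */
		sketch_pv_ops.write_msr(msr, (uint32_t)val, (uint32_t)(val >> 32));
	}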
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b3e488a74828..1598beeda11c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -49,6 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
@@ -5355,7 +5356,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5465,10 +5466,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * save it.
 	 */
 	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3dec126aa302..0927be315965 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -51,6 +51,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include "trace.h"
@@ -9452,7 +9453,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (vmx->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
@@ -9588,10 +9589,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * save it.
 	 */
 	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (vmx->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();