about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorSean Christopherson <sean.j.christopherson@intel.com>2019-04-02 11:10:47 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2019-04-16 09:37:37 -0400
commit8f4dc2e77cdfaf7e644ef29693fa229db29ee1de (patch)
treeadd5a9e7f4fcef67fffaf8f40358d20b114c2419
parent9ec19493fb86d6d5fbf9286b94ff21e56ef66376 (diff)
KVM: x86: Don't clear EFER during SMM transitions for 32-bit vCPU
Neither AMD nor Intel CPUs have an EFER field in the legacy SMRAM save state area, i.e. don't save/restore EFER across SMM transitions. KVM somewhat models this, e.g. doesn't clear EFER on entry to SMM if the guest doesn't support long mode. But during RSM, KVM unconditionally clears EFER so that it can get back to pure 32-bit mode in order to start loading CRs with their actual non-SMM values. Clear EFER only when it will be written when loading the non-SMM state so as to preserve bits that can theoretically be set on 32-bit vCPUs, e.g. KVM always emulates EFER_SCE. And because CR4.PAE is cleared only to play nice with EFER, wrap that code in the long mode check as well. Note, this may result in a compiler warning about cr4 being consumed uninitialized. Re-read CR4 even though it's technically unnecessary, as doing so allows for more readable code and RSM emulation is not a performance critical path. Fixes: 660a5d517aaab ("KVM: x86: save/load state on SMM switch") Cc: stable@vger.kernel.org Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--arch/x86/kvm/emulate.c21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index f526acee2eed..f3284827c432 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2582,15 +2582,13 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
 	 * supports long mode.
 	 */
-	cr4 = ctxt->ops->get_cr(ctxt, 4);
 	if (emulator_has_longmode(ctxt)) {
 		struct desc_struct cs_desc;
 
 		/* Zero CR4.PCIDE before CR0.PG. */
-		if (cr4 & X86_CR4_PCIDE) {
+		cr4 = ctxt->ops->get_cr(ctxt, 4);
+		if (cr4 & X86_CR4_PCIDE)
 			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
-			cr4 &= ~X86_CR4_PCIDE;
-		}
 
 		/* A 32-bit code segment is required to clear EFER.LMA. */
 		memset(&cs_desc, 0, sizeof(cs_desc));
@@ -2604,13 +2602,16 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	if (cr0 & X86_CR0_PE)
 		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
 
-	/* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
-	if (cr4 & X86_CR4_PAE)
-		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
+	if (emulator_has_longmode(ctxt)) {
+		/* Clear CR4.PAE before clearing EFER.LME. */
+		cr4 = ctxt->ops->get_cr(ctxt, 4);
+		if (cr4 & X86_CR4_PAE)
+			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
 
-	/* And finally go back to 32-bit mode. */
-	efer = 0;
-	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+		/* And finally go back to 32-bit mode. */
+		efer = 0;
+		ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+	}
 
 	/*
 	 * Give pre_leave_smm() a chance to make ISA-specific changes to the