about summary refs log tree commit diff stats
path: root/arch/x86/kvm/vmx.c
diff options
context:
space:
mode:
authorAmit Shah <amit.shah@redhat.com>2009-02-20 12:23:37 -0500
committerAvi Kivity <avi@redhat.com>2009-03-24 05:03:13 -0400
commit401d10dee083bda281f2fdcdf654080313ba30ec (patch)
treec70fc703853ae2f14c8f6e1c79d17f41ed1ab90f /arch/x86/kvm/vmx.c
parent6b08035f3e64d8e474be166d682b52c95941662e (diff)
KVM: VMX: Update necessary state when guest enters long mode
setup_msrs() should be called when entering long mode to save the shadow state for the 64-bit guest state. Using vmx_set_efer() in enter_lmode() removes some duplicated code and also ensures we call setup_msrs(). We can safely pass the value of shadow_efer to vmx_set_efer() as no other bits in the efer change while enabling long mode (guest first sets EFER.LME, then sets CR0.PG which causes a vmexit where we activate long mode). With this fix, is_long_mode() can check for EFER.LMA set instead of EFER.LME and 5e23049e86dd298b72e206b420513dbc3a240cd9 can be reverted. Signed-off-by: Amit Shah <amit.shah@redhat.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--arch/x86/kvm/vmx.c54
1 file changed, 24 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index cb27ffccf466..48063a0aa243 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1430,6 +1430,29 @@ continue_rmode:
1430 init_rmode(vcpu->kvm); 1430 init_rmode(vcpu->kvm);
1431} 1431}
1432 1432
1433static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1434{
1435 struct vcpu_vmx *vmx = to_vmx(vcpu);
1436 struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
1437
1438 vcpu->arch.shadow_efer = efer;
1439 if (!msr)
1440 return;
1441 if (efer & EFER_LMA) {
1442 vmcs_write32(VM_ENTRY_CONTROLS,
1443 vmcs_read32(VM_ENTRY_CONTROLS) |
1444 VM_ENTRY_IA32E_MODE);
1445 msr->data = efer;
1446 } else {
1447 vmcs_write32(VM_ENTRY_CONTROLS,
1448 vmcs_read32(VM_ENTRY_CONTROLS) &
1449 ~VM_ENTRY_IA32E_MODE);
1450
1451 msr->data = efer & ~EFER_LME;
1452 }
1453 setup_msrs(vmx);
1454}
1455
1433#ifdef CONFIG_X86_64 1456#ifdef CONFIG_X86_64
1434 1457
1435static void enter_lmode(struct kvm_vcpu *vcpu) 1458static void enter_lmode(struct kvm_vcpu *vcpu)
@@ -1444,13 +1467,8 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
1444 (guest_tr_ar & ~AR_TYPE_MASK) 1467 (guest_tr_ar & ~AR_TYPE_MASK)
1445 | AR_TYPE_BUSY_64_TSS); 1468 | AR_TYPE_BUSY_64_TSS);
1446 } 1469 }
1447
1448 vcpu->arch.shadow_efer |= EFER_LMA; 1470 vcpu->arch.shadow_efer |= EFER_LMA;
1449 1471 vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
1450 find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
1451 vmcs_write32(VM_ENTRY_CONTROLS,
1452 vmcs_read32(VM_ENTRY_CONTROLS)
1453 | VM_ENTRY_IA32E_MODE);
1454} 1472}
1455 1473
1456static void exit_lmode(struct kvm_vcpu *vcpu) 1474static void exit_lmode(struct kvm_vcpu *vcpu)
@@ -1609,30 +1627,6 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1609 vmcs_writel(GUEST_CR4, hw_cr4); 1627 vmcs_writel(GUEST_CR4, hw_cr4);
1610} 1628}
1611 1629
1612static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1613{
1614 struct vcpu_vmx *vmx = to_vmx(vcpu);
1615 struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
1616
1617 vcpu->arch.shadow_efer = efer;
1618 if (!msr)
1619 return;
1620 if (efer & EFER_LMA) {
1621 vmcs_write32(VM_ENTRY_CONTROLS,
1622 vmcs_read32(VM_ENTRY_CONTROLS) |
1623 VM_ENTRY_IA32E_MODE);
1624 msr->data = efer;
1625
1626 } else {
1627 vmcs_write32(VM_ENTRY_CONTROLS,
1628 vmcs_read32(VM_ENTRY_CONTROLS) &
1629 ~VM_ENTRY_IA32E_MODE);
1630
1631 msr->data = efer & ~EFER_LME;
1632 }
1633 setup_msrs(vmx);
1634}
1635
1636static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) 1630static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1637{ 1631{
1638 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 1632 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];