diff options
-rw-r--r-- | arch/x86/kvm/vmx.c | 30 |
1 files changed, 30 insertions, 0 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 736f83955ce9..a713c69bfcee 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -191,6 +191,8 @@ static unsigned long *vmx_io_bitmap_b; | |||
191 | static unsigned long *vmx_msr_bitmap_legacy; | 191 | static unsigned long *vmx_msr_bitmap_legacy; |
192 | static unsigned long *vmx_msr_bitmap_longmode; | 192 | static unsigned long *vmx_msr_bitmap_longmode; |
193 | 193 | ||
194 | static bool cpu_has_load_ia32_efer; | ||
195 | |||
194 | static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); | 196 | static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); |
195 | static DEFINE_SPINLOCK(vmx_vpid_lock); | 197 | static DEFINE_SPINLOCK(vmx_vpid_lock); |
196 | 198 | ||
@@ -664,6 +666,12 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) | |||
664 | unsigned i; | 666 | unsigned i; |
665 | struct msr_autoload *m = &vmx->msr_autoload; | 667 | struct msr_autoload *m = &vmx->msr_autoload; |
666 | 668 | ||
669 | if (msr == MSR_EFER && cpu_has_load_ia32_efer) { | ||
670 | vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER); | ||
671 | vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER); | ||
672 | return; | ||
673 | } | ||
674 | |||
667 | for (i = 0; i < m->nr; ++i) | 675 | for (i = 0; i < m->nr; ++i) |
668 | if (m->guest[i].index == msr) | 676 | if (m->guest[i].index == msr) |
669 | break; | 677 | break; |
@@ -683,6 +691,14 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, | |||
683 | unsigned i; | 691 | unsigned i; |
684 | struct msr_autoload *m = &vmx->msr_autoload; | 692 | struct msr_autoload *m = &vmx->msr_autoload; |
685 | 693 | ||
694 | if (msr == MSR_EFER && cpu_has_load_ia32_efer) { | ||
695 | vmcs_write64(GUEST_IA32_EFER, guest_val); | ||
696 | vmcs_write64(HOST_IA32_EFER, host_val); | ||
697 | vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER); | ||
698 | vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER); | ||
699 | return; | ||
700 | } | ||
701 | |||
686 | for (i = 0; i < m->nr; ++i) | 702 | for (i = 0; i < m->nr; ++i) |
687 | if (m->guest[i].index == msr) | 703 | if (m->guest[i].index == msr) |
688 | break; | 704 | break; |
@@ -1418,6 +1434,14 @@ static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, | |||
1418 | return 0; | 1434 | return 0; |
1419 | } | 1435 | } |
1420 | 1436 | ||
1437 | static __init bool allow_1_setting(u32 msr, u32 ctl) | ||
1438 | { | ||
1439 | u32 vmx_msr_low, vmx_msr_high; | ||
1440 | |||
1441 | rdmsr(msr, vmx_msr_low, vmx_msr_high); | ||
1442 | return vmx_msr_high & ctl; | ||
1443 | } | ||
1444 | |||
1421 | static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) | 1445 | static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) |
1422 | { | 1446 | { |
1423 | u32 vmx_msr_low, vmx_msr_high; | 1447 | u32 vmx_msr_low, vmx_msr_high; |
@@ -1532,6 +1556,12 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) | |||
1532 | vmcs_conf->vmexit_ctrl = _vmexit_control; | 1556 | vmcs_conf->vmexit_ctrl = _vmexit_control; |
1533 | vmcs_conf->vmentry_ctrl = _vmentry_control; | 1557 | vmcs_conf->vmentry_ctrl = _vmentry_control; |
1534 | 1558 | ||
1559 | cpu_has_load_ia32_efer = | ||
1560 | allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, | ||
1561 | VM_ENTRY_LOAD_IA32_EFER) | ||
1562 | && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, | ||
1563 | VM_EXIT_LOAD_IA32_EFER); | ||
1564 | |||
1535 | return 0; | 1565 | return 0; |
1536 | } | 1566 | } |
1537 | 1567 | ||