author     Dongxiao Xu <dongxiao.xu@intel.com>    2010-05-11 06:29:45 -0400
committer  Avi Kivity <avi@redhat.com>            2010-08-01 03:35:42 -0400
commit     b923e62e4d48bc5242b32a6ef5ba0f886137668a
tree       71652285850011abeea56b6f444498cffd66fd45 /arch/x86/kvm/vmx.c
parent     92fe13be74303a7b80dc3c99e22e12a87d41bd5f
KVM: VMX: VMCLEAR/VMPTRLD usage changes
Originally, VMCLEAR/VMPTRLD were called on vcpu migration. To
support coexistence with a hosted VMM, VMCLEAR is now executed on
vcpu schedule-out and VMPTRLD on vcpu schedule-in. This also
avoids the IPI otherwise needed to VMCLEAR a VMCS that is still
loaded on another cpu.
Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
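In effect, the patch puts a vmm_exclusive mode switch around the two
scheduler hooks. A minimal sketch of the resulting control flow
(bodies elided to the lines this patch touches; the real helpers live
in arch/x86/kvm/vmx.c):

/*
 * vmm_exclusive=1 (default): KVM owns VMX on every cpu, so a VMCS may
 * stay loaded across schedule-out and is VMCLEARed lazily, on
 * migration, possibly via an IPI to the cpu still holding it.
 * vmm_exclusive=0: another hosted VMM shares VMX, so the VMCS is
 * VMCLEARed locally at every schedule-out and VMPTRLDed again at
 * schedule-in; no cross-cpu IPI is needed.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Lazily clearing on migration only makes sense in exclusive
	 * mode; in shared mode the VMCS was already cleared at the
	 * last schedule-out.
	 */
	if (vmm_exclusive && vcpu->cpu != cpu)
		vcpu_clear(vmx);		/* may IPI the old cpu */

	/* ... VMPTRLD via the per_cpu(current_vmcs, cpu) check, as before ... */
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	__vmx_load_host_state(to_vmx(vcpu));
	/* Shared mode: release the VMCS on this cpu at every schedule-out. */
	if (!vmm_exclusive)
		__vcpu_clear(to_vmx(vcpu));	/* local VMCLEAR */
}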
Diffstat (limited to 'arch/x86/kvm/vmx.c')
 arch/x86/kvm/vmx.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0d281dbc008f..9529bff04262 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -63,6 +63,9 @@ module_param_named(unrestricted_guest,
 static int __read_mostly emulate_invalid_guest_state = 0;
 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 
+static int __read_mostly vmm_exclusive = 1;
+module_param(vmm_exclusive, bool, S_IRUGO);
+
 #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
 	(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
 #define KVM_GUEST_CR0_MASK \
@@ -845,7 +848,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 tsc_this, delta, new_offset;
 
-	if (vcpu->cpu != cpu)
+	if (vmm_exclusive && vcpu->cpu != cpu)
 		vcpu_clear(vmx);
 
 	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
@@ -891,6 +894,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	__vmx_load_host_state(to_vmx(vcpu));
+	if (!vmm_exclusive)
+		__vcpu_clear(to_vmx(vcpu));
 }
 
 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
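The new knob is exposed as a module parameter of kvm-intel: loading the
module with vmm_exclusive=0 (e.g. modprobe kvm-intel vmm_exclusive=0)
selects the coexistence behaviour, and since it is registered with
S_IRUGO its current value can be read back from
/sys/module/kvm_intel/parameters/vmm_exclusive. The default of 1
preserves the previous, exclusive behaviour.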