aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/vmx.c
diff options
context:
space:
mode:
authorAlexander Graf <agraf@suse.de>2009-09-15 05:37:46 -0400
committerAvi Kivity <avi@redhat.com>2009-12-03 02:32:10 -0500
commit10474ae8945ce08622fd1f3464e55bd817bf2376 (patch)
treed390843b5107e600fbbf745eb24d85d745fe449f /arch/x86/kvm/vmx.c
parente8b3433a5c062e94e34cadb6144c10689a497bc3 (diff)
KVM: Activate Virtualization On Demand
X86 CPUs need to have some magic happening to enable the virtualization extensions on them. This magic can result in unpleasant results for users, like blocking other VMMs from working (vmx) or using invalid TLB entries (svm). Currently KVM activates virtualization when the respective kernel module is loaded. This blocks us from autoloading KVM modules without breaking other VMMs. To circumvent this problem at least a bit, this patch introduces on-demand activation of virtualization. This means that virtualization is instead enabled on creation of the first virtual machine and disabled on destruction of the last one. So using this, KVM can be easily autoloaded, while keeping other hypervisors usable. Signed-off-by: Alexander Graf <agraf@suse.de> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--arch/x86/kvm/vmx.c11
1 file changed, 8 insertions, 3 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 73cb5dd960cf..a187570e4837 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1138,12 +1138,15 @@ static __init int vmx_disabled_by_bios(void)
1138 /* locked but not enabled */ 1138 /* locked but not enabled */
1139} 1139}
1140 1140
1141static void hardware_enable(void *garbage) 1141static int hardware_enable(void *garbage)
1142{ 1142{
1143 int cpu = raw_smp_processor_id(); 1143 int cpu = raw_smp_processor_id();
1144 u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); 1144 u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
1145 u64 old; 1145 u64 old;
1146 1146
1147 if (read_cr4() & X86_CR4_VMXE)
1148 return -EBUSY;
1149
1147 INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu)); 1150 INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
1148 rdmsrl(MSR_IA32_FEATURE_CONTROL, old); 1151 rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
1149 if ((old & (FEATURE_CONTROL_LOCKED | 1152 if ((old & (FEATURE_CONTROL_LOCKED |
@@ -1158,6 +1161,10 @@ static void hardware_enable(void *garbage)
1158 asm volatile (ASM_VMX_VMXON_RAX 1161 asm volatile (ASM_VMX_VMXON_RAX
1159 : : "a"(&phys_addr), "m"(phys_addr) 1162 : : "a"(&phys_addr), "m"(phys_addr)
1160 : "memory", "cc"); 1163 : "memory", "cc");
1164
1165 ept_sync_global();
1166
1167 return 0;
1161} 1168}
1162 1169
1163static void vmclear_local_vcpus(void) 1170static void vmclear_local_vcpus(void)
@@ -4040,8 +4047,6 @@ static int __init vmx_init(void)
4040 if (bypass_guest_pf) 4047 if (bypass_guest_pf)
4041 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull); 4048 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
4042 4049
4043 ept_sync_global();
4044
4045 return 0; 4050 return 0;
4046 4051
4047out3: 4052out3: