about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author Alexander Graf <agraf@suse.de> 2009-09-15 05:37:46 -0400
committer Avi Kivity <avi@redhat.com> 2009-12-03 02:32:10 -0500
commit 10474ae8945ce08622fd1f3464e55bd817bf2376 (patch)
tree d390843b5107e600fbbf745eb24d85d745fe449f
parent e8b3433a5c062e94e34cadb6144c10689a497bc3 (diff)
KVM: Activate Virtualization On Demand
X86 CPUs need to have some magic happening to enable the virtualization extensions on them. This magic can result in unpleasant results for users, like blocking other VMMs from working (vmx) or using invalid TLB entries (svm). Currently KVM activates virtualization when the respective kernel module is loaded. This blocks us from autoloading KVM modules without breaking other VMMs. To circumvent this problem at least a bit, this patch introduces on-demand activation of virtualization. This means that, instead, virtualization is enabled on creation of the first virtual machine and disabled on destruction of the last one. So using this, KVM can be easily autoloaded, while keeping other hypervisors usable. Signed-off-by: Alexander Graf <agraf@suse.de> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--arch/ia64/kvm/kvm-ia64.c8
-rw-r--r--arch/powerpc/kvm/powerpc.c3
-rw-r--r--arch/s390/kvm/kvm-s390.c3
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/kvm/svm.c13
-rw-r--r--arch/x86/kvm/vmx.c11
-rw-r--r--arch/x86/kvm/x86.c4
-rw-r--r--include/linux/kvm_host.h2
-rw-r--r--virt/kvm/kvm_main.c90
9 files changed, 108 insertions, 28 deletions
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index f6471c882667..5fdeec5fddcf 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
124 124
125static DEFINE_SPINLOCK(vp_lock); 125static DEFINE_SPINLOCK(vp_lock);
126 126
127void kvm_arch_hardware_enable(void *garbage) 127int kvm_arch_hardware_enable(void *garbage)
128{ 128{
129 long status; 129 long status;
130 long tmp_base; 130 long tmp_base;
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
137 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); 137 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
138 local_irq_restore(saved_psr); 138 local_irq_restore(saved_psr);
139 if (slot < 0) 139 if (slot < 0)
140 return; 140 return -EINVAL;
141 141
142 spin_lock(&vp_lock); 142 spin_lock(&vp_lock);
143 status = ia64_pal_vp_init_env(kvm_vsa_base ? 143 status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
145 __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); 145 __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
146 if (status != 0) { 146 if (status != 0) {
147 printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); 147 printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
148 return ; 148 return -EINVAL;
149 } 149 }
150 150
151 if (!kvm_vsa_base) { 151 if (!kvm_vsa_base) {
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
154 } 154 }
155 spin_unlock(&vp_lock); 155 spin_unlock(&vp_lock);
156 ia64_ptr_entry(0x3, slot); 156 ia64_ptr_entry(0x3, slot);
157
158 return 0;
157} 159}
158 160
159void kvm_arch_hardware_disable(void *garbage) 161void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 95af62217b6b..5902bbc2411e 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -78,8 +78,9 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
78 return r; 78 return r;
79} 79}
80 80
81void kvm_arch_hardware_enable(void *garbage) 81int kvm_arch_hardware_enable(void *garbage)
82{ 82{
83 return 0;
83} 84}
84 85
85void kvm_arch_hardware_disable(void *garbage) 86void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 00e2ce8e91f5..544505893c9f 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -74,9 +74,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
74static unsigned long long *facilities; 74static unsigned long long *facilities;
75 75
76/* Section: not file related */ 76/* Section: not file related */
77void kvm_arch_hardware_enable(void *garbage) 77int kvm_arch_hardware_enable(void *garbage)
78{ 78{
79 /* every s390 is virtualization enabled ;-) */ 79 /* every s390 is virtualization enabled ;-) */
80 return 0;
80} 81}
81 82
82void kvm_arch_hardware_disable(void *garbage) 83void kvm_arch_hardware_disable(void *garbage)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a46e2dd9aca8..295c7c4d9c90 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -459,7 +459,7 @@ struct descriptor_table {
459struct kvm_x86_ops { 459struct kvm_x86_ops {
460 int (*cpu_has_kvm_support)(void); /* __init */ 460 int (*cpu_has_kvm_support)(void); /* __init */
461 int (*disabled_by_bios)(void); /* __init */ 461 int (*disabled_by_bios)(void); /* __init */
462 void (*hardware_enable)(void *dummy); /* __init */ 462 int (*hardware_enable)(void *dummy);
463 void (*hardware_disable)(void *dummy); 463 void (*hardware_disable)(void *dummy);
464 void (*check_processor_compatibility)(void *rtn); 464 void (*check_processor_compatibility)(void *rtn);
465 int (*hardware_setup)(void); /* __init */ 465 int (*hardware_setup)(void); /* __init */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f54c4f9d2865..59fe4d54da11 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
316 cpu_svm_disable(); 316 cpu_svm_disable();
317} 317}
318 318
319static void svm_hardware_enable(void *garbage) 319static int svm_hardware_enable(void *garbage)
320{ 320{
321 321
322 struct svm_cpu_data *svm_data; 322 struct svm_cpu_data *svm_data;
@@ -325,16 +325,20 @@ static void svm_hardware_enable(void *garbage)
325 struct desc_struct *gdt; 325 struct desc_struct *gdt;
326 int me = raw_smp_processor_id(); 326 int me = raw_smp_processor_id();
327 327
328 rdmsrl(MSR_EFER, efer);
329 if (efer & EFER_SVME)
330 return -EBUSY;
331
328 if (!has_svm()) { 332 if (!has_svm()) {
329 printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me); 333 printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
330 return; 334 return -EINVAL;
331 } 335 }
332 svm_data = per_cpu(svm_data, me); 336 svm_data = per_cpu(svm_data, me);
333 337
334 if (!svm_data) { 338 if (!svm_data) {
335 printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n", 339 printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
336 me); 340 me);
337 return; 341 return -EINVAL;
338 } 342 }
339 343
340 svm_data->asid_generation = 1; 344 svm_data->asid_generation = 1;
@@ -345,11 +349,12 @@ static void svm_hardware_enable(void *garbage)
345 gdt = (struct desc_struct *)gdt_descr.base; 349 gdt = (struct desc_struct *)gdt_descr.base;
346 svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); 350 svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
347 351
348 rdmsrl(MSR_EFER, efer);
349 wrmsrl(MSR_EFER, efer | EFER_SVME); 352 wrmsrl(MSR_EFER, efer | EFER_SVME);
350 353
351 wrmsrl(MSR_VM_HSAVE_PA, 354 wrmsrl(MSR_VM_HSAVE_PA,
352 page_to_pfn(svm_data->save_area) << PAGE_SHIFT); 355 page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
356
357 return 0;
353} 358}
354 359
355static void svm_cpu_uninit(int cpu) 360static void svm_cpu_uninit(int cpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 73cb5dd960cf..a187570e4837 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1138,12 +1138,15 @@ static __init int vmx_disabled_by_bios(void)
1138 /* locked but not enabled */ 1138 /* locked but not enabled */
1139} 1139}
1140 1140
1141static void hardware_enable(void *garbage) 1141static int hardware_enable(void *garbage)
1142{ 1142{
1143 int cpu = raw_smp_processor_id(); 1143 int cpu = raw_smp_processor_id();
1144 u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); 1144 u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
1145 u64 old; 1145 u64 old;
1146 1146
1147 if (read_cr4() & X86_CR4_VMXE)
1148 return -EBUSY;
1149
1147 INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu)); 1150 INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
1148 rdmsrl(MSR_IA32_FEATURE_CONTROL, old); 1151 rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
1149 if ((old & (FEATURE_CONTROL_LOCKED | 1152 if ((old & (FEATURE_CONTROL_LOCKED |
@@ -1158,6 +1161,10 @@ static void hardware_enable(void *garbage)
1158 asm volatile (ASM_VMX_VMXON_RAX 1161 asm volatile (ASM_VMX_VMXON_RAX
1159 : : "a"(&phys_addr), "m"(phys_addr) 1162 : : "a"(&phys_addr), "m"(phys_addr)
1160 : "memory", "cc"); 1163 : "memory", "cc");
1164
1165 ept_sync_global();
1166
1167 return 0;
1161} 1168}
1162 1169
1163static void vmclear_local_vcpus(void) 1170static void vmclear_local_vcpus(void)
@@ -4040,8 +4047,6 @@ static int __init vmx_init(void)
4040 if (bypass_guest_pf) 4047 if (bypass_guest_pf)
4041 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull); 4048 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
4042 4049
4043 ept_sync_global();
4044
4045 return 0; 4050 return 0;
4046 4051
4047out3: 4052out3:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 829e3063e2ab..3d83de8bcbf4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4691,9 +4691,9 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
4691 return kvm_x86_ops->vcpu_reset(vcpu); 4691 return kvm_x86_ops->vcpu_reset(vcpu);
4692} 4692}
4693 4693
4694void kvm_arch_hardware_enable(void *garbage) 4694int kvm_arch_hardware_enable(void *garbage)
4695{ 4695{
4696 kvm_x86_ops->hardware_enable(garbage); 4696 return kvm_x86_ops->hardware_enable(garbage);
4697} 4697}
4698 4698
4699void kvm_arch_hardware_disable(void *garbage) 4699void kvm_arch_hardware_disable(void *garbage)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c0a1cc35f080..b985a29d8175 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -345,7 +345,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
345void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); 345void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
346 346
347int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu); 347int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
348void kvm_arch_hardware_enable(void *garbage); 348int kvm_arch_hardware_enable(void *garbage);
349void kvm_arch_hardware_disable(void *garbage); 349void kvm_arch_hardware_disable(void *garbage);
350int kvm_arch_hardware_setup(void); 350int kvm_arch_hardware_setup(void);
351void kvm_arch_hardware_unsetup(void); 351void kvm_arch_hardware_unsetup(void);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 38e4d2c34ac1..70c8cbea0a99 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -69,6 +69,8 @@ DEFINE_SPINLOCK(kvm_lock);
69LIST_HEAD(vm_list); 69LIST_HEAD(vm_list);
70 70
71static cpumask_var_t cpus_hardware_enabled; 71static cpumask_var_t cpus_hardware_enabled;
72static int kvm_usage_count = 0;
73static atomic_t hardware_enable_failed;
72 74
73struct kmem_cache *kvm_vcpu_cache; 75struct kmem_cache *kvm_vcpu_cache;
74EXPORT_SYMBOL_GPL(kvm_vcpu_cache); 76EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
@@ -79,6 +81,8 @@ struct dentry *kvm_debugfs_dir;
79 81
80static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 82static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
81 unsigned long arg); 83 unsigned long arg);
84static int hardware_enable_all(void);
85static void hardware_disable_all(void);
82 86
83static bool kvm_rebooting; 87static bool kvm_rebooting;
84 88
@@ -339,6 +343,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
339 343
340static struct kvm *kvm_create_vm(void) 344static struct kvm *kvm_create_vm(void)
341{ 345{
346 int r = 0;
342 struct kvm *kvm = kvm_arch_create_vm(); 347 struct kvm *kvm = kvm_arch_create_vm();
343#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 348#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
344 struct page *page; 349 struct page *page;
@@ -346,6 +351,11 @@ static struct kvm *kvm_create_vm(void)
346 351
347 if (IS_ERR(kvm)) 352 if (IS_ERR(kvm))
348 goto out; 353 goto out;
354
355 r = hardware_enable_all();
356 if (r)
357 goto out_err_nodisable;
358
349#ifdef CONFIG_HAVE_KVM_IRQCHIP 359#ifdef CONFIG_HAVE_KVM_IRQCHIP
350 INIT_HLIST_HEAD(&kvm->mask_notifier_list); 360 INIT_HLIST_HEAD(&kvm->mask_notifier_list);
351 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); 361 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
@@ -354,8 +364,8 @@ static struct kvm *kvm_create_vm(void)
354#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 364#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
355 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 365 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
356 if (!page) { 366 if (!page) {
357 kfree(kvm); 367 r = -ENOMEM;
358 return ERR_PTR(-ENOMEM); 368 goto out_err;
359 } 369 }
360 kvm->coalesced_mmio_ring = 370 kvm->coalesced_mmio_ring =
361 (struct kvm_coalesced_mmio_ring *)page_address(page); 371 (struct kvm_coalesced_mmio_ring *)page_address(page);
@@ -363,15 +373,13 @@ static struct kvm *kvm_create_vm(void)
363 373
364#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 374#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
365 { 375 {
366 int err;
367 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 376 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
368 err = mmu_notifier_register(&kvm->mmu_notifier, current->mm); 377 r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
369 if (err) { 378 if (r) {
370#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 379#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
371 put_page(page); 380 put_page(page);
372#endif 381#endif
373 kfree(kvm); 382 goto out_err;
374 return ERR_PTR(err);
375 } 383 }
376 } 384 }
377#endif 385#endif
@@ -395,6 +403,12 @@ static struct kvm *kvm_create_vm(void)
395#endif 403#endif
396out: 404out:
397 return kvm; 405 return kvm;
406
407out_err:
408 hardware_disable_all();
409out_err_nodisable:
410 kfree(kvm);
411 return ERR_PTR(r);
398} 412}
399 413
400/* 414/*
@@ -453,6 +467,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
453 kvm_arch_flush_shadow(kvm); 467 kvm_arch_flush_shadow(kvm);
454#endif 468#endif
455 kvm_arch_destroy_vm(kvm); 469 kvm_arch_destroy_vm(kvm);
470 hardware_disable_all();
456 mmdrop(mm); 471 mmdrop(mm);
457} 472}
458 473
@@ -1644,11 +1659,21 @@ static struct miscdevice kvm_dev = {
1644static void hardware_enable(void *junk) 1659static void hardware_enable(void *junk)
1645{ 1660{
1646 int cpu = raw_smp_processor_id(); 1661 int cpu = raw_smp_processor_id();
1662 int r;
1647 1663
1648 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 1664 if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
1649 return; 1665 return;
1666
1650 cpumask_set_cpu(cpu, cpus_hardware_enabled); 1667 cpumask_set_cpu(cpu, cpus_hardware_enabled);
1651 kvm_arch_hardware_enable(NULL); 1668
1669 r = kvm_arch_hardware_enable(NULL);
1670
1671 if (r) {
1672 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
1673 atomic_inc(&hardware_enable_failed);
1674 printk(KERN_INFO "kvm: enabling virtualization on "
1675 "CPU%d failed\n", cpu);
1676 }
1652} 1677}
1653 1678
1654static void hardware_disable(void *junk) 1679static void hardware_disable(void *junk)
@@ -1661,11 +1686,52 @@ static void hardware_disable(void *junk)
1661 kvm_arch_hardware_disable(NULL); 1686 kvm_arch_hardware_disable(NULL);
1662} 1687}
1663 1688
1689static void hardware_disable_all_nolock(void)
1690{
1691 BUG_ON(!kvm_usage_count);
1692
1693 kvm_usage_count--;
1694 if (!kvm_usage_count)
1695 on_each_cpu(hardware_disable, NULL, 1);
1696}
1697
1698static void hardware_disable_all(void)
1699{
1700 spin_lock(&kvm_lock);
1701 hardware_disable_all_nolock();
1702 spin_unlock(&kvm_lock);
1703}
1704
1705static int hardware_enable_all(void)
1706{
1707 int r = 0;
1708
1709 spin_lock(&kvm_lock);
1710
1711 kvm_usage_count++;
1712 if (kvm_usage_count == 1) {
1713 atomic_set(&hardware_enable_failed, 0);
1714 on_each_cpu(hardware_enable, NULL, 1);
1715
1716 if (atomic_read(&hardware_enable_failed)) {
1717 hardware_disable_all_nolock();
1718 r = -EBUSY;
1719 }
1720 }
1721
1722 spin_unlock(&kvm_lock);
1723
1724 return r;
1725}
1726
1664static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, 1727static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
1665 void *v) 1728 void *v)
1666{ 1729{
1667 int cpu = (long)v; 1730 int cpu = (long)v;
1668 1731
1732 if (!kvm_usage_count)
1733 return NOTIFY_OK;
1734
1669 val &= ~CPU_TASKS_FROZEN; 1735 val &= ~CPU_TASKS_FROZEN;
1670 switch (val) { 1736 switch (val) {
1671 case CPU_DYING: 1737 case CPU_DYING:
@@ -1868,13 +1934,15 @@ static void kvm_exit_debug(void)
1868 1934
1869static int kvm_suspend(struct sys_device *dev, pm_message_t state) 1935static int kvm_suspend(struct sys_device *dev, pm_message_t state)
1870{ 1936{
1871 hardware_disable(NULL); 1937 if (kvm_usage_count)
1938 hardware_disable(NULL);
1872 return 0; 1939 return 0;
1873} 1940}
1874 1941
1875static int kvm_resume(struct sys_device *dev) 1942static int kvm_resume(struct sys_device *dev)
1876{ 1943{
1877 hardware_enable(NULL); 1944 if (kvm_usage_count)
1945 hardware_enable(NULL);
1878 return 0; 1946 return 0;
1879} 1947}
1880 1948
@@ -1949,7 +2017,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
1949 goto out_free_1; 2017 goto out_free_1;
1950 } 2018 }
1951 2019
1952 on_each_cpu(hardware_enable, NULL, 1);
1953 r = register_cpu_notifier(&kvm_cpu_notifier); 2020 r = register_cpu_notifier(&kvm_cpu_notifier);
1954 if (r) 2021 if (r)
1955 goto out_free_2; 2022 goto out_free_2;
@@ -1999,7 +2066,6 @@ out_free_3:
1999 unregister_reboot_notifier(&kvm_reboot_notifier); 2066 unregister_reboot_notifier(&kvm_reboot_notifier);
2000 unregister_cpu_notifier(&kvm_cpu_notifier); 2067 unregister_cpu_notifier(&kvm_cpu_notifier);
2001out_free_2: 2068out_free_2:
2002 on_each_cpu(hardware_disable, NULL, 1);
2003out_free_1: 2069out_free_1:
2004 kvm_arch_hardware_unsetup(); 2070 kvm_arch_hardware_unsetup();
2005out_free_0a: 2071out_free_0a: