author     Jan Kiszka <jan.kiszka@siemens.com>   2010-11-09 11:02:49 -0500
committer  Avi Kivity <avi@redhat.com>           2011-01-12 04:29:09 -0500
commit     d89f5eff70a31237ffa1e21c51d23ca532110aea (patch)
tree       13b47648a564d8382e08d7e5937ea30ff0fb838c /arch/ia64
parent     9d893c6bc177b6ac5a1e937f4fdc359d272d68ff (diff)
KVM: Clean up vm creation and release
IA64 support forces us to abstract the allocation of the kvm structure.
But instead of mixing this up with arch-specific initialization and
doing the same on destruction, split both steps. This allows moving
generic destruction calls into generic code. It also fixes error
clean-up on failures of kvm_create_vm for IA64.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
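For context, here is a minimal sketch of how the generic side can consume the new hooks. The generic code is outside this arch/ia64-limited diff, so the exact definitions below are assumptions: architectures that do not define __KVM_HAVE_ARCH_VM_ALLOC would fall back to plain kzalloc()/kfree(), while ia64 overrides both to place struct kvm inside its per-VM data area.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kvm_host.h>

/* Assumed generic fallback when no arch override is provided. */
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

/* Simplified sketch of the generic creation path built on the hooks. */
static struct kvm *kvm_create_vm(void)
{
	int r;
	struct kvm *kvm = kvm_arch_alloc_vm();

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	r = kvm_arch_init_vm(kvm);
	if (r) {
		/* Error clean-up now also works for ia64. */
		kvm_arch_free_vm(kvm);
		return ERR_PTR(r);
	}
	return kvm;
}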
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/include/asm/kvm_host.h    4
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c           28
2 files changed, 11 insertions, 21 deletions
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 2f229e5de498..2689ee54a1c9 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -590,6 +590,10 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
 
+#define __KVM_HAVE_ARCH_VM_ALLOC 1
+struct kvm *kvm_arch_alloc_vm(void);
+void kvm_arch_free_vm(struct kvm *kvm);
+
 #endif /* __ASSEMBLY__*/
 
 #endif
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index f56a6316e134..48a48bdc59c3 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -749,7 +749,7 @@ out:
 	return r;
 }
 
-static struct kvm *kvm_alloc_kvm(void)
+struct kvm *kvm_arch_alloc_vm(void)
 {
 
 	struct kvm *kvm;
@@ -760,7 +760,7 @@ static struct kvm *kvm_alloc_kvm(void)
 	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
 
 	if (!vm_base)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
 	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
 	kvm = (struct kvm *)(vm_base +
@@ -806,10 +806,12 @@ static void kvm_build_io_pmt(struct kvm *kvm)
 #define GUEST_PHYSICAL_RR4	0x2739
 #define VMM_INIT_RR		0x1660
 
-static void kvm_init_vm(struct kvm *kvm)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
 	BUG_ON(!kvm);
 
+	kvm->arch.is_sn2 = ia64_platform_is("sn2");
+
 	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
 	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
 	kvm->arch.vmm_init_rr = VMM_INIT_RR;
@@ -823,21 +825,8 @@ static void kvm_init_vm(struct kvm *kvm)
 
 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
-}
-
-struct kvm *kvm_arch_create_vm(void)
-{
-	struct kvm *kvm = kvm_alloc_kvm();
-
-	if (IS_ERR(kvm))
-		return ERR_PTR(-ENOMEM);
-
-	kvm->arch.is_sn2 = ia64_platform_is("sn2");
-
-	kvm_init_vm(kvm);
-
-	return kvm;
 
+	return 0;
 }
 
 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
@@ -1357,7 +1346,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	return -EINVAL;
 }
 
-static void free_kvm(struct kvm *kvm)
+void kvm_arch_free_vm(struct kvm *kvm)
 {
 	unsigned long vm_base = kvm->arch.vm_base;
 
@@ -1399,9 +1388,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 #endif
 	kfree(kvm->arch.vioapic);
 	kvm_release_vm_pages(kvm);
-	kvm_free_physmem(kvm);
-	cleanup_srcu_struct(&kvm->srcu);
-	free_kvm(kvm);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)