author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>   2013-10-07 12:48:01 -0400
committer  Alexander Graf <agraf@suse.de>                       2013-10-17 12:42:36 -0400
commit     cbbc58d4fdfab1a39a6ac1b41fcb17885952157a (patch)
tree       66315f4516c953a9c1f0699d985541d84fcb7df3 /arch/powerpc/kvm
parent     5587027ce9d59a57aecaa190be1c8e560aaff45d (diff)
kvm: powerpc: book3s: Allow the HV and PR selection per virtual machine
This moves the kvmppc_ops callbacks to be a per-VM entity. This
enables us to select HV or PR mode when creating a VM. We also
allow both the kvm-hv and kvm-pr kernel modules to be loaded at the
same time. To achieve this we move /dev/kvm ownership to the kvm.ko
module. Depending on which KVM mode we select during VM creation,
we take a reference count on the respective module.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
[agraf: fix coding style]
Signed-off-by: Alexander Graf <agraf@suse.de>
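
With this change the implementation is picked per VM through the KVM_CREATE_VM
machine-type argument: KVM_VM_PPC_HV, KVM_VM_PPC_PR, or 0 to take whichever
module is loaded (HV preferred when both are). A minimal userspace sketch of
that selection, assuming the new machine-type constants are visible through
<linux/kvm.h>, with error handling trimmed:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Open /dev/kvm (now owned by kvm.ko) and ask for a PR guest explicitly. */
    int make_pr_vm(void)
    {
            int kvm_fd = open("/dev/kvm", O_RDWR);
            if (kvm_fd < 0)
                    return -1;
            /* 0 = default; KVM_VM_PPC_HV / KVM_VM_PPC_PR = explicit choice */
            return ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_PR);
    }

On success the returned file descriptor is the VM fd; creating it pins the
backing kvm-hv or kvm-pr module until the VM is destroyed.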
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/44x.c          |  7
-rw-r--r--  arch/powerpc/kvm/book3s.c       | 89
-rw-r--r--  arch/powerpc/kvm/book3s.h       |  2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c    | 18
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c    | 25
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c  |  2
-rw-r--r--  arch/powerpc/kvm/booke.c        | 22
-rw-r--r--  arch/powerpc/kvm/e500.c         |  8
-rw-r--r--  arch/powerpc/kvm/e500mc.c       |  6
-rw-r--r--  arch/powerpc/kvm/emulate.c      | 11
-rw-r--r--  arch/powerpc/kvm/powerpc.c      | 76
11 files changed, 178 insertions, 88 deletions
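
In miniature, the patch replaces the single global kvmppc_ops pointer with an
ops table hung off each VM (kvm->arch.kvm_ops) that the generic wrappers
dispatch through. The following self-contained sketch shows the pattern only;
the structure and function names are illustrative, not the kernel's:

    #include <stdio.h>

    struct vm;

    struct vm_ops {
            const char *name;
            int  (*init_vm)(struct vm *vm);
            void (*destroy_vm)(struct vm *vm);
    };

    struct vm {
            const struct vm_ops *ops;   /* chosen once, at VM creation time */
    };

    /* Generic wrappers follow the per-VM pointer instead of a global. */
    static int core_init_vm(struct vm *vm)     { return vm->ops->init_vm(vm); }
    static void core_destroy_vm(struct vm *vm) { vm->ops->destroy_vm(vm); }

    static int pr_init(struct vm *vm)     { printf("init %s VM\n", vm->ops->name); return 0; }
    static void pr_destroy(struct vm *vm) { printf("destroy %s VM\n", vm->ops->name); }

    static const struct vm_ops pr_ops = { "PR", pr_init, pr_destroy };

    int main(void)
    {
            struct vm vm = { .ops = &pr_ops };  /* an HV ops table could coexist */
            core_init_vm(&vm);
            core_destroy_vm(&vm);
            return 0;
    }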
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index a765bcd74fbb..93221e87b911 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -213,16 +213,19 @@ static int __init kvmppc_44x_init(void)
 	if (r)
 		goto err_out;
 
-	r = kvm_init(&kvm_ops_44x, sizeof(struct kvmppc_vcpu_44x),
-		     0, THIS_MODULE);
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
 	if (r)
 		goto err_out;
+	kvm_ops_44x.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_44x;
+
 err_out:
 	return r;
 }
 
 static void __exit kvmppc_44x_exit(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_booke_exit();
 }
 
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 130fe1d75bac..ad8f6ed3f136 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -34,6 +34,7 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 
+#include "book3s.h"
 #include "trace.h"
 
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -71,7 +72,7 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 
 static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
 {
-	if (!kvmppc_ops->is_hv_enabled)
+	if (!vcpu->kvm->arch.kvm_ops->is_hv_enabled)
 		return to_book3s(vcpu)->hior;
 	return 0;
 }
@@ -79,7 +80,7 @@ static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
 static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
 			unsigned long pending_now, unsigned long old_pending)
 {
-	if (kvmppc_ops->is_hv_enabled)
+	if (vcpu->kvm->arch.kvm_ops->is_hv_enabled)
 		return;
 	if (pending_now)
 		vcpu->arch.shared->int_pending = 1;
@@ -93,7 +94,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 	ulong crit_r1;
 	bool crit;
 
-	if (kvmppc_ops->is_hv_enabled)
+	if (vcpu->kvm->arch.kvm_ops->is_hv_enabled)
 		return false;
 
 	crit_raw = vcpu->arch.shared->critical;
@@ -477,13 +478,13 @@ void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
-	return kvmppc_ops->get_sregs(vcpu, sregs);
+	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
-	return kvmppc_ops->set_sregs(vcpu, sregs);
+	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
 }
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
@@ -562,7 +563,7 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	if (size > sizeof(val))
 		return -EINVAL;
 
-	r = kvmppc_ops->get_one_reg(vcpu, reg->id, &val);
+	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
 	if (r == -EINVAL) {
 		r = 0;
 		switch (reg->id) {
@@ -641,7 +642,7 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
 		return -EFAULT;
 
-	r = kvmppc_ops->set_one_reg(vcpu, reg->id, &val);
+	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
 	if (r == -EINVAL) {
 		r = 0;
 		switch (reg->id) {
@@ -702,23 +703,23 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	kvmppc_ops->vcpu_load(vcpu, cpu);
+	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	kvmppc_ops->vcpu_put(vcpu);
+	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
 }
 
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
-	kvmppc_ops->set_msr(vcpu, msr);
+	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
 }
 EXPORT_SYMBOL_GPL(kvmppc_set_msr);
 
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-	return kvmppc_ops->vcpu_run(kvm_run, vcpu);
+	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
@@ -743,84 +744,84 @@ void kvmppc_decrementer_func(unsigned long data)
 
 struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 {
-	return kvmppc_ops->vcpu_create(kvm, id);
+	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
 }
 
 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
-	kvmppc_ops->vcpu_free(vcpu);
+	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
 }
 
 int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 {
-	return kvmppc_ops->check_requests(vcpu);
+	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
 }
 
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
-	return kvmppc_ops->get_dirty_log(kvm, log);
+	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
 }
 
 void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			      struct kvm_memory_slot *dont)
 {
-	kvmppc_ops->free_memslot(free, dont);
+	kvm->arch.kvm_ops->free_memslot(free, dont);
 }
 
 int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
 {
-	return kvmppc_ops->create_memslot(slot, npages);
+	return kvm->arch.kvm_ops->create_memslot(slot, npages);
 }
 
 void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
-	kvmppc_ops->flush_memslot(kvm, memslot);
+	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
 }
 
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
 {
-	return kvmppc_ops->prepare_memory_region(kvm, memslot, mem);
+	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
 }
 
 void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old)
 {
-	kvmppc_ops->commit_memory_region(kvm, mem, old);
+	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
 }
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 {
-	return kvmppc_ops->unmap_hva(kvm, hva);
+	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
 }
 EXPORT_SYMBOL_GPL(kvm_unmap_hva);
 
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	return kvmppc_ops->unmap_hva_range(kvm, start, end);
+	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
 }
 
 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 {
-	return kvmppc_ops->age_hva(kvm, hva);
+	return kvm->arch.kvm_ops->age_hva(kvm, hva);
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 {
-	return kvmppc_ops->test_age_hva(kvm, hva);
+	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
 }
 
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
-	kvmppc_ops->set_spte_hva(kvm, hva, pte);
+	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
 }
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 {
-	kvmppc_ops->mmu_destroy(vcpu);
+	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
 }
 
 int kvmppc_core_init_vm(struct kvm *kvm)
@@ -831,12 +832,12 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
 #endif
 
-	return kvmppc_ops->init_vm(kvm);
+	return kvm->arch.kvm_ops->init_vm(kvm);
 }
 
 void kvmppc_core_destroy_vm(struct kvm *kvm)
 {
-	kvmppc_ops->destroy_vm(kvm);
+	kvm->arch.kvm_ops->destroy_vm(kvm);
 
 #ifdef CONFIG_PPC64
 	kvmppc_rtas_tokens_free(kvm);
@@ -846,5 +847,35 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
 
 int kvmppc_core_check_processor_compat(void)
 {
-	return kvmppc_ops->check_processor_compat();
+	/*
+	 * We always return 0 for book3s. We check
+	 * for compatability while loading the HV
+	 * or PR module
+	 */
+	return 0;
+}
+
+static int kvmppc_book3s_init(void)
+{
+	int r;
+
+	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	if (r)
+		return r;
+#ifdef CONFIG_KVM_BOOK3S_32
+	r = kvmppc_book3s_init_pr();
+#endif
+	return r;
+
+}
+
+static void kvmppc_book3s_exit(void)
+{
+#ifdef CONFIG_KVM_BOOK3S_32
+	kvmppc_book3s_exit_pr();
+#endif
+	kvm_exit();
 }
+
+module_init(kvmppc_book3s_init);
+module_exit(kvmppc_book3s_exit);
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
index 9e5b3a341943..4bf956cf94d6 100644
--- a/arch/powerpc/kvm/book3s.h
+++ b/arch/powerpc/kvm/book3s.h
@@ -28,5 +28,7 @@ extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
 					int sprn, ulong spr_val);
 extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
 					int sprn, ulong *spr_val);
+extern int kvmppc_book3s_init_pr(void);
+extern void kvmppc_book3s_exit_pr(void);
 
 #endif
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 9e954a81c078..8743048881b7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2159,7 +2159,7 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
 	return r;
 }
 
-static struct kvmppc_ops kvmppc_hv_ops = {
+static struct kvmppc_ops kvm_ops_hv = {
 	.is_hv_enabled = true,
 	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
 	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
@@ -2186,7 +2186,6 @@ static struct kvmppc_ops kvmppc_hv_ops = {
 	.create_memslot = kvmppc_core_create_memslot_hv,
 	.init_vm = kvmppc_core_init_vm_hv,
 	.destroy_vm = kvmppc_core_destroy_vm_hv,
-	.check_processor_compat = kvmppc_core_check_processor_compat_hv,
 	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
 	.emulate_op = kvmppc_core_emulate_op_hv,
 	.emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
@@ -2198,20 +2197,23 @@ static struct kvmppc_ops kvmppc_hv_ops = {
 static int kvmppc_book3s_init_hv(void)
 {
 	int r;
-
-	r = kvm_init(&kvmppc_hv_ops, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
-
-	if (r)
+	/*
+	 * FIXME!! Do we need to check on all cpus ?
+	 */
+	r = kvmppc_core_check_processor_compat_hv();
+	if (r < 0)
 		return r;
 
-	r = kvmppc_mmu_hv_init();
+	kvm_ops_hv.owner = THIS_MODULE;
+	kvmppc_hv_ops = &kvm_ops_hv;
 
+	r = kvmppc_mmu_hv_init();
 	return r;
 }
 
 static void kvmppc_book3s_exit_hv(void)
 {
-	kvm_exit();
+	kvmppc_hv_ops = NULL;
 }
 
 module_init(kvmppc_book3s_init_hv);
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 7f583a482161..fbd985f0cb02 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1525,7 +1525,7 @@ static long kvm_arch_vm_ioctl_pr(struct file *filp,
 	return -ENOTTY;
 }
 
-static struct kvmppc_ops kvmppc_pr_ops = {
+static struct kvmppc_ops kvm_ops_pr = {
 	.is_hv_enabled = false,
 	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
 	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
@@ -1552,7 +1552,6 @@ static struct kvmppc_ops kvmppc_pr_ops = {
 	.create_memslot = kvmppc_core_create_memslot_pr,
 	.init_vm = kvmppc_core_init_vm_pr,
 	.destroy_vm = kvmppc_core_destroy_vm_pr,
-	.check_processor_compat = kvmppc_core_check_processor_compat_pr,
 	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
 	.emulate_op = kvmppc_core_emulate_op_pr,
 	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
@@ -1561,27 +1560,35 @@ static struct kvmppc_ops kvmppc_pr_ops = {
 	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
 };
 
-static int kvmppc_book3s_init_pr(void)
+
+int kvmppc_book3s_init_pr(void)
 {
 	int r;
 
-	r = kvm_init(&kvmppc_pr_ops, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
-
-	if (r)
+	r = kvmppc_core_check_processor_compat_pr();
+	if (r < 0)
 		return r;
 
-	r = kvmppc_mmu_hpte_sysinit();
+	kvm_ops_pr.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_pr;
 
+	r = kvmppc_mmu_hpte_sysinit();
 	return r;
 }
 
-static void kvmppc_book3s_exit_pr(void)
+void kvmppc_book3s_exit_pr(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_mmu_hpte_sysexit();
-	kvm_exit();
 }
 
+/*
+ * We only support separate modules for book3s 64
+ */
+#ifdef CONFIG_PPC_BOOK3S_64
+
 module_init(kvmppc_book3s_init_pr);
 module_exit(kvmppc_book3s_exit_pr);
 
 MODULE_LICENSE("GPL");
+#endif
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index c3c832b27ee5..f7a5108a3483 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -818,7 +818,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
 	}
 
 	/* Check for real mode returning too hard */
-	if (xics->real_mode && kvmppc_ops->is_hv_enabled)
+	if (xics->real_mode && vcpu->kvm->arch.kvm_ops->is_hv_enabled)
 		return kvmppc_xics_rm_complete(vcpu, req);
 
 	switch (req) {
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index cb2d986a3382..15d0149511eb 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1472,7 +1472,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
 	get_sregs_base(vcpu, sregs);
 	get_sregs_arch206(vcpu, sregs);
-	return kvmppc_ops->get_sregs(vcpu, sregs);
+	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
@@ -1491,7 +1491,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	if (ret < 0)
 		return ret;
 
-	return kvmppc_ops->set_sregs(vcpu, sregs);
+	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
 }
 
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
@@ -1548,7 +1548,7 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		val = get_reg_val(reg->id, vcpu->arch.vrsave);
 		break;
 	default:
-		r = kvmppc_ops->get_one_reg(vcpu, reg->id, &val);
+		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
 		break;
 	}
 
@@ -1631,7 +1631,7 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		vcpu->arch.vrsave = set_reg_val(reg->id, val);
 		break;
 	default:
-		r = kvmppc_ops->set_one_reg(vcpu, reg->id, &val);
+		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
 		break;
 	}
 
@@ -1911,37 +1911,37 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 {
-	kvmppc_ops->mmu_destroy(vcpu);
+	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
 }
 
 int kvmppc_core_init_vm(struct kvm *kvm)
 {
-	return kvmppc_ops->init_vm(kvm);
+	return kvm->arch.kvm_ops->init_vm(kvm);
 }
 
 struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 {
-	return kvmppc_ops->vcpu_create(kvm, id);
+	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
 }
 
 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
-	kvmppc_ops->vcpu_free(vcpu);
+	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
 }
 
 void kvmppc_core_destroy_vm(struct kvm *kvm)
 {
-	kvmppc_ops->destroy_vm(kvm);
+	kvm->arch.kvm_ops->destroy_vm(kvm);
 }
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	kvmppc_ops->vcpu_load(vcpu, cpu);
+	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	kvmppc_ops->vcpu_put(vcpu);
+	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
 }
 
 int __init kvmppc_booke_init(void)
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index d225d5ebddcc..497b142f651c 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -555,13 +555,19 @@ static int __init kvmppc_e500_init(void)
 	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
 			   ivor[max_ivor] + handler_len);
 
-	r = kvm_init(&kvm_ops_e500, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+	if (r)
+		goto err_out;
+	kvm_ops_e500.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_e500;
+
 err_out:
 	return r;
 }
 
 static void __exit kvmppc_e500_exit(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_booke_exit();
 }
 
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index db6a383401c7..4132cd2fc171 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -373,15 +373,19 @@ static int __init kvmppc_e500mc_init(void)
 	kvmppc_init_lpid(64);
 	kvmppc_claim_lpid(0); /* host */
 
-	r = kvm_init(&kvm_ops_e500mc, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
 	if (r)
 		goto err_out;
+	kvm_ops_e500mc.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_e500mc;
+
 err_out:
 	return r;
 }
 
 static void __exit kvmppc_e500mc_exit(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_booke_exit();
 }
 
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index de9a340d22ed..2f9a0873b44f 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -130,8 +130,8 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 	case SPRN_PIR: break;
 
 	default:
-		emulated = kvmppc_ops->emulate_mtspr(vcpu, sprn,
-						     spr_val);
+		emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
+								  spr_val);
 		if (emulated == EMULATE_FAIL)
 			printk(KERN_INFO "mtspr: unknown spr "
 				"0x%x\n", sprn);
@@ -191,8 +191,8 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 		spr_val = kvmppc_get_dec(vcpu, get_tb());
 		break;
 	default:
-		emulated = kvmppc_ops->emulate_mfspr(vcpu, sprn,
-						     &spr_val);
+		emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
+								  &spr_val);
 		if (unlikely(emulated == EMULATE_FAIL)) {
 			printk(KERN_INFO "mfspr: unknown spr "
 				"0x%x\n", sprn);
@@ -464,7 +464,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	}
 
 	if (emulated == EMULATE_FAIL) {
-		emulated = kvmppc_ops->emulate_op(run, vcpu, inst, &advance);
+		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
+							       &advance);
 		if (emulated == EMULATE_AGAIN) {
 			advance = 0;
 		} else if (emulated == EMULATE_FAIL) {
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index b103d747934a..0320c1721caa 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -26,6 +26,7 @@
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/file.h>
+#include <linux/module.h>
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
@@ -39,7 +40,11 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-struct kvmppc_ops *kvmppc_ops;
+struct kvmppc_ops *kvmppc_hv_ops;
+EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
+struct kvmppc_ops *kvmppc_pr_ops;
+EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
+
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
@@ -195,7 +200,7 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
 		goto out;
 
 	/* HV KVM can only do PAPR mode for now */
-	if (!vcpu->arch.papr_enabled && kvmppc_ops->is_hv_enabled)
+	if (!vcpu->arch.papr_enabled && vcpu->kvm->arch.kvm_ops->is_hv_enabled)
 		goto out;
 
 #ifdef CONFIG_KVM_BOOKE_HV
@@ -271,10 +276,35 @@ void kvm_arch_check_processor_compat(void *rtn)
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	if (type)
-		return -EINVAL;
-
+	struct kvmppc_ops *kvm_ops = NULL;
+	/*
+	 * if we have both HV and PR enabled, default is HV
+	 */
+	if (type == 0) {
+		if (kvmppc_hv_ops)
+			kvm_ops = kvmppc_hv_ops;
+		else
+			kvm_ops = kvmppc_pr_ops;
+		if (!kvm_ops)
+			goto err_out;
+	} else if (type == KVM_VM_PPC_HV) {
+		if (!kvmppc_hv_ops)
+			goto err_out;
+		kvm_ops = kvmppc_hv_ops;
+	} else if (type == KVM_VM_PPC_PR) {
+		if (!kvmppc_pr_ops)
+			goto err_out;
+		kvm_ops = kvmppc_pr_ops;
+	} else
+		goto err_out;
+
+	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
+		return -ENOENT;
+
+	kvm->arch.kvm_ops = kvm_ops;
 	return kvmppc_core_init_vm(kvm);
+err_out:
+	return -EINVAL;
 }
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
@@ -294,6 +324,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kvmppc_core_destroy_vm(kvm);
 
 	mutex_unlock(&kvm->lock);
+
+	/* drop the module reference */
+	module_put(kvm->arch.kvm_ops->owner);
 }
 
 void kvm_arch_sync_events(struct kvm *kvm)
@@ -303,6 +336,10 @@ void kvm_arch_sync_events(struct kvm *kvm)
 int kvm_dev_ioctl_check_extension(long ext)
 {
 	int r;
+	/* FIXME!!
+	 * Should some of this be vm ioctl ? is it possible now ?
+	 */
+	int hv_enabled = kvmppc_hv_ops ? 1 : 0;
 
 	switch (ext) {
 #ifdef CONFIG_BOOKE
@@ -329,7 +366,7 @@
 	case KVM_CAP_SW_TLB:
 #endif
 		/* We support this only for PR */
-		r = !kvmppc_ops->is_hv_enabled;
+		r = !hv_enabled;
 		break;
 #ifdef CONFIG_KVM_MMIO
 	case KVM_CAP_COALESCED_MMIO:
@@ -354,13 +391,13 @@
 #endif /* CONFIG_PPC_BOOK3S_64 */
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	case KVM_CAP_PPC_SMT:
-		if (kvmppc_ops->is_hv_enabled)
+		if (hv_enabled)
 			r = threads_per_core;
 		else
 			r = 0;
 		break;
 	case KVM_CAP_PPC_RMA:
-		r = kvmppc_ops->is_hv_enabled;
+		r = hv_enabled;
 		/* PPC970 requires an RMA */
 		if (r && cpu_has_feature(CPU_FTR_ARCH_201))
 			r = 2;
@@ -368,7 +405,7 @@
 #endif
 	case KVM_CAP_SYNC_MMU:
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-		if (kvmppc_ops->is_hv_enabled)
+		if (hv_enabled)
 			r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
 		else
 			r = 0;
@@ -380,7 +417,7 @@
 		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	case KVM_CAP_PPC_HTAB_FD:
-		r = kvmppc_ops->is_hv_enabled;
+		r = hv_enabled;
 		break;
 #endif
 	case KVM_CAP_NR_VCPUS:
@@ -390,7 +427,7 @@
 		 * will have secondary threads "offline"), and for other KVM
 		 * implementations just count online CPUs.
 		 */
-		if (kvmppc_ops->is_hv_enabled)
+		if (hv_enabled)
 			r = num_present_cpus();
 		else
 			r = num_online_cpus();
@@ -1039,9 +1076,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 	case KVM_PPC_GET_SMMU_INFO: {
 		struct kvm_ppc_smmu_info info;
+		struct kvm *kvm = filp->private_data;
 
 		memset(&info, 0, sizeof(info));
-		r = kvmppc_ops->get_smmu_info(kvm, &info);
+		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
 		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
 			r = -EFAULT;
 		break;
@@ -1052,9 +1090,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
 		break;
 	}
-	default:
-		r = kvmppc_ops->arch_vm_ioctl(filp, ioctl, arg);
-
+	default: {
+		struct kvm *kvm = filp->private_data;
+		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
+	}
 #else /* CONFIG_PPC_BOOK3S_64 */
 	default:
 		r = -ENOTTY;
@@ -1104,15 +1143,10 @@ EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
 
 int kvm_arch_init(void *opaque)
 {
-	if (kvmppc_ops) {
-		printk(KERN_ERR "kvm: already loaded the other module\n");
-		return -EEXIST;
-	}
-	kvmppc_ops = (struct kvmppc_ops *)opaque;
 	return 0;
 }
 
 void kvm_arch_exit(void)
 {
-	kvmppc_ops = NULL;
+
 }