Diffstat (limited to 'arch/powerpc/kvm')
39 files changed, 2864 insertions, 1293 deletions
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 2f5c6b6d6877..93221e87b911 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -31,13 +31,13 @@
 #include "44x_tlb.h"
 #include "booke.h"
 
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_44x(struct kvm_vcpu *vcpu, int cpu)
 {
 	kvmppc_booke_vcpu_load(vcpu, cpu);
 	kvmppc_44x_tlb_load(vcpu);
 }
 
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_44x(struct kvm_vcpu *vcpu)
 {
 	kvmppc_44x_tlb_put(vcpu);
 	kvmppc_booke_vcpu_put(vcpu);
@@ -114,29 +114,32 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_get_sregs_44x(struct kvm_vcpu *vcpu,
+				     struct kvm_sregs *sregs)
 {
-	kvmppc_get_sregs_ivor(vcpu, sregs);
+	return kvmppc_get_sregs_ivor(vcpu, sregs);
 }
 
-int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_set_sregs_44x(struct kvm_vcpu *vcpu,
+				     struct kvm_sregs *sregs)
 {
 	return kvmppc_set_sregs_ivor(vcpu, sregs);
 }
 
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+static int kvmppc_get_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
 		       union kvmppc_one_reg *val)
 {
 	return -EINVAL;
 }
 
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
+static int kvmppc_set_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
 		       union kvmppc_one_reg *val)
 {
 	return -EINVAL;
 }
 
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_44x(struct kvm *kvm,
+						    unsigned int id)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x;
 	struct kvm_vcpu *vcpu;
@@ -167,7 +170,7 @@ out:
 	return ERR_PTR(err);
 }
 
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 
@@ -176,28 +179,53 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 	kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
 }
 
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_44x(struct kvm *kvm)
 {
 	return 0;
 }
 
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_core_destroy_vm_44x(struct kvm *kvm)
 {
 }
 
+static struct kvmppc_ops kvm_ops_44x = {
+	.get_sregs = kvmppc_core_get_sregs_44x,
+	.set_sregs = kvmppc_core_set_sregs_44x,
+	.get_one_reg = kvmppc_get_one_reg_44x,
+	.set_one_reg = kvmppc_set_one_reg_44x,
+	.vcpu_load   = kvmppc_core_vcpu_load_44x,
+	.vcpu_put    = kvmppc_core_vcpu_put_44x,
+	.vcpu_create = kvmppc_core_vcpu_create_44x,
+	.vcpu_free   = kvmppc_core_vcpu_free_44x,
+	.mmu_destroy  = kvmppc_mmu_destroy_44x,
+	.init_vm = kvmppc_core_init_vm_44x,
+	.destroy_vm = kvmppc_core_destroy_vm_44x,
+	.emulate_op = kvmppc_core_emulate_op_44x,
+	.emulate_mtspr = kvmppc_core_emulate_mtspr_44x,
+	.emulate_mfspr = kvmppc_core_emulate_mfspr_44x,
+};
+
 static int __init kvmppc_44x_init(void)
 {
 	int r;
 
 	r = kvmppc_booke_init();
 	if (r)
-		return r;
+		goto err_out;
+
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
+	if (r)
+		goto err_out;
+	kvm_ops_44x.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_44x;
 
-	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
+err_out:
+	return r;
 }
 
 static void __exit kvmppc_44x_exit(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_booke_exit();
 }
 
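The hunk above replaces directly linked kvmppc_core_* entry points with a per-backend table of function pointers that the module publishes at init time and clears at exit. A minimal stand-alone sketch of that registration-and-dispatch pattern follows; all names here (core_ops, ops_44x, pr_ops) are invented for illustration and are not part of the patch:

#include <stdio.h>

struct vcpu;	/* opaque stand-in for struct kvm_vcpu */

struct core_ops {
	void (*vcpu_load)(struct vcpu *v, int cpu);
	void (*vcpu_put)(struct vcpu *v);
};

static void load_44x(struct vcpu *v, int cpu)
{
	printf("44x backend: vcpu load on cpu %d\n", cpu);
}

static void put_44x(struct vcpu *v)
{
	printf("44x backend: vcpu put\n");
}

static struct core_ops ops_44x = {
	.vcpu_load = load_44x,
	.vcpu_put  = put_44x,
};

/* Mirrors kvmppc_pr_ops: common code only ever sees this pointer. */
static struct core_ops *pr_ops;

int main(void)
{
	pr_ops = &ops_44x;		/* module init registers the table */
	pr_ops->vcpu_load(NULL, 0);	/* generic wrapper dispatches */
	pr_ops->vcpu_put(NULL);
	pr_ops = NULL;			/* module exit unregisters */
	return 0;
}

The payoff is that generic code never links against a backend symbol, so the PR and HV backends can be built as separate loadable modules.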
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 35ec0a8547da..92c9ab4bcfec 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -91,8 +91,8 @@ static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn)
 	return EMULATE_DONE;
 }
 
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			       unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
 	int dcrn = get_dcrn(inst);
@@ -152,7 +152,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return emulated;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
 	int emulated = EMULATE_DONE;
 
@@ -172,7 +172,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 	return emulated;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
 	int emulated = EMULATE_DONE;
 
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ed0385448148..0deef1082e02 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -268,7 +268,7 @@ static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
 	trace_kvm_stlb_inval(stlb_index);
 }
 
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	int i;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index ffaef2cb101a..8aeeda1ff42a 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -34,17 +34,20 @@ config KVM_BOOK3S_64_HANDLER
 	bool
 	select KVM_BOOK3S_HANDLER
 
-config KVM_BOOK3S_PR
+config KVM_BOOK3S_PR_POSSIBLE
 	bool
 	select KVM_MMIO
 	select MMU_NOTIFIER
 
+config KVM_BOOK3S_HV_POSSIBLE
+	bool
+
 config KVM_BOOK3S_32
 	tristate "KVM support for PowerPC book3s_32 processors"
 	depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT
 	select KVM
 	select KVM_BOOK3S_32_HANDLER
-	select KVM_BOOK3S_PR
+	select KVM_BOOK3S_PR_POSSIBLE
 	---help---
 	  Support running unmodified book3s_32 guest kernels
 	  in virtual machines on book3s_32 host processors.
@@ -59,6 +62,7 @@ config KVM_BOOK3S_64
 	depends on PPC_BOOK3S_64
 	select KVM_BOOK3S_64_HANDLER
 	select KVM
+	select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
 	---help---
 	  Support running unmodified book3s_64 and book3s_32 guest kernels
 	  in virtual machines on book3s_64 host processors.
@@ -69,8 +73,9 @@ config KVM_BOOK3S_64
 	  If unsure, say N.
 
 config KVM_BOOK3S_64_HV
-	bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
+	tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
 	depends on KVM_BOOK3S_64
+	select KVM_BOOK3S_HV_POSSIBLE
 	select MMU_NOTIFIER
 	select CMA
 	---help---
@@ -89,9 +94,20 @@ config KVM_BOOK3S_64_HV
 	  If unsure, say N.
 
 config KVM_BOOK3S_64_PR
-	def_bool y
-	depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
-	select KVM_BOOK3S_PR
+	tristate "KVM support without using hypervisor mode in host"
+	depends on KVM_BOOK3S_64
+	select KVM_BOOK3S_PR_POSSIBLE
+	---help---
+	  Support running guest kernels in virtual machines on processors
+	  without using hypervisor mode in the host, by running the
+	  guest in user mode (problem state) and emulating all
+	  privileged instructions and registers.
+
+	  This is not as fast as using hypervisor mode, but works on
+	  machines where hypervisor mode is not available or not usable,
+	  and can emulate processors that are different from the host
+	  processor, including emulating 32-bit processors on a 64-bit
+	  host.
 
 config KVM_BOOKE_HV
 	bool
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 6646c952c5e3..ce569b6bf4d8 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -53,41 +53,51 @@ kvm-e500mc-objs := \
 	e500_emulate.o
 kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
 
-kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
-	$(KVM)/coalesced_mmio.o \
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
+	book3s_64_vio_hv.o
+
+kvm-pr-y := \
 	fpu.o \
 	book3s_paired_singles.o \
 	book3s_pr.o \
 	book3s_pr_papr.o \
-	book3s_64_vio_hv.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
 	book3s_mmu_hpte.o \
 	book3s_64_mmu_host.o \
 	book3s_64_mmu.o \
 	book3s_32_mmu.o
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
+
+ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+kvm-book3s_64-module-objs := \
+	$(KVM)/coalesced_mmio.o
+
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
 	book3s_rmhandlers.o
+endif
 
-kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+kvm-hv-y += \
 	book3s_hv.o \
 	book3s_hv_interrupts.o \
 	book3s_64_mmu_hv.o
+
 kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
 	book3s_hv_rm_xics.o
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+
+ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
 	book3s_hv_rmhandlers.o \
 	book3s_hv_rm_mmu.o \
-	book3s_64_vio_hv.o \
 	book3s_hv_ras.o \
 	book3s_hv_builtin.o \
 	book3s_hv_cma.o \
 	$(kvm-book3s_64-builtin-xics-objs-y)
+endif
 
 kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
 	book3s_xics.o
 
-kvm-book3s_64-module-objs := \
+kvm-book3s_64-module-objs += \
 	$(KVM)/kvm_main.o \
 	$(KVM)/eventfd.o \
 	powerpc.o \
@@ -123,4 +133,7 @@ obj-$(CONFIG_KVM_E500MC) += kvm.o
 obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
 obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
 
+obj-$(CONFIG_KVM_BOOK3S_64_PR) += kvm-pr.o
+obj-$(CONFIG_KVM_BOOK3S_64_HV) += kvm-hv.o
+
 obj-y += $(kvm-book3s_64-builtin-objs-y)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 700df6f1d32c..8912608b7e1b 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -34,6 +34,7 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 
+#include "book3s.h"
 #include "trace.h"
 
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -69,6 +70,50 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 {
 }
 
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
+{
+	if (!is_kvmppc_hv_enabled(vcpu->kvm))
+		return to_book3s(vcpu)->hior;
+	return 0;
+}
+
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+			unsigned long pending_now, unsigned long old_pending)
+{
+	if (is_kvmppc_hv_enabled(vcpu->kvm))
+		return;
+	if (pending_now)
+		vcpu->arch.shared->int_pending = 1;
+	else if (old_pending)
+		vcpu->arch.shared->int_pending = 0;
+}
+
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+	ulong crit_raw;
+	ulong crit_r1;
+	bool crit;
+
+	if (is_kvmppc_hv_enabled(vcpu->kvm))
+		return false;
+
+	crit_raw = vcpu->arch.shared->critical;
+	crit_r1 = kvmppc_get_gpr(vcpu, 1);
+
+	/* Truncate crit indicators in 32 bit mode */
+	if (!(vcpu->arch.shared->msr & MSR_SF)) {
+		crit_raw &= 0xffffffff;
+		crit_r1 &= 0xffffffff;
+	}
+
+	/* Critical section when crit == r1 */
+	crit = (crit_raw == crit_r1);
+	/* ... and we're in supervisor mode */
+	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+
+	return crit;
+}
+
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
 	vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
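kvmppc_critical_section() above treats the guest as inside a critical section when the guest-advertised `critical` token equals r1 and the guest is in supervisor mode, truncating both values to 32 bits when MSR_SF is clear. A self-contained restatement of the check; the MSR_SF and MSR_PR bit positions are assumed here from the usual PowerPC MSR layout, not taken from this patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_SF (1ULL << 63)	/* 64-bit mode (assumed bit position) */
#define MSR_PR (1ULL << 14)	/* problem state (assumed bit position) */

/* Local variables stand in for vcpu->arch.shared fields. */
static bool in_critical_section(uint64_t msr, uint64_t critical, uint64_t r1)
{
	if (!(msr & MSR_SF)) {		/* truncate indicators in 32-bit mode */
		critical &= 0xffffffff;
		r1 &= 0xffffffff;
	}
	/* critical when the token matches r1 and we're in supervisor mode */
	return critical == r1 && !(msr & MSR_PR);
}

int main(void)
{
	printf("%d\n", in_critical_section(MSR_SF, 0x1000, 0x1000));		/* 1 */
	printf("%d\n", in_critical_section(MSR_SF | MSR_PR, 0x1000, 0x1000));	/* 0: user mode */
	return 0;
}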
@@ -126,28 +171,32 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 	printk(KERN_INFO "Queueing interrupt %x\n", vec);
 #endif
 }
-
+EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
 
 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 {
 	/* might as well deliver this straight away */
 	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
 
 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
 {
 	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);
 
 int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
 {
 	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);
 
 void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
 {
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
 
 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 				struct kvm_interrupt *irq)
@@ -285,8 +334,10 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
 
-pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+			bool *writable)
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;
 
@@ -302,20 +353,23 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 
 		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
 		get_page(pfn_to_page(pfn));
+		if (writable)
+			*writable = true;
 		return pfn;
 	}
 
-	return gfn_to_pfn(vcpu->kvm, gfn);
+	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
 }
+EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);
 
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
-			struct kvmppc_pte *pte)
+			bool iswrite, struct kvmppc_pte *pte)
 {
 	int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
 	int r;
 
 	if (relocated) {
-		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
+		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
 	} else {
 		pte->eaddr = eaddr;
 		pte->raddr = eaddr & KVM_PAM;
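kvmppc_gfn_to_pfn() now carries the caller's write intent in (`writing`) and reports back through `*writable` whether the resulting host mapping may actually be written, which is what lets read-only host pages be mapped read-only into the guest instead of being refused or silently made writable. A toy model of that in/out contract; lookup_pfn and its odd/even read-only policy are invented purely for the demo:

#include <stdbool.h>
#include <stdio.h>

/* Caller states intent (writing); callee reports capability (*writable). */
static unsigned long lookup_pfn(unsigned long gfn, bool writing, bool *writable)
{
	bool page_is_readonly = (gfn % 2) != 0;	/* fake policy for the demo */

	if (writable)
		*writable = !page_is_readonly;
	if (writing && page_is_readonly)
		return ~0UL;			/* error: write to r/o page */
	return gfn + 0x1000;			/* fake pfn */
}

int main(void)
{
	bool w;
	unsigned long pfn = lookup_pfn(3, false, &w);

	/* A read fault on a read-only page still succeeds, but the
	 * caller learns it must map the page without write permission. */
	printf("pfn=%#lx writable=%d\n", pfn, w);
	return 0;
}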
@@ -361,7 +415,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 
 	vcpu->stat.st++;
 
-	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+	if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
 		return -ENOENT;
 
 	*eaddr = pte.raddr;
@@ -374,6 +428,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 
 	return EMULATE_DONE;
 }
+EXPORT_SYMBOL_GPL(kvmppc_st);
 
 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 	      bool data)
@@ -383,7 +438,7 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 
 	vcpu->stat.ld++;
 
-	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+	if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
 		goto nopte;
 
 	*eaddr = pte.raddr;
@@ -404,6 +459,7 @@ nopte:
 mmio:
 	return EMULATE_DO_MMIO;
 }
+EXPORT_SYMBOL_GPL(kvmppc_ld);
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
@@ -419,6 +475,18 @@ void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
 }
 
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
+}
+
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
@@ -495,8 +563,7 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	if (size > sizeof(val))
 		return -EINVAL;
 
-	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
-
+	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
 	if (r == -EINVAL) {
 		r = 0;
 		switch (reg->id) {
@@ -528,6 +595,9 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		}
 		val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
 		break;
+	case KVM_REG_PPC_VRSAVE:
+		val = get_reg_val(reg->id, vcpu->arch.vrsave);
+		break;
 #endif /* CONFIG_ALTIVEC */
 	case KVM_REG_PPC_DEBUG_INST: {
 		u32 opcode = INS_TW;
@@ -572,8 +642,7 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
 		return -EFAULT;
 
-	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
-
+	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
 	if (r == -EINVAL) {
 		r = 0;
 		switch (reg->id) {
@@ -605,6 +674,13 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		}
 		vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
 		break;
+	case KVM_REG_PPC_VRSAVE:
+		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+			r = -ENXIO;
+			break;
+		}
+		vcpu->arch.vrsave = set_reg_val(reg->id, val);
+		break;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_KVM_XICS
 	case KVM_REG_PPC_ICP_STATE:
@@ -625,6 +701,27 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	return r;
 }
 
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
+}
+
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+{
+	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
+}
+EXPORT_SYMBOL_GPL(kvmppc_set_msr);
+
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
+}
+
 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 				  struct kvm_translation *tr)
 {
@@ -644,3 +741,141 @@ void kvmppc_decrementer_func(unsigned long data)
 	kvmppc_core_queue_dec(vcpu);
 	kvm_vcpu_kick(vcpu);
 }
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
+}
+
+int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
+}
+
+void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+			      struct kvm_memory_slot *dont)
+{
+	kvm->arch.kvm_ops->free_memslot(free, dont);
+}
+
+int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			       unsigned long npages)
+{
+	return kvm->arch.kvm_ops->create_memslot(slot, npages);
+}
+
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
+}
+
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
+				struct kvm_userspace_memory_region *mem)
+{
+	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				const struct kvm_memory_slot *old)
+{
+	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_hva);
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	return kvm->arch.kvm_ops->age_hva(kvm, hva);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
+}
+
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+
+#ifdef CONFIG_PPC64
+	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+#endif
+
+	return kvm->arch.kvm_ops->init_vm(kvm);
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+	kvm->arch.kvm_ops->destroy_vm(kvm);
+
+#ifdef CONFIG_PPC64
+	kvmppc_rtas_tokens_free(kvm);
+	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+#endif
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+	/*
+	 * We always return 0 for book3s. We check
+	 * for compatability while loading the HV
+	 * or PR module
+	 */
+	return 0;
+}
+
+static int kvmppc_book3s_init(void)
+{
+	int r;
+
+	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	if (r)
+		return r;
+#ifdef CONFIG_KVM_BOOK3S_32
+	r = kvmppc_book3s_init_pr();
+#endif
+	return r;
+
+}
+
+static void kvmppc_book3s_exit(void)
+{
+#ifdef CONFIG_KVM_BOOK3S_32
+	kvmppc_book3s_exit_pr();
+#endif
+	kvm_exit();
+}
+
+module_init(kvmppc_book3s_init);
+module_exit(kvmppc_book3s_exit);
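The wrappers added above are the heart of the refactoring: the common book3s code keeps the arch entry points and forwards each one through kvm->arch.kvm_ops, so the PR and HV modules can coexist on one host and each VM binds to exactly one backend at creation. A stand-alone sketch of two coexisting backends (all names invented; this is not the kernel's selection logic, only the shape of it):

#include <stdio.h>

struct vm;	/* forward declaration; stand-in for struct kvm */

struct backend_ops {
	const char *name;
	int (*run)(struct vm *vm);
};

static int run_pr(struct vm *vm) { printf("run in problem state\n"); return 0; }
static int run_hv(struct vm *vm) { printf("run in hypervisor mode\n"); return 0; }

static struct backend_ops ops_pr = { "PR", run_pr };
static struct backend_ops ops_hv = { "HV", run_hv };

struct vm {
	struct backend_ops *ops;	/* mirrors kvm->arch.kvm_ops */
};

int main(void)
{
	struct vm a = { &ops_hv };	/* one VM picked HV at creation */
	struct vm b = { &ops_pr };	/* another picked PR */

	/* Generic code never links against a backend symbol directly. */
	a.ops->run(&a);
	b.ops->run(&b);
	return 0;
}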
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
new file mode 100644
index 000000000000..4bf956cf94d6
--- /dev/null
+++ b/arch/powerpc/kvm/book3s.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright IBM Corporation, 2013
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your optional) any later version of the license.
+ *
+ */
+
+#ifndef __POWERPC_KVM_BOOK3S_H__
+#define __POWERPC_KVM_BOOK3S_H__
+
+extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
+					 struct kvm_memory_slot *memslot);
+extern int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva);
+extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start,
+				  unsigned long end);
+extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva);
+extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
+extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				     unsigned int inst, int *advance);
+extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
+					int sprn, ulong spr_val);
+extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
+					int sprn, ulong *spr_val);
+extern int kvmppc_book3s_init_pr(void);
+extern void kvmppc_book3s_exit_pr(void);
+
+#endif
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index c8cefdd15fd8..76a64ce6a5b6 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -84,7 +84,8 @@ static inline bool sr_nx(u32 sr_raw)
 }
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
-					  struct kvmppc_pte *pte, bool data);
+					  struct kvmppc_pte *pte, bool data,
+					  bool iswrite);
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 					     u64 *vsid);
 
@@ -99,7 +100,7 @@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 	u64 vsid;
 	struct kvmppc_pte pte;
 
-	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
+	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
 		return pte.vpage;
 
 	kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
@@ -111,10 +112,11 @@ static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
 	kvmppc_set_msr(vcpu, 0);
 }
 
-static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s,
+static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
 				      u32 sre, gva_t eaddr,
 				      bool primary)
 {
+	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	u32 page, hash, pteg, htabmask;
 	hva_t r;
 
@@ -132,7 +134,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3
 		kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg,
 		sr_vsid(sre));
 
-	r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
+	r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
 	if (kvm_is_error_hva(r))
 		return r;
 	return r | (pteg & ~PAGE_MASK);
@@ -145,7 +147,8 @@ static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
 }
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
-					  struct kvmppc_pte *pte, bool data)
+					  struct kvmppc_pte *pte, bool data,
+					  bool iswrite)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	struct kvmppc_bat *bat;
@@ -186,8 +189,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 				printk(KERN_INFO "BAT is not readable!\n");
 				continue;
 			}
-			if (!pte->may_write) {
-				/* let's treat r/o BATs as not-readable for now */
+			if (iswrite && !pte->may_write) {
 				dprintk_pte("BAT is read-only!\n");
 				continue;
 			}
@@ -201,9 +203,8 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 				     struct kvmppc_pte *pte, bool data,
-				     bool primary)
+				     bool iswrite, bool primary)
 {
-	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	u32 sre;
 	hva_t ptegp;
 	u32 pteg[16];
@@ -218,7 +219,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 	pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
 
-	ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu_book3s, sre, eaddr, primary);
+	ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu, sre, eaddr, primary);
 	if (kvm_is_error_hva(ptegp)) {
 		printk(KERN_INFO "KVM: Invalid PTEG!\n");
 		goto no_page_found;
@@ -258,9 +259,6 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 				break;
 			}
 
-			if ( !pte->may_read )
-				continue;
-
 			dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
 				    pteg[i], pteg[i+1], pp);
 			found = 1;
@@ -271,19 +269,23 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Update PTE C and A bits, so the guest's swapper knows we used the
 	   page */
 	if (found) {
-		u32 oldpte = pteg[i+1];
-
-		if (pte->may_read)
-			pteg[i+1] |= PTEG_FLAG_ACCESSED;
-		if (pte->may_write)
-			pteg[i+1] |= PTEG_FLAG_DIRTY;
-		else
-			dprintk_pte("KVM: Mapping read-only page!\n");
-
-		/* Write back into the PTEG */
-		if (pteg[i+1] != oldpte)
-			copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
-
+		u32 pte_r = pteg[i+1];
+		char __user *addr = (char __user *) &pteg[i+1];
+
+		/*
+		 * Use single-byte writes to update the HPTE, to
+		 * conform to what real hardware does.
+		 */
+		if (pte->may_read && !(pte_r & PTEG_FLAG_ACCESSED)) {
+			pte_r |= PTEG_FLAG_ACCESSED;
+			put_user(pte_r >> 8, addr + 2);
+		}
+		if (iswrite && pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
+			pte_r |= PTEG_FLAG_DIRTY;
+			put_user(pte_r, addr + 3);
+		}
+		if (!pte->may_read || (iswrite && !pte->may_write))
+			return -EPERM;
 		return 0;
 	}
 
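The rewritten update above sets the accessed and dirty flags with single-byte stores instead of copying the whole PTEG back, matching what real hardware does and avoiding clobbering concurrent updates to neighbouring bytes. The offsets fall out of big-endian layout: in a big-endian u32, the bit for PTEG_FLAG_ACCESSED (0x100, bits 15..8) sits in byte 2 and the bit for PTEG_FLAG_DIRTY (0x80, bits 7..0) in byte 3, hence addr + 2 and addr + 3. A small host-side demo of that arithmetic; the flag values are copied from the hunk's context and the guest hash table is simulated with a plain byte array:

#include <stdint.h>
#include <stdio.h>

#define PTEG_FLAG_ACCESSED 0x00000100u	/* value assumed from kernel headers */
#define PTEG_FLAG_DIRTY    0x00000080u

int main(void)
{
	/* One big-endian HPTE second word, byte by byte: 0x12340003. */
	uint8_t hpte[4] = { 0x12, 0x34, 0x00, 0x03 };
	uint32_t pte_r = ((uint32_t)hpte[0] << 24) | ((uint32_t)hpte[1] << 16) |
			 ((uint32_t)hpte[2] << 8) | hpte[3];

	/* Accessed bit: bits 15..8 live in byte 2 of a big-endian word,
	 * so "put_user(pte_r >> 8, addr + 2)" touches only that byte. */
	pte_r |= PTEG_FLAG_ACCESSED;
	hpte[2] = (uint8_t)(pte_r >> 8);

	/* Dirty bit (0x80) sits in the low byte, i.e. byte 3. */
	pte_r |= PTEG_FLAG_DIRTY;
	hpte[3] = (uint8_t)pte_r;

	printf("bytes: %02x %02x %02x %02x\n",
	       hpte[0], hpte[1], hpte[2], hpte[3]);	/* 12 34 01 83 */
	return 0;
}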
@@ -302,12 +304,14 @@ no_page_found:
 }
 
 static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-				      struct kvmppc_pte *pte, bool data)
+				      struct kvmppc_pte *pte, bool data,
+				      bool iswrite)
 {
 	int r;
 	ulong mp_ea = vcpu->arch.magic_page_ea;
 
 	pte->eaddr = eaddr;
+	pte->page_size = MMU_PAGE_4K;
 
 	/* Magic page override */
 	if (unlikely(mp_ea) &&
@@ -323,11 +327,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		return 0;
 	}
 
-	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
+	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite);
 	if (r < 0)
-		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
+		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+						   data, iswrite, true);
 	if (r < 0)
-		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false);
+		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
						   data, iswrite, false);
 
 	return r;
 }
@@ -347,7 +353,12 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 
 static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
 {
-	kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
+	int i;
+	struct kvm_vcpu *v;
+
+	/* flush this VA on all cpus */
+	kvm_for_each_vcpu(i, v, vcpu->kvm)
+		kvmppc_mmu_pte_flush(v, ea, 0x0FFFF000);
 }
 
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 00e619bf608e..3a0abd2e5a15 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -138,7 +138,8 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
 
 extern char etext[];
 
-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+			bool iswrite)
 {
 	pfn_t hpaddr;
 	u64 vpn;
@@ -152,9 +153,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	bool evict = false;
 	struct hpte_cache *pte;
 	int r = 0;
+	bool writable;
 
 	/* Get host physical address for gpa */
-	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
+	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
+				   iswrite, &writable);
 	if (is_error_noslot_pfn(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
 				 orig_pte->eaddr);
@@ -204,7 +207,7 @@ next_pteg:
 		(primary ? 0 : PTE_SEC);
 	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
 
-	if (orig_pte->may_write) {
+	if (orig_pte->may_write && writable) {
 		pteg1 |= PP_RWRW;
 		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
 	} else {
@@ -259,6 +262,11 @@ out:
 	return r;
 }
 
+void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
+}
+
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 {
 	struct kvmppc_sid_map *map;
@@ -341,7 +349,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 	svcpu_put(svcpu);
 }
 
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
 {
 	int i;
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 7e345e00661a..83da1f868fd5 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -107,9 +107,20 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 	return kvmppc_slb_calc_vpn(slb, eaddr);
 }
 
+static int mmu_pagesize(int mmu_pg)
+{
+	switch (mmu_pg) {
+	case MMU_PAGE_64K:
+		return 16;
+	case MMU_PAGE_16M:
+		return 24;
+	}
+	return 12;
+}
+
 static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
 {
-	return slbe->large ? 24 : 12;
+	return mmu_pagesize(slbe->base_page_size);
 }
 
 static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
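mmu_pagesize() above maps a base page size to its shift: 12, 16 or 24 bits for 4k, 64k and 16M pages respectively. Later in this file that shift becomes the effective-address offset mask via (1ull << shift) - 1, which replaces the old hard-coded 0xFFF/0xFFFFFF masks. A stand-alone check of those masks; the enum constants here are local stand-ins for the kernel's MMU_PAGE_* values:

#include <stdio.h>

enum { PAGE_4K, PAGE_64K, PAGE_16M };	/* stand-ins for MMU_PAGE_* */

static int page_shift(int pg)
{
	switch (pg) {
	case PAGE_64K: return 16;
	case PAGE_16M: return 24;
	}
	return 12;
}

int main(void)
{
	int sizes[] = { PAGE_4K, PAGE_64K, PAGE_16M };

	for (int i = 0; i < 3; i++) {
		unsigned long long mask = (1ull << page_shift(sizes[i])) - 1;
		/* The guest real address keeps the low "offset" bits of
		 * the effective address:
		 *   raddr = (rpn & ~mask) | (ea & mask)   */
		printf("shift=%2d eaddr_mask=0x%llx\n",
		       page_shift(sizes[i]), mask);	/* 0xfff, 0xffff, 0xffffff */
	}
	return 0;
}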
@@ -119,11 +130,11 @@ static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
 	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
 }
 
-static hva_t kvmppc_mmu_book3s_64_get_pteg(
-				struct kvmppc_vcpu_book3s *vcpu_book3s,
+static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
 				struct kvmppc_slb *slbe, gva_t eaddr,
 				bool second)
 {
+	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	u64 hash, pteg, htabsize;
 	u32 ssize;
 	hva_t r;
@@ -148,10 +159,10 @@ static hva_t kvmppc_mmu_book3s_64_get_pteg(
 
 	/* When running a PAPR guest, SDR1 contains a HVA address instead
 	   of a GPA */
-	if (vcpu_book3s->vcpu.arch.papr_enabled)
+	if (vcpu->arch.papr_enabled)
 		r = pteg;
 	else
-		r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
+		r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
 
 	if (kvm_is_error_hva(r))
 		return r;
@@ -166,18 +177,38 @@ static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
 	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
 	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
 
-	if (p < 24)
-		avpn >>= ((80 - p) - 56) - 8;
+	if (p < 16)
+		avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
 	else
-		avpn <<= 8;
+		avpn <<= p - 16;
 
 	return avpn;
 }
 
+/*
+ * Return page size encoded in the second word of a HPTE, or
+ * -1 for an invalid encoding for the base page size indicated by
+ * the SLB entry.  This doesn't handle mixed pagesize segments yet.
+ */
+static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
+{
+	switch (slbe->base_page_size) {
+	case MMU_PAGE_64K:
+		if ((r & 0xf000) == 0x1000)
+			return MMU_PAGE_64K;
+		break;
+	case MMU_PAGE_16M:
+		if ((r & 0xff000) == 0)
+			return MMU_PAGE_16M;
+		break;
+	}
+	return -1;
+}
+
 static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-				      struct kvmppc_pte *gpte, bool data)
+				      struct kvmppc_pte *gpte, bool data,
+				      bool iswrite)
 {
-	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	struct kvmppc_slb *slbe;
 	hva_t ptegp;
 	u64 pteg[16];
@@ -189,6 +220,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	u8 pp, key = 0;
 	bool found = false;
 	bool second = false;
+	int pgsize;
 	ulong mp_ea = vcpu->arch.magic_page_ea;
 
 	/* Magic page override */
@@ -202,6 +234,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		gpte->may_execute = true;
 		gpte->may_read = true;
 		gpte->may_write = true;
+		gpte->page_size = MMU_PAGE_4K;
 
 		return 0;
 	}
@@ -222,8 +255,12 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
 		HPTE_V_SECONDARY;
 
+	pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;
+
+	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
+
 do_second:
-	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
+	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
 	if (kvm_is_error_hva(ptegp))
 		goto no_page_found;
 
@@ -240,6 +277,13 @@ do_second:
 	for (i=0; i<16; i+=2) {
 		/* Check all relevant fields of 1st dword */
 		if ((pteg[i] & v_mask) == v_val) {
+			/* If large page bit is set, check pgsize encoding */
+			if (slbe->large &&
+			    (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
+				pgsize = decode_pagesize(slbe, pteg[i+1]);
+				if (pgsize < 0)
+					continue;
+			}
 			found = true;
 			break;
 		}
@@ -256,13 +300,15 @@ do_second:
 	v = pteg[i];
 	r = pteg[i+1];
 	pp = (r & HPTE_R_PP) | key;
-	eaddr_mask = 0xFFF;
+	if (r & HPTE_R_PP0)
+		pp |= 8;
 
 	gpte->eaddr = eaddr;
 	gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
-	if (slbe->large)
-		eaddr_mask = 0xFFFFFF;
+
+	eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
 	gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
+	gpte->page_size = pgsize;
 	gpte->may_execute = ((r & HPTE_R_N) ? false : true);
 	gpte->may_read = false;
 	gpte->may_write = false;
@@ -277,6 +323,7 @@ do_second:
 	case 3:
 	case 5:
 	case 7:
+	case 10:
 		gpte->may_read = true;
 		break;
 	}
@@ -287,30 +334,37 @@ do_second:
 
 	/* Update PTE R and C bits, so the guest's swapper knows we used the
 	 * page */
-	if (gpte->may_read) {
-		/* Set the accessed flag */
+	if (gpte->may_read && !(r & HPTE_R_R)) {
+		/*
+		 * Set the accessed flag.
+		 * We have to write this back with a single byte write
+		 * because another vcpu may be accessing this on
+		 * non-PAPR platforms such as mac99, and this is
+		 * what real hardware does.
+		 */
+		char __user *addr = (char __user *) &pteg[i+1];
 		r |= HPTE_R_R;
+		put_user(r >> 8, addr + 6);
 	}
-	if (data && gpte->may_write) {
-		/* Set the dirty flag -- XXX even if not writing */
+	if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
+		/* Set the dirty flag */
+		/* Use a single byte write */
+		char __user *addr = (char __user *) &pteg[i+1];
 		r |= HPTE_R_C;
+		put_user(r, addr + 7);
 	}
 
-	/* Write back into the PTEG */
-	if (pteg[i+1] != r) {
-		pteg[i+1] = r;
-		copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
-	}
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 
-	if (!gpte->may_read)
+	if (!gpte->may_read || (iswrite && !gpte->may_write))
 		return -EPERM;
 	return 0;
 
 no_page_found:
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 	return -ENOENT;
 
 no_seg_found:
-
 	dprintk("KVM MMU: Trigger segment fault\n");
 	return -EINVAL;
 }
@@ -345,6 +399,21 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb) | |||
345 | slbe->nx = (rs & SLB_VSID_N) ? 1 : 0; | 399 | slbe->nx = (rs & SLB_VSID_N) ? 1 : 0; |
346 | slbe->class = (rs & SLB_VSID_C) ? 1 : 0; | 400 | slbe->class = (rs & SLB_VSID_C) ? 1 : 0; |
347 | 401 | ||
402 | slbe->base_page_size = MMU_PAGE_4K; | ||
403 | if (slbe->large) { | ||
404 | if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) { | ||
405 | switch (rs & SLB_VSID_LP) { | ||
406 | case SLB_VSID_LP_00: | ||
407 | slbe->base_page_size = MMU_PAGE_16M; | ||
408 | break; | ||
409 | case SLB_VSID_LP_01: | ||
410 | slbe->base_page_size = MMU_PAGE_64K; | ||
411 | break; | ||
412 | } | ||
413 | } else | ||
414 | slbe->base_page_size = MMU_PAGE_16M; | ||
415 | } | ||
416 | |||
348 | slbe->orige = rb & (ESID_MASK | SLB_ESID_V); | 417 | slbe->orige = rb & (ESID_MASK | SLB_ESID_V); |
349 | slbe->origv = rs; | 418 | slbe->origv = rs; |
350 | 419 | ||
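
The slbmte hunk above derives the guest's base page size from the SLB L and LP bits: on CPUs with BOOK3S_HFLAG_MULTI_PGSIZE the LP field distinguishes 16M from 64K large pages, while older CPUs treat any large segment as 16M. A condensed sketch, with the LP field values assumed from the SLB_VSID_LP_00/01 names:

enum pgsize { PG_4K, PG_64K, PG_16M };	/* stand-ins for MMU_PAGE_* */

static enum pgsize slb_base_page_size(int large, int multi_pgsize,
				      unsigned int lp)
{
	if (!large)
		return PG_4K;
	if (!multi_pgsize)
		return PG_16M;	/* single large-page size: 16M */
	switch (lp) {
	case 0:			/* SLB_VSID_LP_00 */
		return PG_16M;
	case 1:			/* SLB_VSID_LP_01 (field value assumed) */
		return PG_64K;
	default:
		return PG_4K;	/* unhandled LP values keep the 4K default */
	}
}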
@@ -460,14 +529,45 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va, | |||
460 | bool large) | 529 | bool large) |
461 | { | 530 | { |
462 | u64 mask = 0xFFFFFFFFFULL; | 531 | u64 mask = 0xFFFFFFFFFULL; |
532 | long i; | ||
533 | struct kvm_vcpu *v; | ||
463 | 534 | ||
464 | dprintk("KVM MMU: tlbie(0x%lx)\n", va); | 535 | dprintk("KVM MMU: tlbie(0x%lx)\n", va); |
465 | 536 | ||
466 | if (large) | 537 | /* |
467 | mask = 0xFFFFFF000ULL; | 538 | * The tlbie instruction changed behaviour starting with |
468 | kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask); | 539 | * POWER6. On POWER6 and later the large page flag is not |
540 | * in the instruction but in the RB value, along with bits | ||
541 | * indicating page and segment sizes. | ||
542 | */ | ||
543 | if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) { | ||
544 | /* POWER6 or later */ | ||
545 | if (va & 1) { /* L bit */ | ||
546 | if ((va & 0xf000) == 0x1000) | ||
547 | mask = 0xFFFFFFFF0ULL; /* 64k page */ | ||
548 | else | ||
549 | mask = 0xFFFFFF000ULL; /* 16M page */ | ||
550 | } | ||
551 | } else { | ||
552 | /* older processors, e.g. PPC970 */ | ||
553 | if (large) | ||
554 | mask = 0xFFFFFF000ULL; | ||
555 | } | ||
556 | /* flush this VA on all vcpus */ | ||
557 | kvm_for_each_vcpu(i, v, vcpu->kvm) | ||
558 | kvmppc_mmu_pte_vflush(v, va >> 12, mask); | ||
469 | } | 559 | } |
470 | 560 | ||
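
The mask passed to kvmppc_mmu_pte_vflush() selects how many low bits of the shifted effective address are ignored when matching shadow PTEs, so it widens with the page size. A stand-alone restatement of the selection logic above:

#include <stdint.h>

/* Flush mask for a tlbie, per the logic above: POWER6+ encodes the page
 * size in the RB value (L bit plus LP field); older CPUs pass a large
 * flag in the instruction itself. */
static uint64_t tlbie_vflush_mask(uint64_t rb, int new_tlbie, int large)
{
	if (!new_tlbie)				/* e.g. PPC970 */
		return large ? 0xFFFFFF000ULL : 0xFFFFFFFFFULL;
	if (!(rb & 1))				/* L bit clear: 4k page */
		return 0xFFFFFFFFFULL;
	if ((rb & 0xf000) == 0x1000)		/* LP says 64k page */
		return 0xFFFFFFFF0ULL;
	return 0xFFFFFF000ULL;			/* otherwise 16M */
}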
561 | #ifdef CONFIG_PPC_64K_PAGES | ||
562 | static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid) | ||
563 | { | ||
564 | ulong mp_ea = vcpu->arch.magic_page_ea; | ||
565 | |||
566 | return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) && | ||
567 | (mp_ea >> SID_SHIFT) == esid; | ||
568 | } | ||
569 | #endif | ||
570 | |||
471 | static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | 571 | static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, |
472 | u64 *vsid) | 572 | u64 *vsid) |
473 | { | 573 | { |
@@ -475,11 +575,13 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
475 | struct kvmppc_slb *slb; | 575 | struct kvmppc_slb *slb; |
476 | u64 gvsid = esid; | 576 | u64 gvsid = esid; |
477 | ulong mp_ea = vcpu->arch.magic_page_ea; | 577 | ulong mp_ea = vcpu->arch.magic_page_ea; |
578 | int pagesize = MMU_PAGE_64K; | ||
478 | 579 | ||
479 | if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { | 580 | if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { |
480 | slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); | 581 | slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); |
481 | if (slb) { | 582 | if (slb) { |
482 | gvsid = slb->vsid; | 583 | gvsid = slb->vsid; |
584 | pagesize = slb->base_page_size; | ||
483 | if (slb->tb) { | 585 | if (slb->tb) { |
484 | gvsid <<= SID_SHIFT_1T - SID_SHIFT; | 586 | gvsid <<= SID_SHIFT_1T - SID_SHIFT; |
485 | gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1); | 587 | gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1); |
@@ -490,28 +592,41 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
490 | 592 | ||
491 | switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { | 593 | switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { |
492 | case 0: | 594 | case 0: |
493 | *vsid = VSID_REAL | esid; | 595 | gvsid = VSID_REAL | esid; |
494 | break; | 596 | break; |
495 | case MSR_IR: | 597 | case MSR_IR: |
496 | *vsid = VSID_REAL_IR | gvsid; | 598 | gvsid |= VSID_REAL_IR; |
497 | break; | 599 | break; |
498 | case MSR_DR: | 600 | case MSR_DR: |
499 | *vsid = VSID_REAL_DR | gvsid; | 601 | gvsid |= VSID_REAL_DR; |
500 | break; | 602 | break; |
501 | case MSR_DR|MSR_IR: | 603 | case MSR_DR|MSR_IR: |
502 | if (!slb) | 604 | if (!slb) |
503 | goto no_slb; | 605 | goto no_slb; |
504 | 606 | ||
505 | *vsid = gvsid; | ||
506 | break; | 607 | break; |
507 | default: | 608 | default: |
508 | BUG(); | 609 | BUG(); |
509 | break; | 610 | break; |
510 | } | 611 | } |
511 | 612 | ||
613 | #ifdef CONFIG_PPC_64K_PAGES | ||
614 | /* | ||
615 | * Mark this as a 64k segment if the host is using | ||
616 | * 64k pages, the host MMU supports 64k pages and | ||
617 | * the guest segment page size is >= 64k, | ||
618 | * but not if this segment contains the magic page. | ||
619 | */ | ||
620 | if (pagesize >= MMU_PAGE_64K && | ||
621 | mmu_psize_defs[MMU_PAGE_64K].shift && | ||
622 | !segment_contains_magic_page(vcpu, esid)) | ||
623 | gvsid |= VSID_64K; | ||
624 | #endif | ||
625 | |||
512 | if (vcpu->arch.shared->msr & MSR_PR) | 626 | if (vcpu->arch.shared->msr & MSR_PR) |
513 | *vsid |= VSID_PR; | 627 | gvsid |= VSID_PR; |
514 | 628 | ||
629 | *vsid = gvsid; | ||
515 | return 0; | 630 | return 0; |
516 | 631 | ||
517 | no_slb: | 632 | no_slb: |
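
The VSID_64K marking added above is what later lets the host shadow a guest segment with 64k pages. Condensed into one hypothetical helper; VSID_64K is the patch's flag, the boolean parameters stand in for the three conditions, and the rationale for excluding the magic page (keeping that single 4k shared page out of a 64k host mapping) is an assumption, not stated in the patch:

#include <stdint.h>

#define VSID_64K (1ULL << 56)	/* placeholder value; the real one is in kvm_book3s.h */

static uint64_t mark_64k_segment(uint64_t gvsid, int guest_pgsize_ge_64k,
				 int host_has_64k, int has_magic_page)
{
	if (guest_pgsize_ge_64k && host_has_64k && !has_magic_page)
		gvsid |= VSID_64K;
	return gvsid;
}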
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index e5240524bf6c..0d513af62bba 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c | |||
@@ -27,14 +27,14 @@ | |||
27 | #include <asm/machdep.h> | 27 | #include <asm/machdep.h> |
28 | #include <asm/mmu_context.h> | 28 | #include <asm/mmu_context.h> |
29 | #include <asm/hw_irq.h> | 29 | #include <asm/hw_irq.h> |
30 | #include "trace.h" | 30 | #include "trace_pr.h" |
31 | 31 | ||
32 | #define PTE_SIZE 12 | 32 | #define PTE_SIZE 12 |
33 | 33 | ||
34 | void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | 34 | void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) |
35 | { | 35 | { |
36 | ppc_md.hpte_invalidate(pte->slot, pte->host_vpn, | 36 | ppc_md.hpte_invalidate(pte->slot, pte->host_vpn, |
37 | MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M, | 37 | pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M, |
38 | false); | 38 | false); |
39 | } | 39 | } |
40 | 40 | ||
@@ -78,7 +78,8 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) | |||
78 | return NULL; | 78 | return NULL; |
79 | } | 79 | } |
80 | 80 | ||
81 | int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) | 81 | int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, |
82 | bool iswrite) | ||
82 | { | 83 | { |
83 | unsigned long vpn; | 84 | unsigned long vpn; |
84 | pfn_t hpaddr; | 85 | pfn_t hpaddr; |
@@ -90,16 +91,26 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) | |||
90 | int attempt = 0; | 91 | int attempt = 0; |
91 | struct kvmppc_sid_map *map; | 92 | struct kvmppc_sid_map *map; |
92 | int r = 0; | 93 | int r = 0; |
94 | int hpsize = MMU_PAGE_4K; | ||
95 | bool writable; | ||
96 | unsigned long mmu_seq; | ||
97 | struct kvm *kvm = vcpu->kvm; | ||
98 | struct hpte_cache *cpte; | ||
99 | unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT; | ||
100 | unsigned long pfn; | ||
101 | |||
102 | /* used to check for invalidations in progress */ | ||
103 | mmu_seq = kvm->mmu_notifier_seq; | ||
104 | smp_rmb(); | ||
93 | 105 | ||
94 | /* Get host physical address for gpa */ | 106 | /* Get host physical address for gpa */ |
95 | hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); | 107 | pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable); |
96 | if (is_error_noslot_pfn(hpaddr)) { | 108 | if (is_error_noslot_pfn(pfn)) { |
97 | printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); | 109 | printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn); |
98 | r = -EINVAL; | 110 | r = -EINVAL; |
99 | goto out; | 111 | goto out; |
100 | } | 112 | } |
101 | hpaddr <<= PAGE_SHIFT; | 113 | hpaddr = pfn << PAGE_SHIFT; |
102 | hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK); | ||
103 | 114 | ||
104 | /* and write the mapping ea -> hpa into the pt */ | 115 | /* and write the mapping ea -> hpa into the pt */ |
105 | vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); | 116 | vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); |
@@ -117,20 +128,39 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) | |||
117 | goto out; | 128 | goto out; |
118 | } | 129 | } |
119 | 130 | ||
120 | vsid = map->host_vsid; | 131 | vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M); |
121 | vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M); | ||
122 | 132 | ||
123 | if (!orig_pte->may_write) | 133 | kvm_set_pfn_accessed(pfn); |
124 | rflags |= HPTE_R_PP; | 134 | if (!orig_pte->may_write || !writable) |
125 | else | 135 | rflags |= PP_RXRX; |
126 | mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); | 136 | else { |
137 | mark_page_dirty(vcpu->kvm, gfn); | ||
138 | kvm_set_pfn_dirty(pfn); | ||
139 | } | ||
127 | 140 | ||
128 | if (!orig_pte->may_execute) | 141 | if (!orig_pte->may_execute) |
129 | rflags |= HPTE_R_N; | 142 | rflags |= HPTE_R_N; |
130 | else | 143 | else |
131 | kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); | 144 | kvmppc_mmu_flush_icache(pfn); |
145 | |||
146 | /* | ||
147 | * Use 64K pages if possible; otherwise, on 64K page kernels, | ||
148 | * we need to transfer 4 more bits from guest real to host real addr. | ||
149 | */ | ||
150 | if (vsid & VSID_64K) | ||
151 | hpsize = MMU_PAGE_64K; | ||
152 | else | ||
153 | hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK); | ||
154 | |||
155 | hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M); | ||
132 | 156 | ||
133 | hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M); | 157 | cpte = kvmppc_mmu_hpte_cache_next(vcpu); |
158 | |||
159 | spin_lock(&kvm->mmu_lock); | ||
160 | if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) { | ||
161 | r = -EAGAIN; | ||
162 | goto out_unlock; | ||
163 | } | ||
134 | 164 | ||
135 | map_again: | 165 | map_again: |
136 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); | 166 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); |
@@ -139,11 +169,11 @@ map_again: | |||
139 | if (attempt > 1) | 169 | if (attempt > 1) |
140 | if (ppc_md.hpte_remove(hpteg) < 0) { | 170 | if (ppc_md.hpte_remove(hpteg) < 0) { |
141 | r = -1; | 171 | r = -1; |
142 | goto out; | 172 | goto out_unlock; |
143 | } | 173 | } |
144 | 174 | ||
145 | ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags, | 175 | ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags, |
146 | MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M); | 176 | hpsize, hpsize, MMU_SEGSIZE_256M); |
147 | 177 | ||
148 | if (ret < 0) { | 178 | if (ret < 0) { |
149 | /* If we couldn't map a primary PTE, try a secondary */ | 179 | /* If we couldn't map a primary PTE, try a secondary */ |
@@ -152,8 +182,6 @@ map_again: | |||
152 | attempt++; | 182 | attempt++; |
153 | goto map_again; | 183 | goto map_again; |
154 | } else { | 184 | } else { |
155 | struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu); | ||
156 | |||
157 | trace_kvm_book3s_64_mmu_map(rflags, hpteg, | 185 | trace_kvm_book3s_64_mmu_map(rflags, hpteg, |
158 | vpn, hpaddr, orig_pte); | 186 | vpn, hpaddr, orig_pte); |
159 | 187 | ||
@@ -164,19 +192,37 @@ map_again: | |||
164 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); | 192 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); |
165 | } | 193 | } |
166 | 194 | ||
167 | pte->slot = hpteg + (ret & 7); | 195 | cpte->slot = hpteg + (ret & 7); |
168 | pte->host_vpn = vpn; | 196 | cpte->host_vpn = vpn; |
169 | pte->pte = *orig_pte; | 197 | cpte->pte = *orig_pte; |
170 | pte->pfn = hpaddr >> PAGE_SHIFT; | 198 | cpte->pfn = pfn; |
199 | cpte->pagesize = hpsize; | ||
171 | 200 | ||
172 | kvmppc_mmu_hpte_cache_map(vcpu, pte); | 201 | kvmppc_mmu_hpte_cache_map(vcpu, cpte); |
202 | cpte = NULL; | ||
173 | } | 203 | } |
174 | kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); | 204 | |
205 | out_unlock: | ||
206 | spin_unlock(&kvm->mmu_lock); | ||
207 | kvm_release_pfn_clean(pfn); | ||
208 | if (cpte) | ||
209 | kvmppc_mmu_hpte_cache_free(cpte); | ||
175 | 210 | ||
176 | out: | 211 | out: |
177 | return r; | 212 | return r; |
178 | } | 213 | } |
179 | 214 | ||
215 | void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) | ||
216 | { | ||
217 | u64 mask = 0xfffffffffULL; | ||
218 | u64 vsid; | ||
219 | |||
220 | vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid); | ||
221 | if (vsid & VSID_64K) | ||
222 | mask = 0xffffffff0ULL; | ||
223 | kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask); | ||
224 | } | ||
225 | |||
180 | static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) | 226 | static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) |
181 | { | 227 | { |
182 | struct kvmppc_sid_map *map; | 228 | struct kvmppc_sid_map *map; |
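
The kvmppc_mmu_map_page() rework above adopts the standard MMU-notifier race guard: snapshot kvm->mmu_notifier_seq before the (sleeping) gfn-to-pfn translation, allocate the cache entry outside the lock, then recheck the sequence under kvm->mmu_lock before publishing anything. An annotated outline of the pattern as it appears in the hunk (not self-contained; symbols as in the patch):

	mmu_seq = kvm->mmu_notifier_seq;	/* 1. snapshot */
	smp_rmb();				/*    order against notifier writers */

	pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);	/* may sleep */

	cpte = kvmppc_mmu_hpte_cache_next(vcpu);	/* allocate before locking */

	spin_lock(&kvm->mmu_lock);
	if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
		r = -EAGAIN;		/* 2. raced with an invalidate: redo the fault */
		goto out_unlock;
	}
	/* 3. only now insert the host HPTE and map cpte */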
@@ -291,6 +337,12 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) | |||
291 | slb_vsid &= ~SLB_VSID_KP; | 337 | slb_vsid &= ~SLB_VSID_KP; |
292 | slb_esid |= slb_index; | 338 | slb_esid |= slb_index; |
293 | 339 | ||
340 | #ifdef CONFIG_PPC_64K_PAGES | ||
341 | /* Set host segment base page size to 64K if possible */ | ||
342 | if (gvsid & VSID_64K) | ||
343 | slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp; | ||
344 | #endif | ||
345 | |||
294 | svcpu->slb[slb_index].esid = slb_esid; | 346 | svcpu->slb[slb_index].esid = slb_esid; |
295 | svcpu->slb[slb_index].vsid = slb_vsid; | 347 | svcpu->slb[slb_index].vsid = slb_vsid; |
296 | 348 | ||
@@ -326,7 +378,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) | |||
326 | svcpu_put(svcpu); | 378 | svcpu_put(svcpu); |
327 | } | 379 | } |
328 | 380 | ||
329 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) | 381 | void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu) |
330 | { | 382 | { |
331 | kvmppc_mmu_hpte_destroy(vcpu); | 383 | kvmppc_mmu_hpte_destroy(vcpu); |
332 | __destroy_context(to_book3s(vcpu)->context_id[0]); | 384 | __destroy_context(to_book3s(vcpu)->context_id[0]); |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 043eec8461e7..f3ff587a8b7d 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
@@ -260,10 +260,6 @@ int kvmppc_mmu_hv_init(void) | |||
260 | return 0; | 260 | return 0; |
261 | } | 261 | } |
262 | 262 | ||
263 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) | ||
264 | { | ||
265 | } | ||
266 | |||
267 | static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) | 263 | static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) |
268 | { | 264 | { |
269 | kvmppc_set_msr(vcpu, MSR_SF | MSR_ME); | 265 | kvmppc_set_msr(vcpu, MSR_SF | MSR_ME); |
@@ -451,7 +447,7 @@ static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r, | |||
451 | } | 447 | } |
452 | 448 | ||
453 | static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | 449 | static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, |
454 | struct kvmppc_pte *gpte, bool data) | 450 | struct kvmppc_pte *gpte, bool data, bool iswrite) |
455 | { | 451 | { |
456 | struct kvm *kvm = vcpu->kvm; | 452 | struct kvm *kvm = vcpu->kvm; |
457 | struct kvmppc_slb *slbe; | 453 | struct kvmppc_slb *slbe; |
@@ -906,21 +902,22 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, | |||
906 | return 0; | 902 | return 0; |
907 | } | 903 | } |
908 | 904 | ||
909 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | 905 | int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva) |
910 | { | 906 | { |
911 | if (kvm->arch.using_mmu_notifiers) | 907 | if (kvm->arch.using_mmu_notifiers) |
912 | kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); | 908 | kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); |
913 | return 0; | 909 | return 0; |
914 | } | 910 | } |
915 | 911 | ||
916 | int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) | 912 | int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end) |
917 | { | 913 | { |
918 | if (kvm->arch.using_mmu_notifiers) | 914 | if (kvm->arch.using_mmu_notifiers) |
919 | kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp); | 915 | kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp); |
920 | return 0; | 916 | return 0; |
921 | } | 917 | } |
922 | 918 | ||
923 | void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) | 919 | void kvmppc_core_flush_memslot_hv(struct kvm *kvm, |
920 | struct kvm_memory_slot *memslot) | ||
924 | { | 921 | { |
925 | unsigned long *rmapp; | 922 | unsigned long *rmapp; |
926 | unsigned long gfn; | 923 | unsigned long gfn; |
@@ -994,7 +991,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, | |||
994 | return ret; | 991 | return ret; |
995 | } | 992 | } |
996 | 993 | ||
997 | int kvm_age_hva(struct kvm *kvm, unsigned long hva) | 994 | int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva) |
998 | { | 995 | { |
999 | if (!kvm->arch.using_mmu_notifiers) | 996 | if (!kvm->arch.using_mmu_notifiers) |
1000 | return 0; | 997 | return 0; |
@@ -1032,14 +1029,14 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, | |||
1032 | return ret; | 1029 | return ret; |
1033 | } | 1030 | } |
1034 | 1031 | ||
1035 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) | 1032 | int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva) |
1036 | { | 1033 | { |
1037 | if (!kvm->arch.using_mmu_notifiers) | 1034 | if (!kvm->arch.using_mmu_notifiers) |
1038 | return 0; | 1035 | return 0; |
1039 | return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp); | 1036 | return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp); |
1040 | } | 1037 | } |
1041 | 1038 | ||
1042 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) | 1039 | void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte) |
1043 | { | 1040 | { |
1044 | if (!kvm->arch.using_mmu_notifiers) | 1041 | if (!kvm->arch.using_mmu_notifiers) |
1045 | return; | 1042 | return; |
@@ -1512,9 +1509,8 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf, | |||
1512 | 1509 | ||
1513 | kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | | 1510 | kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | |
1514 | (VRMA_VSID << SLB_VSID_SHIFT_1T); | 1511 | (VRMA_VSID << SLB_VSID_SHIFT_1T); |
1515 | lpcr = kvm->arch.lpcr & ~LPCR_VRMASD; | 1512 | lpcr = senc << (LPCR_VRMASD_SH - 4); |
1516 | lpcr |= senc << (LPCR_VRMASD_SH - 4); | 1513 | kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD); |
1517 | kvm->arch.lpcr = lpcr; | ||
1518 | rma_setup = 1; | 1514 | rma_setup = 1; |
1519 | } | 1515 | } |
1520 | ++i; | 1516 | ++i; |
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 30c2f3b134c6..2c25f5412bdb 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c | |||
@@ -74,3 +74,4 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | |||
74 | /* Didn't find the liobn, punt it to userspace */ | 74 | /* Didn't find the liobn, punt it to userspace */ |
75 | return H_TOO_HARD; | 75 | return H_TOO_HARD; |
76 | } | 76 | } |
77 | EXPORT_SYMBOL_GPL(kvmppc_h_put_tce); | ||
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 360ce68c9809..99d40f8977e8 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c | |||
@@ -86,8 +86,8 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level) | |||
86 | return true; | 86 | return true; |
87 | } | 87 | } |
88 | 88 | ||
89 | int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | 89 | int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, |
90 | unsigned int inst, int *advance) | 90 | unsigned int inst, int *advance) |
91 | { | 91 | { |
92 | int emulated = EMULATE_DONE; | 92 | int emulated = EMULATE_DONE; |
93 | int rt = get_rt(inst); | 93 | int rt = get_rt(inst); |
@@ -172,7 +172,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
172 | vcpu->arch.mmu.tlbie(vcpu, addr, large); | 172 | vcpu->arch.mmu.tlbie(vcpu, addr, large); |
173 | break; | 173 | break; |
174 | } | 174 | } |
175 | #ifdef CONFIG_KVM_BOOK3S_64_PR | 175 | #ifdef CONFIG_PPC_BOOK3S_64 |
176 | case OP_31_XOP_FAKE_SC1: | 176 | case OP_31_XOP_FAKE_SC1: |
177 | { | 177 | { |
178 | /* SC 1 papr hypercalls */ | 178 | /* SC 1 papr hypercalls */ |
@@ -267,12 +267,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
267 | 267 | ||
268 | r = kvmppc_st(vcpu, &addr, 32, zeros, true); | 268 | r = kvmppc_st(vcpu, &addr, 32, zeros, true); |
269 | if ((r == -ENOENT) || (r == -EPERM)) { | 269 | if ((r == -ENOENT) || (r == -EPERM)) { |
270 | struct kvmppc_book3s_shadow_vcpu *svcpu; | ||
271 | |||
272 | svcpu = svcpu_get(vcpu); | ||
273 | *advance = 0; | 270 | *advance = 0; |
274 | vcpu->arch.shared->dar = vaddr; | 271 | vcpu->arch.shared->dar = vaddr; |
275 | svcpu->fault_dar = vaddr; | 272 | vcpu->arch.fault_dar = vaddr; |
276 | 273 | ||
277 | dsisr = DSISR_ISSTORE; | 274 | dsisr = DSISR_ISSTORE; |
278 | if (r == -ENOENT) | 275 | if (r == -ENOENT) |
@@ -281,8 +278,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
281 | dsisr |= DSISR_PROTFAULT; | 278 | dsisr |= DSISR_PROTFAULT; |
282 | 279 | ||
283 | vcpu->arch.shared->dsisr = dsisr; | 280 | vcpu->arch.shared->dsisr = dsisr; |
284 | svcpu->fault_dsisr = dsisr; | 281 | vcpu->arch.fault_dsisr = dsisr; |
285 | svcpu_put(svcpu); | ||
286 | 282 | ||
287 | kvmppc_book3s_queue_irqprio(vcpu, | 283 | kvmppc_book3s_queue_irqprio(vcpu, |
288 | BOOK3S_INTERRUPT_DATA_STORAGE); | 284 | BOOK3S_INTERRUPT_DATA_STORAGE); |
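
With the shadow-vcpu accessors gone, the emulated-store failure path records the fault directly in vcpu->arch. A condensed outline; the flag OR-ed in for the -ENOENT case falls in elided context and is not reproduced here:

	*advance = 0;
	vcpu->arch.shared->dar = vaddr;
	vcpu->arch.fault_dar = vaddr;		/* was svcpu->fault_dar */

	dsisr = DSISR_ISSTORE;
	if (r != -ENOENT)
		dsisr |= DSISR_PROTFAULT;	/* translation forbids the store */
	/* (-ENOENT adds a no-translation flag in the elided lines) */

	vcpu->arch.shared->dsisr = dsisr;
	vcpu->arch.fault_dsisr = dsisr;		/* was svcpu->fault_dsisr */
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);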
@@ -349,7 +345,7 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn) | |||
349 | return bat; | 345 | return bat; |
350 | } | 346 | } |
351 | 347 | ||
352 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | 348 | int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) |
353 | { | 349 | { |
354 | int emulated = EMULATE_DONE; | 350 | int emulated = EMULATE_DONE; |
355 | 351 | ||
@@ -472,7 +468,7 @@ unprivileged: | |||
472 | return emulated; | 468 | return emulated; |
473 | } | 469 | } |
474 | 470 | ||
475 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) | 471 | int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) |
476 | { | 472 | { |
477 | int emulated = EMULATE_DONE; | 473 | int emulated = EMULATE_DONE; |
478 | 474 | ||
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c index 7057a02f0906..852989a9bad3 100644 --- a/arch/powerpc/kvm/book3s_exports.c +++ b/arch/powerpc/kvm/book3s_exports.c | |||
@@ -20,9 +20,10 @@ | |||
20 | #include <linux/export.h> | 20 | #include <linux/export.h> |
21 | #include <asm/kvm_book3s.h> | 21 | #include <asm/kvm_book3s.h> |
22 | 22 | ||
23 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 23 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
24 | EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline); | 24 | EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline); |
25 | #else | 25 | #endif |
26 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE | ||
26 | EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline); | 27 | EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline); |
27 | EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu); | 28 | EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu); |
28 | #ifdef CONFIG_ALTIVEC | 29 | #ifdef CONFIG_ALTIVEC |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 62a2b5ab08ed..072287f1c3bc 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -52,6 +52,9 @@ | |||
52 | #include <linux/vmalloc.h> | 52 | #include <linux/vmalloc.h> |
53 | #include <linux/highmem.h> | 53 | #include <linux/highmem.h> |
54 | #include <linux/hugetlb.h> | 54 | #include <linux/hugetlb.h> |
55 | #include <linux/module.h> | ||
56 | |||
57 | #include "book3s.h" | ||
55 | 58 | ||
56 | /* #define EXIT_DEBUG */ | 59 | /* #define EXIT_DEBUG */ |
57 | /* #define EXIT_DEBUG_SIMPLE */ | 60 | /* #define EXIT_DEBUG_SIMPLE */ |
@@ -66,7 +69,7 @@ | |||
66 | static void kvmppc_end_cede(struct kvm_vcpu *vcpu); | 69 | static void kvmppc_end_cede(struct kvm_vcpu *vcpu); |
67 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); | 70 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); |
68 | 71 | ||
69 | void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu) | 72 | static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) |
70 | { | 73 | { |
71 | int me; | 74 | int me; |
72 | int cpu = vcpu->cpu; | 75 | int cpu = vcpu->cpu; |
@@ -125,7 +128,7 @@ void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu) | |||
125 | * purely defensive; they should never fail.) | 128 | * purely defensive; they should never fail.) |
126 | */ | 129 | */ |
127 | 130 | ||
128 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 131 | static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) |
129 | { | 132 | { |
130 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | 133 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
131 | 134 | ||
@@ -143,7 +146,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
143 | spin_unlock(&vcpu->arch.tbacct_lock); | 146 | spin_unlock(&vcpu->arch.tbacct_lock); |
144 | } | 147 | } |
145 | 148 | ||
146 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | 149 | static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) |
147 | { | 150 | { |
148 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | 151 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
149 | 152 | ||
@@ -155,17 +158,46 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | |||
155 | spin_unlock(&vcpu->arch.tbacct_lock); | 158 | spin_unlock(&vcpu->arch.tbacct_lock); |
156 | } | 159 | } |
157 | 160 | ||
158 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) | 161 | static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) |
159 | { | 162 | { |
160 | vcpu->arch.shregs.msr = msr; | 163 | vcpu->arch.shregs.msr = msr; |
161 | kvmppc_end_cede(vcpu); | 164 | kvmppc_end_cede(vcpu); |
162 | } | 165 | } |
163 | 166 | ||
164 | void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) | 167 | void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr) |
165 | { | 168 | { |
166 | vcpu->arch.pvr = pvr; | 169 | vcpu->arch.pvr = pvr; |
167 | } | 170 | } |
168 | 171 | ||
172 | int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) | ||
173 | { | ||
174 | unsigned long pcr = 0; | ||
175 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | ||
176 | |||
177 | if (arch_compat) { | ||
178 | if (!cpu_has_feature(CPU_FTR_ARCH_206)) | ||
179 | return -EINVAL; /* 970 has no compat mode support */ | ||
180 | |||
181 | switch (arch_compat) { | ||
182 | case PVR_ARCH_205: | ||
183 | pcr = PCR_ARCH_205; | ||
184 | break; | ||
185 | case PVR_ARCH_206: | ||
186 | case PVR_ARCH_206p: | ||
187 | break; | ||
188 | default: | ||
189 | return -EINVAL; | ||
190 | } | ||
191 | } | ||
192 | |||
193 | spin_lock(&vc->lock); | ||
194 | vc->arch_compat = arch_compat; | ||
195 | vc->pcr = pcr; | ||
196 | spin_unlock(&vc->lock); | ||
197 | |||
198 | return 0; | ||
199 | } | ||
200 | |||
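
Userspace drives kvmppc_set_arch_compat() through the one-reg interface added further down. A hypothetical caller, assuming KVM_REG_PPC_ARCH_COMPAT is exported by the uapi headers as a 32-bit one-reg id and takes the architected PVR value for the desired level:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_compat_mode(int vcpu_fd, uint32_t compat_pvr)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_ARCH_COMPAT,	/* added by this series */
		.addr = (uintptr_t)&compat_pvr,
	};
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	/* 0 on success */
}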
169 | void kvmppc_dump_regs(struct kvm_vcpu *vcpu) | 201 | void kvmppc_dump_regs(struct kvm_vcpu *vcpu) |
170 | { | 202 | { |
171 | int r; | 203 | int r; |
@@ -195,7 +227,7 @@ void kvmppc_dump_regs(struct kvm_vcpu *vcpu) | |||
195 | pr_err(" ESID = %.16llx VSID = %.16llx\n", | 227 | pr_err(" ESID = %.16llx VSID = %.16llx\n", |
196 | vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); | 228 | vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); |
197 | pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n", | 229 | pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n", |
198 | vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1, | 230 | vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, |
199 | vcpu->arch.last_inst); | 231 | vcpu->arch.last_inst); |
200 | } | 232 | } |
201 | 233 | ||
@@ -489,7 +521,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, | |||
489 | memset(dt, 0, sizeof(struct dtl_entry)); | 521 | memset(dt, 0, sizeof(struct dtl_entry)); |
490 | dt->dispatch_reason = 7; | 522 | dt->dispatch_reason = 7; |
491 | dt->processor_id = vc->pcpu + vcpu->arch.ptid; | 523 | dt->processor_id = vc->pcpu + vcpu->arch.ptid; |
492 | dt->timebase = now; | 524 | dt->timebase = now + vc->tb_offset; |
493 | dt->enqueue_to_dispatch_time = stolen; | 525 | dt->enqueue_to_dispatch_time = stolen; |
494 | dt->srr0 = kvmppc_get_pc(vcpu); | 526 | dt->srr0 = kvmppc_get_pc(vcpu); |
495 | dt->srr1 = vcpu->arch.shregs.msr; | 527 | dt->srr1 = vcpu->arch.shregs.msr; |
@@ -538,6 +570,15 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | |||
538 | } | 570 | } |
539 | break; | 571 | break; |
540 | case H_CONFER: | 572 | case H_CONFER: |
573 | target = kvmppc_get_gpr(vcpu, 4); | ||
574 | if (target == -1) | ||
575 | break; | ||
576 | tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); | ||
577 | if (!tvcpu) { | ||
578 | ret = H_PARAMETER; | ||
579 | break; | ||
580 | } | ||
581 | kvm_vcpu_yield_to(tvcpu); | ||
541 | break; | 582 | break; |
542 | case H_REGISTER_VPA: | 583 | case H_REGISTER_VPA: |
543 | ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4), | 584 | ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4), |
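
The H_CONFER arm above turns the PAPR "confer my cycles" hypercall into a scheduler hint. Annotated restatement; target -1, meaning confer to any partner thread, is accepted but treated as a no-op here:

	case H_CONFER:
		target = kvmppc_get_gpr(vcpu, 4);	/* proc number, or -1 */
		if (target == -1)
			break;				/* "any": nothing specific to do */
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;		/* no such vcpu */
			break;
		}
		kvm_vcpu_yield_to(tvcpu);		/* donate this task's timeslice */
		break;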
@@ -576,8 +617,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | |||
576 | return RESUME_GUEST; | 617 | return RESUME_GUEST; |
577 | } | 618 | } |
578 | 619 | ||
579 | static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | 620 | static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, |
580 | struct task_struct *tsk) | 621 | struct task_struct *tsk) |
581 | { | 622 | { |
582 | int r = RESUME_HOST; | 623 | int r = RESUME_HOST; |
583 | 624 | ||
@@ -671,16 +712,16 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
671 | printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", | 712 | printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", |
672 | vcpu->arch.trap, kvmppc_get_pc(vcpu), | 713 | vcpu->arch.trap, kvmppc_get_pc(vcpu), |
673 | vcpu->arch.shregs.msr); | 714 | vcpu->arch.shregs.msr); |
715 | run->hw.hardware_exit_reason = vcpu->arch.trap; | ||
674 | r = RESUME_HOST; | 716 | r = RESUME_HOST; |
675 | BUG(); | ||
676 | break; | 717 | break; |
677 | } | 718 | } |
678 | 719 | ||
679 | return r; | 720 | return r; |
680 | } | 721 | } |
681 | 722 | ||
682 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | 723 | static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu, |
683 | struct kvm_sregs *sregs) | 724 | struct kvm_sregs *sregs) |
684 | { | 725 | { |
685 | int i; | 726 | int i; |
686 | 727 | ||
@@ -694,12 +735,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | |||
694 | return 0; | 735 | return 0; |
695 | } | 736 | } |
696 | 737 | ||
697 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | 738 | static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, |
698 | struct kvm_sregs *sregs) | 739 | struct kvm_sregs *sregs) |
699 | { | 740 | { |
700 | int i, j; | 741 | int i, j; |
701 | 742 | ||
702 | kvmppc_set_pvr(vcpu, sregs->pvr); | 743 | kvmppc_set_pvr_hv(vcpu, sregs->pvr); |
703 | 744 | ||
704 | j = 0; | 745 | j = 0; |
705 | for (i = 0; i < vcpu->arch.slb_nr; i++) { | 746 | for (i = 0; i < vcpu->arch.slb_nr; i++) { |
@@ -714,7 +755,23 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |||
714 | return 0; | 755 | return 0; |
715 | } | 756 | } |
716 | 757 | ||
717 | int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) | 758 | static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr) |
759 | { | ||
760 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | ||
761 | u64 mask; | ||
762 | |||
763 | spin_lock(&vc->lock); | ||
764 | /* | ||
765 | * Userspace can only modify DPFD (default prefetch depth), | ||
766 | * ILE (interrupt little-endian) and TC (translation control). | ||
767 | */ | ||
768 | mask = LPCR_DPFD | LPCR_ILE | LPCR_TC; | ||
769 | vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); | ||
770 | spin_unlock(&vc->lock); | ||
771 | } | ||
772 | |||
773 | static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | ||
774 | union kvmppc_one_reg *val) | ||
718 | { | 775 | { |
719 | int r = 0; | 776 | int r = 0; |
720 | long int i; | 777 | long int i; |
@@ -749,6 +806,12 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) | |||
749 | i = id - KVM_REG_PPC_PMC1; | 806 | i = id - KVM_REG_PPC_PMC1; |
750 | *val = get_reg_val(id, vcpu->arch.pmc[i]); | 807 | *val = get_reg_val(id, vcpu->arch.pmc[i]); |
751 | break; | 808 | break; |
809 | case KVM_REG_PPC_SIAR: | ||
810 | *val = get_reg_val(id, vcpu->arch.siar); | ||
811 | break; | ||
812 | case KVM_REG_PPC_SDAR: | ||
813 | *val = get_reg_val(id, vcpu->arch.sdar); | ||
814 | break; | ||
752 | #ifdef CONFIG_VSX | 815 | #ifdef CONFIG_VSX |
753 | case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: | 816 | case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: |
754 | if (cpu_has_feature(CPU_FTR_VSX)) { | 817 | if (cpu_has_feature(CPU_FTR_VSX)) { |
@@ -787,6 +850,18 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) | |||
787 | val->vpaval.length = vcpu->arch.dtl.len; | 850 | val->vpaval.length = vcpu->arch.dtl.len; |
788 | spin_unlock(&vcpu->arch.vpa_update_lock); | 851 | spin_unlock(&vcpu->arch.vpa_update_lock); |
789 | break; | 852 | break; |
853 | case KVM_REG_PPC_TB_OFFSET: | ||
854 | *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); | ||
855 | break; | ||
856 | case KVM_REG_PPC_LPCR: | ||
857 | *val = get_reg_val(id, vcpu->arch.vcore->lpcr); | ||
858 | break; | ||
859 | case KVM_REG_PPC_PPR: | ||
860 | *val = get_reg_val(id, vcpu->arch.ppr); | ||
861 | break; | ||
862 | case KVM_REG_PPC_ARCH_COMPAT: | ||
863 | *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); | ||
864 | break; | ||
790 | default: | 865 | default: |
791 | r = -EINVAL; | 866 | r = -EINVAL; |
792 | break; | 867 | break; |
@@ -795,7 +870,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) | |||
795 | return r; | 870 | return r; |
796 | } | 871 | } |
797 | 872 | ||
798 | int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) | 873 | static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, |
874 | union kvmppc_one_reg *val) | ||
799 | { | 875 | { |
800 | int r = 0; | 876 | int r = 0; |
801 | long int i; | 877 | long int i; |
@@ -833,6 +909,12 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) | |||
833 | i = id - KVM_REG_PPC_PMC1; | 909 | i = id - KVM_REG_PPC_PMC1; |
834 | vcpu->arch.pmc[i] = set_reg_val(id, *val); | 910 | vcpu->arch.pmc[i] = set_reg_val(id, *val); |
835 | break; | 911 | break; |
912 | case KVM_REG_PPC_SIAR: | ||
913 | vcpu->arch.siar = set_reg_val(id, *val); | ||
914 | break; | ||
915 | case KVM_REG_PPC_SDAR: | ||
916 | vcpu->arch.sdar = set_reg_val(id, *val); | ||
917 | break; | ||
836 | #ifdef CONFIG_VSX | 918 | #ifdef CONFIG_VSX |
837 | case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: | 919 | case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: |
838 | if (cpu_has_feature(CPU_FTR_VSX)) { | 920 | if (cpu_has_feature(CPU_FTR_VSX)) { |
@@ -880,6 +962,20 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) | |||
880 | len -= len % sizeof(struct dtl_entry); | 962 | len -= len % sizeof(struct dtl_entry); |
881 | r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); | 963 | r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); |
882 | break; | 964 | break; |
965 | case KVM_REG_PPC_TB_OFFSET: | ||
966 | /* round up to multiple of 2^24 */ | ||
967 | vcpu->arch.vcore->tb_offset = | ||
968 | ALIGN(set_reg_val(id, *val), 1UL << 24); | ||
969 | break; | ||
970 | case KVM_REG_PPC_LPCR: | ||
971 | kvmppc_set_lpcr(vcpu, set_reg_val(id, *val)); | ||
972 | break; | ||
973 | case KVM_REG_PPC_PPR: | ||
974 | vcpu->arch.ppr = set_reg_val(id, *val); | ||
975 | break; | ||
976 | case KVM_REG_PPC_ARCH_COMPAT: | ||
977 | r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val)); | ||
978 | break; | ||
883 | default: | 979 | default: |
884 | r = -EINVAL; | 980 | r = -EINVAL; |
885 | break; | 981 | break; |
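
The ALIGN above keeps the guest timebase offset to a 2^24-tick granularity; the presumed reason is that the hypervisor can only adjust the upper 40 bits of the timebase (via TBU40), leaving the low 24 bits untouched. The rounding itself is just:

#include <stdint.h>

/* ALIGN(v, 1UL << 24): round v up to the next multiple of 2^24. */
static uint64_t tb_offset_align(uint64_t v)
{
	return (v + (1ULL << 24) - 1) & ~((1ULL << 24) - 1);
}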
@@ -888,14 +984,8 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) | |||
888 | return r; | 984 | return r; |
889 | } | 985 | } |
890 | 986 | ||
891 | int kvmppc_core_check_processor_compat(void) | 987 | static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, |
892 | { | 988 | unsigned int id) |
893 | if (cpu_has_feature(CPU_FTR_HVMODE)) | ||
894 | return 0; | ||
895 | return -EIO; | ||
896 | } | ||
897 | |||
898 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | ||
899 | { | 989 | { |
900 | struct kvm_vcpu *vcpu; | 990 | struct kvm_vcpu *vcpu; |
901 | int err = -EINVAL; | 991 | int err = -EINVAL; |
@@ -919,8 +1009,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
919 | vcpu->arch.mmcr[0] = MMCR0_FC; | 1009 | vcpu->arch.mmcr[0] = MMCR0_FC; |
920 | vcpu->arch.ctrl = CTRL_RUNLATCH; | 1010 | vcpu->arch.ctrl = CTRL_RUNLATCH; |
921 | /* default to host PVR, since we can't spoof it */ | 1011 | /* default to host PVR, since we can't spoof it */ |
922 | vcpu->arch.pvr = mfspr(SPRN_PVR); | 1012 | kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR)); |
923 | kvmppc_set_pvr(vcpu, vcpu->arch.pvr); | ||
924 | spin_lock_init(&vcpu->arch.vpa_update_lock); | 1013 | spin_lock_init(&vcpu->arch.vpa_update_lock); |
925 | spin_lock_init(&vcpu->arch.tbacct_lock); | 1014 | spin_lock_init(&vcpu->arch.tbacct_lock); |
926 | vcpu->arch.busy_preempt = TB_NIL; | 1015 | vcpu->arch.busy_preempt = TB_NIL; |
@@ -940,6 +1029,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
940 | spin_lock_init(&vcore->lock); | 1029 | spin_lock_init(&vcore->lock); |
941 | init_waitqueue_head(&vcore->wq); | 1030 | init_waitqueue_head(&vcore->wq); |
942 | vcore->preempt_tb = TB_NIL; | 1031 | vcore->preempt_tb = TB_NIL; |
1032 | vcore->lpcr = kvm->arch.lpcr; | ||
943 | } | 1033 | } |
944 | kvm->arch.vcores[core] = vcore; | 1034 | kvm->arch.vcores[core] = vcore; |
945 | kvm->arch.online_vcores++; | 1035 | kvm->arch.online_vcores++; |
@@ -972,7 +1062,7 @@ static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa) | |||
972 | vpa->dirty); | 1062 | vpa->dirty); |
973 | } | 1063 | } |
974 | 1064 | ||
975 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | 1065 | static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu) |
976 | { | 1066 | { |
977 | spin_lock(&vcpu->arch.vpa_update_lock); | 1067 | spin_lock(&vcpu->arch.vpa_update_lock); |
978 | unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); | 1068 | unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); |
@@ -983,6 +1073,12 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
983 | kmem_cache_free(kvm_vcpu_cache, vcpu); | 1073 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
984 | } | 1074 | } |
985 | 1075 | ||
1076 | static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu) | ||
1077 | { | ||
1078 | /* Indicate we want to get back into the guest */ | ||
1079 | return 1; | ||
1080 | } | ||
1081 | |||
986 | static void kvmppc_set_timer(struct kvm_vcpu *vcpu) | 1082 | static void kvmppc_set_timer(struct kvm_vcpu *vcpu) |
987 | { | 1083 | { |
988 | unsigned long dec_nsec, now; | 1084 | unsigned long dec_nsec, now; |
@@ -1264,8 +1360,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc) | |||
1264 | 1360 | ||
1265 | ret = RESUME_GUEST; | 1361 | ret = RESUME_GUEST; |
1266 | if (vcpu->arch.trap) | 1362 | if (vcpu->arch.trap) |
1267 | ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu, | 1363 | ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, |
1268 | vcpu->arch.run_task); | 1364 | vcpu->arch.run_task); |
1269 | 1365 | ||
1270 | vcpu->arch.ret = ret; | 1366 | vcpu->arch.ret = ret; |
1271 | vcpu->arch.trap = 0; | 1367 | vcpu->arch.trap = 0; |
@@ -1424,7 +1520,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
1424 | return vcpu->arch.ret; | 1520 | return vcpu->arch.ret; |
1425 | } | 1521 | } |
1426 | 1522 | ||
1427 | int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) | 1523 | static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) |
1428 | { | 1524 | { |
1429 | int r; | 1525 | int r; |
1430 | int srcu_idx; | 1526 | int srcu_idx; |
@@ -1546,7 +1642,8 @@ static const struct file_operations kvm_rma_fops = { | |||
1546 | .release = kvm_rma_release, | 1642 | .release = kvm_rma_release, |
1547 | }; | 1643 | }; |
1548 | 1644 | ||
1549 | long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret) | 1645 | static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, |
1646 | struct kvm_allocate_rma *ret) | ||
1550 | { | 1647 | { |
1551 | long fd; | 1648 | long fd; |
1552 | struct kvm_rma_info *ri; | 1649 | struct kvm_rma_info *ri; |
@@ -1592,7 +1689,8 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, | |||
1592 | (*sps)++; | 1689 | (*sps)++; |
1593 | } | 1690 | } |
1594 | 1691 | ||
1595 | int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) | 1692 | static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm, |
1693 | struct kvm_ppc_smmu_info *info) | ||
1596 | { | 1694 | { |
1597 | struct kvm_ppc_one_seg_page_size *sps; | 1695 | struct kvm_ppc_one_seg_page_size *sps; |
1598 | 1696 | ||
@@ -1613,7 +1711,8 @@ int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) | |||
1613 | /* | 1711 | /* |
1614 | * Get (and clear) the dirty memory log for a memory slot. | 1712 | * Get (and clear) the dirty memory log for a memory slot. |
1615 | */ | 1713 | */ |
1616 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) | 1714 | static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm, |
1715 | struct kvm_dirty_log *log) | ||
1617 | { | 1716 | { |
1618 | struct kvm_memory_slot *memslot; | 1717 | struct kvm_memory_slot *memslot; |
1619 | int r; | 1718 | int r; |
@@ -1667,8 +1766,8 @@ static void unpin_slot(struct kvm_memory_slot *memslot) | |||
1667 | } | 1766 | } |
1668 | } | 1767 | } |
1669 | 1768 | ||
1670 | void kvmppc_core_free_memslot(struct kvm_memory_slot *free, | 1769 | static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free, |
1671 | struct kvm_memory_slot *dont) | 1770 | struct kvm_memory_slot *dont) |
1672 | { | 1771 | { |
1673 | if (!dont || free->arch.rmap != dont->arch.rmap) { | 1772 | if (!dont || free->arch.rmap != dont->arch.rmap) { |
1674 | vfree(free->arch.rmap); | 1773 | vfree(free->arch.rmap); |
@@ -1681,8 +1780,8 @@ void kvmppc_core_free_memslot(struct kvm_memory_slot *free, | |||
1681 | } | 1780 | } |
1682 | } | 1781 | } |
1683 | 1782 | ||
1684 | int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, | 1783 | static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot, |
1685 | unsigned long npages) | 1784 | unsigned long npages) |
1686 | { | 1785 | { |
1687 | slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); | 1786 | slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); |
1688 | if (!slot->arch.rmap) | 1787 | if (!slot->arch.rmap) |
@@ -1692,9 +1791,9 @@ int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, | |||
1692 | return 0; | 1791 | return 0; |
1693 | } | 1792 | } |
1694 | 1793 | ||
1695 | int kvmppc_core_prepare_memory_region(struct kvm *kvm, | 1794 | static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, |
1696 | struct kvm_memory_slot *memslot, | 1795 | struct kvm_memory_slot *memslot, |
1697 | struct kvm_userspace_memory_region *mem) | 1796 | struct kvm_userspace_memory_region *mem) |
1698 | { | 1797 | { |
1699 | unsigned long *phys; | 1798 | unsigned long *phys; |
1700 | 1799 | ||
@@ -1710,9 +1809,9 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, | |||
1710 | return 0; | 1809 | return 0; |
1711 | } | 1810 | } |
1712 | 1811 | ||
1713 | void kvmppc_core_commit_memory_region(struct kvm *kvm, | 1812 | static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, |
1714 | struct kvm_userspace_memory_region *mem, | 1813 | struct kvm_userspace_memory_region *mem, |
1715 | const struct kvm_memory_slot *old) | 1814 | const struct kvm_memory_slot *old) |
1716 | { | 1815 | { |
1717 | unsigned long npages = mem->memory_size >> PAGE_SHIFT; | 1816 | unsigned long npages = mem->memory_size >> PAGE_SHIFT; |
1718 | struct kvm_memory_slot *memslot; | 1817 | struct kvm_memory_slot *memslot; |
@@ -1729,6 +1828,37 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, | |||
1729 | } | 1828 | } |
1730 | } | 1829 | } |
1731 | 1830 | ||
1831 | /* | ||
1832 | * Update LPCR values in kvm->arch and in vcores. | ||
1833 | * Caller must hold kvm->lock. | ||
1834 | */ | ||
1835 | void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) | ||
1836 | { | ||
1837 | long int i; | ||
1838 | u32 cores_done = 0; | ||
1839 | |||
1840 | if ((kvm->arch.lpcr & mask) == lpcr) | ||
1841 | return; | ||
1842 | |||
1843 | kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; | ||
1844 | |||
1845 | for (i = 0; i < KVM_MAX_VCORES; ++i) { | ||
1846 | struct kvmppc_vcore *vc = kvm->arch.vcores[i]; | ||
1847 | if (!vc) | ||
1848 | continue; | ||
1849 | spin_lock(&vc->lock); | ||
1850 | vc->lpcr = (vc->lpcr & ~mask) | lpcr; | ||
1851 | spin_unlock(&vc->lock); | ||
1852 | if (++cores_done >= kvm->arch.online_vcores) | ||
1853 | break; | ||
1854 | } | ||
1855 | } | ||
1856 | |||
1857 | static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu) | ||
1858 | { | ||
1859 | return; | ||
1860 | } | ||
1861 | |||
1732 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) | 1862 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) |
1733 | { | 1863 | { |
1734 | int err = 0; | 1864 | int err = 0; |
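
Callers of the new kvmppc_update_lpcr() pass the replacement field value together with a mask naming exactly the bits they own, so each vcore sees a consistent read-modify-write under its lock. Usage sketch taken from the VRMA segment-size call sites in this patch:

	lpcr = senc << (LPCR_VRMASD_SH - 4);		/* new VRMASD field value */
	kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);	/* only VRMASD bits change */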
@@ -1737,7 +1867,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) | |||
1737 | unsigned long hva; | 1867 | unsigned long hva; |
1738 | struct kvm_memory_slot *memslot; | 1868 | struct kvm_memory_slot *memslot; |
1739 | struct vm_area_struct *vma; | 1869 | struct vm_area_struct *vma; |
1740 | unsigned long lpcr, senc; | 1870 | unsigned long lpcr = 0, senc; |
1871 | unsigned long lpcr_mask = 0; | ||
1741 | unsigned long psize, porder; | 1872 | unsigned long psize, porder; |
1742 | unsigned long rma_size; | 1873 | unsigned long rma_size; |
1743 | unsigned long rmls; | 1874 | unsigned long rmls; |
@@ -1802,9 +1933,9 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) | |||
1802 | senc = slb_pgsize_encoding(psize); | 1933 | senc = slb_pgsize_encoding(psize); |
1803 | kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | | 1934 | kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | |
1804 | (VRMA_VSID << SLB_VSID_SHIFT_1T); | 1935 | (VRMA_VSID << SLB_VSID_SHIFT_1T); |
1805 | lpcr = kvm->arch.lpcr & ~LPCR_VRMASD; | 1936 | lpcr_mask = LPCR_VRMASD; |
1806 | lpcr |= senc << (LPCR_VRMASD_SH - 4); | 1937 | /* the -4 is to account for senc values starting at 0x10 */ |
1807 | kvm->arch.lpcr = lpcr; | 1938 | lpcr = senc << (LPCR_VRMASD_SH - 4); |
1808 | 1939 | ||
1809 | /* Create HPTEs in the hash page table for the VRMA */ | 1940 | /* Create HPTEs in the hash page table for the VRMA */ |
1810 | kvmppc_map_vrma(vcpu, memslot, porder); | 1941 | kvmppc_map_vrma(vcpu, memslot, porder); |
@@ -1825,23 +1956,21 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) | |||
1825 | kvm->arch.rma = ri; | 1956 | kvm->arch.rma = ri; |
1826 | 1957 | ||
1827 | /* Update LPCR and RMOR */ | 1958 | /* Update LPCR and RMOR */ |
1828 | lpcr = kvm->arch.lpcr; | ||
1829 | if (cpu_has_feature(CPU_FTR_ARCH_201)) { | 1959 | if (cpu_has_feature(CPU_FTR_ARCH_201)) { |
1830 | /* PPC970; insert RMLS value (split field) in HID4 */ | 1960 | /* PPC970; insert RMLS value (split field) in HID4 */ |
1831 | lpcr &= ~((1ul << HID4_RMLS0_SH) | | 1961 | lpcr_mask = (1ul << HID4_RMLS0_SH) | |
1832 | (3ul << HID4_RMLS2_SH)); | 1962 | (3ul << HID4_RMLS2_SH) | HID4_RMOR; |
1833 | lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) | | 1963 | lpcr = ((rmls >> 2) << HID4_RMLS0_SH) | |
1834 | ((rmls & 3) << HID4_RMLS2_SH); | 1964 | ((rmls & 3) << HID4_RMLS2_SH); |
1835 | /* RMOR is also in HID4 */ | 1965 | /* RMOR is also in HID4 */ |
1836 | lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff) | 1966 | lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff) |
1837 | << HID4_RMOR_SH; | 1967 | << HID4_RMOR_SH; |
1838 | } else { | 1968 | } else { |
1839 | /* POWER7 */ | 1969 | /* POWER7 */ |
1840 | lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L); | 1970 | lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS; |
1841 | lpcr |= rmls << LPCR_RMLS_SH; | 1971 | lpcr = rmls << LPCR_RMLS_SH; |
1842 | kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT; | 1972 | kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT; |
1843 | } | 1973 | } |
1844 | kvm->arch.lpcr = lpcr; | ||
1845 | pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n", | 1974 | pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n", |
1846 | ri->base_pfn << PAGE_SHIFT, rma_size, lpcr); | 1975 | ri->base_pfn << PAGE_SHIFT, rma_size, lpcr); |
1847 | 1976 | ||
@@ -1860,6 +1989,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) | |||
1860 | } | 1989 | } |
1861 | } | 1990 | } |
1862 | 1991 | ||
1992 | kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); | ||
1993 | |||
1863 | /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */ | 1994 | /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */ |
1864 | smp_wmb(); | 1995 | smp_wmb(); |
1865 | kvm->arch.rma_setup_done = 1; | 1996 | kvm->arch.rma_setup_done = 1; |
@@ -1875,7 +2006,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) | |||
1875 | goto out_srcu; | 2006 | goto out_srcu; |
1876 | } | 2007 | } |
1877 | 2008 | ||
1878 | int kvmppc_core_init_vm(struct kvm *kvm) | 2009 | static int kvmppc_core_init_vm_hv(struct kvm *kvm) |
1879 | { | 2010 | { |
1880 | unsigned long lpcr, lpid; | 2011 | unsigned long lpcr, lpid; |
1881 | 2012 | ||
@@ -1893,9 +2024,6 @@ int kvmppc_core_init_vm(struct kvm *kvm) | |||
1893 | */ | 2024 | */ |
1894 | cpumask_setall(&kvm->arch.need_tlb_flush); | 2025 | cpumask_setall(&kvm->arch.need_tlb_flush); |
1895 | 2026 | ||
1896 | INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); | ||
1897 | INIT_LIST_HEAD(&kvm->arch.rtas_tokens); | ||
1898 | |||
1899 | kvm->arch.rma = NULL; | 2027 | kvm->arch.rma = NULL; |
1900 | 2028 | ||
1901 | kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); | 2029 | kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); |
@@ -1931,61 +2059,162 @@ int kvmppc_core_init_vm(struct kvm *kvm) | |||
1931 | return 0; | 2059 | return 0; |
1932 | } | 2060 | } |
1933 | 2061 | ||
1934 | void kvmppc_core_destroy_vm(struct kvm *kvm) | 2062 | static void kvmppc_free_vcores(struct kvm *kvm) |
2063 | { | ||
2064 | long int i; | ||
2065 | |||
2066 | for (i = 0; i < KVM_MAX_VCORES; ++i) | ||
2067 | kfree(kvm->arch.vcores[i]); | ||
2068 | kvm->arch.online_vcores = 0; | ||
2069 | } | ||
2070 | |||
2071 | static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) | ||
1935 | { | 2072 | { |
1936 | uninhibit_secondary_onlining(); | 2073 | uninhibit_secondary_onlining(); |
1937 | 2074 | ||
2075 | kvmppc_free_vcores(kvm); | ||
1938 | if (kvm->arch.rma) { | 2076 | if (kvm->arch.rma) { |
1939 | kvm_release_rma(kvm->arch.rma); | 2077 | kvm_release_rma(kvm->arch.rma); |
1940 | kvm->arch.rma = NULL; | 2078 | kvm->arch.rma = NULL; |
1941 | } | 2079 | } |
1942 | 2080 | ||
1943 | kvmppc_rtas_tokens_free(kvm); | ||
1944 | |||
1945 | kvmppc_free_hpt(kvm); | 2081 | kvmppc_free_hpt(kvm); |
1946 | WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); | ||
1947 | } | 2082 | } |
1948 | 2083 | ||
1949 | /* These are stubs for now */ | 2084 | /* We don't need to emulate any privileged instructions or dcbz */ |
1950 | void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) | 2085 | static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, |
2086 | unsigned int inst, int *advance) | ||
1951 | { | 2087 | { |
2088 | return EMULATE_FAIL; | ||
1952 | } | 2089 | } |
1953 | 2090 | ||
1954 | /* We don't need to emulate any privileged instructions or dcbz */ | 2091 | static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn, |
1955 | int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | 2092 | ulong spr_val) |
1956 | unsigned int inst, int *advance) | ||
1957 | { | 2093 | { |
1958 | return EMULATE_FAIL; | 2094 | return EMULATE_FAIL; |
1959 | } | 2095 | } |
1960 | 2096 | ||
1961 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | 2097 | static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn, |
2098 | ulong *spr_val) | ||
1962 | { | 2099 | { |
1963 | return EMULATE_FAIL; | 2100 | return EMULATE_FAIL; |
1964 | } | 2101 | } |
1965 | 2102 | ||
1966 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) | 2103 | static int kvmppc_core_check_processor_compat_hv(void) |
1967 | { | 2104 | { |
1968 | return EMULATE_FAIL; | 2105 | if (!cpu_has_feature(CPU_FTR_HVMODE)) |
2106 | return -EIO; | ||
2107 | return 0; | ||
1969 | } | 2108 | } |
1970 | 2109 | ||
1971 | static int kvmppc_book3s_hv_init(void) | 2110 | static long kvm_arch_vm_ioctl_hv(struct file *filp, |
2111 | unsigned int ioctl, unsigned long arg) | ||
1972 | { | 2112 | { |
1973 | int r; | 2113 | struct kvm *kvm __maybe_unused = filp->private_data; |
2114 | void __user *argp = (void __user *)arg; | ||
2115 | long r; | ||
1974 | 2116 | ||
1975 | r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); | 2117 | switch (ioctl) { |
1976 | 2118 | ||
1977 | if (r) | 2119 | case KVM_ALLOCATE_RMA: { |
2120 | struct kvm_allocate_rma rma; | ||
2121 | struct kvm *kvm = filp->private_data; | ||
2122 | |||
2123 | r = kvm_vm_ioctl_allocate_rma(kvm, &rma); | ||
2124 | if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma))) | ||
2125 | r = -EFAULT; | ||
2126 | break; | ||
2127 | } | ||
2128 | |||
2129 | case KVM_PPC_ALLOCATE_HTAB: { | ||
2130 | u32 htab_order; | ||
2131 | |||
2132 | r = -EFAULT; | ||
2133 | if (get_user(htab_order, (u32 __user *)argp)) | ||
2134 | break; | ||
2135 | r = kvmppc_alloc_reset_hpt(kvm, &htab_order); | ||
2136 | if (r) | ||
2137 | break; | ||
2138 | r = -EFAULT; | ||
2139 | if (put_user(htab_order, (u32 __user *)argp)) | ||
2140 | break; | ||
2141 | r = 0; | ||
2142 | break; | ||
2143 | } | ||
2144 | |||
2145 | case KVM_PPC_GET_HTAB_FD: { | ||
2146 | struct kvm_get_htab_fd ghf; | ||
2147 | |||
2148 | r = -EFAULT; | ||
2149 | if (copy_from_user(&ghf, argp, sizeof(ghf))) | ||
2150 | break; | ||
2151 | r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf); | ||
2152 | break; | ||
2153 | } | ||
2154 | |||
2155 | default: | ||
2156 | r = -ENOTTY; | ||
2157 | } | ||
2158 | |||
2159 | return r; | ||
2160 | } | ||
2161 | |||
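
A hypothetical userspace counterpart of the KVM_PPC_ALLOCATE_HTAB branch above; the ioctl takes a u32 hash-table order in and writes back the order the kernel actually allocated:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int alloc_htab(int vm_fd, uint32_t *order)
{
	/* *order is log2 of the HPT size in bytes, in/out */
	return ioctl(vm_fd, KVM_PPC_ALLOCATE_HTAB, order);
}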
2162 | static struct kvmppc_ops kvm_ops_hv = { | ||
2163 | .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, | ||
2164 | .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, | ||
2165 | .get_one_reg = kvmppc_get_one_reg_hv, | ||
2166 | .set_one_reg = kvmppc_set_one_reg_hv, | ||
2167 | .vcpu_load = kvmppc_core_vcpu_load_hv, | ||
2168 | .vcpu_put = kvmppc_core_vcpu_put_hv, | ||
2169 | .set_msr = kvmppc_set_msr_hv, | ||
2170 | .vcpu_run = kvmppc_vcpu_run_hv, | ||
2171 | .vcpu_create = kvmppc_core_vcpu_create_hv, | ||
2172 | .vcpu_free = kvmppc_core_vcpu_free_hv, | ||
2173 | .check_requests = kvmppc_core_check_requests_hv, | ||
2174 | .get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv, | ||
2175 | .flush_memslot = kvmppc_core_flush_memslot_hv, | ||
2176 | .prepare_memory_region = kvmppc_core_prepare_memory_region_hv, | ||
2177 | .commit_memory_region = kvmppc_core_commit_memory_region_hv, | ||
2178 | .unmap_hva = kvm_unmap_hva_hv, | ||
2179 | .unmap_hva_range = kvm_unmap_hva_range_hv, | ||
2180 | .age_hva = kvm_age_hva_hv, | ||
2181 | .test_age_hva = kvm_test_age_hva_hv, | ||
2182 | .set_spte_hva = kvm_set_spte_hva_hv, | ||
2183 | .mmu_destroy = kvmppc_mmu_destroy_hv, | ||
2184 | .free_memslot = kvmppc_core_free_memslot_hv, | ||
2185 | .create_memslot = kvmppc_core_create_memslot_hv, | ||
2186 | .init_vm = kvmppc_core_init_vm_hv, | ||
2187 | .destroy_vm = kvmppc_core_destroy_vm_hv, | ||
2188 | .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv, | ||
2189 | .emulate_op = kvmppc_core_emulate_op_hv, | ||
2190 | .emulate_mtspr = kvmppc_core_emulate_mtspr_hv, | ||
2191 | .emulate_mfspr = kvmppc_core_emulate_mfspr_hv, | ||
2192 | .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv, | ||
2193 | .arch_vm_ioctl = kvm_arch_vm_ioctl_hv, | ||
2194 | }; | ||
2195 | |||
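
The ops table is the pivot of this refactor: generic PowerPC KVM code now calls through a kvmppc_ops pointer instead of linking against kvmppc_core_* symbols directly, which is what lets the HV and PR implementations coexist as modules. An illustrative dispatch wrapper (the name is hypothetical; the real indirection lives in the generic code):

static inline int do_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	return kvmppc_hv_ops->vcpu_run(run, vcpu);
}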
2196 | static int kvmppc_book3s_init_hv(void) | ||
2197 | { | ||
2198 | int r; | ||
2199 | /* | ||
2200 | * FIXME!! Do we need to check on all cpus ? | ||
2201 | */ | ||
2202 | r = kvmppc_core_check_processor_compat_hv(); | ||
2203 | if (r < 0) | ||
1978 | return r; | 2204 | return r; |
1979 | 2205 | ||
1980 | r = kvmppc_mmu_hv_init(); | 2206 | kvm_ops_hv.owner = THIS_MODULE; |
2207 | kvmppc_hv_ops = &kvm_ops_hv; | ||
1981 | 2208 | ||
2209 | r = kvmppc_mmu_hv_init(); | ||
1982 | return r; | 2210 | return r; |
1983 | } | 2211 | } |
1984 | 2212 | ||
1985 | static void kvmppc_book3s_hv_exit(void) | 2213 | static void kvmppc_book3s_exit_hv(void) |
1986 | { | 2214 | { |
1987 | kvm_exit(); | 2215 | kvmppc_hv_ops = NULL; |
1988 | } | 2216 | } |
1989 | 2217 | ||
1990 | module_init(kvmppc_book3s_hv_init); | 2218 | module_init(kvmppc_book3s_init_hv); |
1991 | module_exit(kvmppc_book3s_hv_exit); | 2219 | module_exit(kvmppc_book3s_exit_hv); |
2220 | MODULE_LICENSE("GPL"); | ||
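The book3s_hv.c hunks above stop registering HV as its own kvm module and instead publish the HV entry points through the kvm_ops_hv table: init now only checks processor compatibility, sets the owner, and installs the table in kvmppc_hv_ops, while exit simply clears the pointer. Below is a minimal C sketch of that indirection pattern; the simplified types and the backend_init/backend_exit names are illustrative stand-ins, not the kernel's kvmppc_ops API.

	/* Illustrative sketch of the ops-table indirection; the types are
	 * simplified stand-ins for the kernel's kvmppc_ops machinery. */
	struct vcpu;                           /* opaque here */

	struct vm_ops {
		void *owner;                   /* module providing the ops */
		int  (*vcpu_run)(struct vcpu *v);
		void (*vcpu_free)(struct vcpu *v);
	};

	static struct vm_ops *active_ops;      /* plays the role of kvmppc_hv_ops */

	static int backend_init(struct vm_ops *ops, int cpu_compatible)
	{
		if (!cpu_compatible)
			return -1;             /* like the CPU_FTR_HVMODE check */
		active_ops = ops;              /* generic code dispatches here now */
		return 0;
	}

	static void backend_exit(void)
	{
		active_ops = NULL;             /* stop dispatching to this backend */
	}

The payoff of the pattern is that the generic PPC KVM code can call through active_ops without knowing whether the HV or PR backend is loaded, and a backend can be built as a module that registers itself at load time.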
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index 37f1cc417ca0..928142c64cb0 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S | |||
@@ -158,9 +158,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
158 | * Interrupts are enabled again at this point. | 158 | * Interrupts are enabled again at this point. |
159 | */ | 159 | */ |
160 | 160 | ||
161 | .global kvmppc_handler_highmem | ||
162 | kvmppc_handler_highmem: | ||
163 | |||
164 | /* | 161 | /* |
165 | * Register usage at this point: | 162 | * Register usage at this point: |
166 | * | 163 | * |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index c71103b8a748..bc8de75b1925 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -33,30 +33,6 @@ | |||
33 | #error Need to fix lppaca and SLB shadow accesses in little endian mode | 33 | #error Need to fix lppaca and SLB shadow accesses in little endian mode |
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | /***************************************************************************** | ||
37 | * * | ||
38 | * Real Mode handlers that need to be in the linear mapping * | ||
39 | * * | ||
40 | ****************************************************************************/ | ||
41 | |||
42 | .globl kvmppc_skip_interrupt | ||
43 | kvmppc_skip_interrupt: | ||
44 | mfspr r13,SPRN_SRR0 | ||
45 | addi r13,r13,4 | ||
46 | mtspr SPRN_SRR0,r13 | ||
47 | GET_SCRATCH0(r13) | ||
48 | rfid | ||
49 | b . | ||
50 | |||
51 | .globl kvmppc_skip_Hinterrupt | ||
52 | kvmppc_skip_Hinterrupt: | ||
53 | mfspr r13,SPRN_HSRR0 | ||
54 | addi r13,r13,4 | ||
55 | mtspr SPRN_HSRR0,r13 | ||
56 | GET_SCRATCH0(r13) | ||
57 | hrfid | ||
58 | b . | ||
59 | |||
60 | /* | 36 | /* |
61 | * Call kvmppc_hv_entry in real mode. | 37 | * Call kvmppc_hv_entry in real mode. |
62 | * Must be called with interrupts hard-disabled. | 38 | * Must be called with interrupts hard-disabled. |
@@ -66,8 +42,11 @@ kvmppc_skip_Hinterrupt: | |||
66 | * LR = return address to continue at after eventually re-enabling MMU | 42 | * LR = return address to continue at after eventually re-enabling MMU |
67 | */ | 43 | */ |
68 | _GLOBAL(kvmppc_hv_entry_trampoline) | 44 | _GLOBAL(kvmppc_hv_entry_trampoline) |
45 | mflr r0 | ||
46 | std r0, PPC_LR_STKOFF(r1) | ||
47 | stdu r1, -112(r1) | ||
69 | mfmsr r10 | 48 | mfmsr r10 |
70 | LOAD_REG_ADDR(r5, kvmppc_hv_entry) | 49 | LOAD_REG_ADDR(r5, kvmppc_call_hv_entry) |
71 | li r0,MSR_RI | 50 | li r0,MSR_RI |
72 | andc r0,r10,r0 | 51 | andc r0,r10,r0 |
73 | li r6,MSR_IR | MSR_DR | 52 | li r6,MSR_IR | MSR_DR |
@@ -77,11 +56,103 @@ _GLOBAL(kvmppc_hv_entry_trampoline) | |||
77 | mtsrr1 r6 | 56 | mtsrr1 r6 |
78 | RFI | 57 | RFI |
79 | 58 | ||
80 | /****************************************************************************** | 59 | kvmppc_call_hv_entry: |
81 | * * | 60 | bl kvmppc_hv_entry |
82 | * Entry code * | 61 | |
83 | * * | 62 | /* Back from guest - restore host state and return to caller */ |
84 | *****************************************************************************/ | 63 | |
64 | /* Restore host DABR and DABRX */ | ||
65 | ld r5,HSTATE_DABR(r13) | ||
66 | li r6,7 | ||
67 | mtspr SPRN_DABR,r5 | ||
68 | mtspr SPRN_DABRX,r6 | ||
69 | |||
70 | /* Restore SPRG3 */ | ||
71 | ld r3,PACA_SPRG3(r13) | ||
72 | mtspr SPRN_SPRG3,r3 | ||
73 | |||
74 | /* | ||
75 | * Reload DEC. HDEC interrupts were disabled when | ||
76 | * we reloaded the host's LPCR value. | ||
77 | */ | ||
78 | ld r3, HSTATE_DECEXP(r13) | ||
79 | mftb r4 | ||
80 | subf r4, r4, r3 | ||
81 | mtspr SPRN_DEC, r4 | ||
82 | |||
83 | /* Reload the host's PMU registers */ | ||
84 | ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ | ||
85 | lbz r4, LPPACA_PMCINUSE(r3) | ||
86 | cmpwi r4, 0 | ||
87 | beq 23f /* skip if not */ | ||
88 | lwz r3, HSTATE_PMC(r13) | ||
89 | lwz r4, HSTATE_PMC + 4(r13) | ||
90 | lwz r5, HSTATE_PMC + 8(r13) | ||
91 | lwz r6, HSTATE_PMC + 12(r13) | ||
92 | lwz r8, HSTATE_PMC + 16(r13) | ||
93 | lwz r9, HSTATE_PMC + 20(r13) | ||
94 | BEGIN_FTR_SECTION | ||
95 | lwz r10, HSTATE_PMC + 24(r13) | ||
96 | lwz r11, HSTATE_PMC + 28(r13) | ||
97 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | ||
98 | mtspr SPRN_PMC1, r3 | ||
99 | mtspr SPRN_PMC2, r4 | ||
100 | mtspr SPRN_PMC3, r5 | ||
101 | mtspr SPRN_PMC4, r6 | ||
102 | mtspr SPRN_PMC5, r8 | ||
103 | mtspr SPRN_PMC6, r9 | ||
104 | BEGIN_FTR_SECTION | ||
105 | mtspr SPRN_PMC7, r10 | ||
106 | mtspr SPRN_PMC8, r11 | ||
107 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | ||
108 | ld r3, HSTATE_MMCR(r13) | ||
109 | ld r4, HSTATE_MMCR + 8(r13) | ||
110 | ld r5, HSTATE_MMCR + 16(r13) | ||
111 | mtspr SPRN_MMCR1, r4 | ||
112 | mtspr SPRN_MMCRA, r5 | ||
113 | mtspr SPRN_MMCR0, r3 | ||
114 | isync | ||
115 | 23: | ||
116 | |||
117 | /* | ||
118 | * For external and machine check interrupts, we need | ||
119 | * to call the Linux handler to process the interrupt. | ||
120 | * We do that by jumping to absolute address 0x500 for | ||
121 | * external interrupts, or the machine_check_fwnmi label | ||
122 | * for machine checks (since firmware might have patched | ||
123 | * the vector area at 0x200). The [h]rfid at the end of the | ||
124 | * handler will return to the book3s_hv_interrupts.S code. | ||
125 | * For other interrupts we do the rfid to get back | ||
126 | * to the book3s_hv_interrupts.S code here. | ||
127 | */ | ||
128 | ld r8, 112+PPC_LR_STKOFF(r1) | ||
129 | addi r1, r1, 112 | ||
130 | ld r7, HSTATE_HOST_MSR(r13) | ||
131 | |||
132 | cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK | ||
133 | cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL | ||
134 | BEGIN_FTR_SECTION | ||
135 | beq 11f | ||
136 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | ||
137 | |||
138 | /* RFI into the highmem handler, or branch to interrupt handler */ | ||
139 | mfmsr r6 | ||
140 | li r0, MSR_RI | ||
141 | andc r6, r6, r0 | ||
142 | mtmsrd r6, 1 /* Clear RI in MSR */ | ||
143 | mtsrr0 r8 | ||
144 | mtsrr1 r7 | ||
145 | beqa 0x500 /* external interrupt (PPC970) */ | ||
146 | beq cr1, 13f /* machine check */ | ||
147 | RFI | ||
148 | |||
149 | /* On POWER7, we have external interrupts set to use HSRR0/1 */ | ||
150 | 11: mtspr SPRN_HSRR0, r8 | ||
151 | mtspr SPRN_HSRR1, r7 | ||
152 | ba 0x500 | ||
153 | |||
154 | 13: b machine_check_fwnmi | ||
155 | |||
85 | 156 | ||
86 | /* | 157 | /* |
87 | * We come in here when wakened from nap mode on a secondary hw thread. | 158 | * We come in here when wakened from nap mode on a secondary hw thread. |
@@ -137,7 +208,7 @@ kvm_start_guest: | |||
137 | cmpdi r4,0 | 208 | cmpdi r4,0 |
138 | /* if we have no vcpu to run, go back to sleep */ | 209 | /* if we have no vcpu to run, go back to sleep */ |
139 | beq kvm_no_guest | 210 | beq kvm_no_guest |
140 | b kvmppc_hv_entry | 211 | b 30f |
141 | 212 | ||
142 | 27: /* XXX should handle hypervisor maintenance interrupts etc. here */ | 213 | 27: /* XXX should handle hypervisor maintenance interrupts etc. here */ |
143 | b kvm_no_guest | 214 | b kvm_no_guest |
@@ -147,6 +218,57 @@ kvm_start_guest: | |||
147 | stw r8,HSTATE_SAVED_XIRR(r13) | 218 | stw r8,HSTATE_SAVED_XIRR(r13) |
148 | b kvm_no_guest | 219 | b kvm_no_guest |
149 | 220 | ||
221 | 30: bl kvmppc_hv_entry | ||
222 | |||
223 | /* Back from the guest, go back to nap */ | ||
224 | /* Clear our vcpu pointer so we don't come back in early */ | ||
225 | li r0, 0 | ||
226 | std r0, HSTATE_KVM_VCPU(r13) | ||
227 | lwsync | ||
228 | /* Clear any pending IPI - we're an offline thread */ | ||
229 | ld r5, HSTATE_XICS_PHYS(r13) | ||
230 | li r7, XICS_XIRR | ||
231 | lwzcix r3, r5, r7 /* ack any pending interrupt */ | ||
232 | rlwinm. r0, r3, 0, 0xffffff /* any pending? */ | ||
233 | beq 37f | ||
234 | sync | ||
235 | li r0, 0xff | ||
236 | li r6, XICS_MFRR | ||
237 | stbcix r0, r5, r6 /* clear the IPI */ | ||
238 | stwcix r3, r5, r7 /* EOI it */ | ||
239 | 37: sync | ||
240 | |||
241 | /* increment the nap count and then go to nap mode */ | ||
242 | ld r4, HSTATE_KVM_VCORE(r13) | ||
243 | addi r4, r4, VCORE_NAP_COUNT | ||
244 | lwsync /* make previous updates visible */ | ||
245 | 51: lwarx r3, 0, r4 | ||
246 | addi r3, r3, 1 | ||
247 | stwcx. r3, 0, r4 | ||
248 | bne 51b | ||
249 | |||
250 | kvm_no_guest: | ||
251 | li r0, KVM_HWTHREAD_IN_NAP | ||
252 | stb r0, HSTATE_HWTHREAD_STATE(r13) | ||
253 | li r3, LPCR_PECE0 | ||
254 | mfspr r4, SPRN_LPCR | ||
255 | rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 | ||
256 | mtspr SPRN_LPCR, r4 | ||
257 | isync | ||
258 | std r0, HSTATE_SCRATCH0(r13) | ||
259 | ptesync | ||
260 | ld r0, HSTATE_SCRATCH0(r13) | ||
261 | 1: cmpd r0, r0 | ||
262 | bne 1b | ||
263 | nap | ||
264 | b . | ||
265 | |||
266 | /****************************************************************************** | ||
267 | * * | ||
268 | * Entry code * | ||
269 | * * | ||
270 | *****************************************************************************/ | ||
271 | |||
150 | .global kvmppc_hv_entry | 272 | .global kvmppc_hv_entry |
151 | kvmppc_hv_entry: | 273 | kvmppc_hv_entry: |
152 | 274 | ||
@@ -159,7 +281,8 @@ kvmppc_hv_entry: | |||
159 | * all other volatile GPRS = free | 281 | * all other volatile GPRS = free |
160 | */ | 282 | */ |
161 | mflr r0 | 283 | mflr r0 |
162 | std r0, HSTATE_VMHANDLER(r13) | 284 | std r0, PPC_LR_STKOFF(r1) |
285 | stdu r1, -112(r1) | ||
163 | 286 | ||
164 | /* Set partition DABR */ | 287 | /* Set partition DABR */ |
165 | /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ | 288 | /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ |
@@ -200,8 +323,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
200 | ld r3, VCPU_MMCR(r4) | 323 | ld r3, VCPU_MMCR(r4) |
201 | ld r5, VCPU_MMCR + 8(r4) | 324 | ld r5, VCPU_MMCR + 8(r4) |
202 | ld r6, VCPU_MMCR + 16(r4) | 325 | ld r6, VCPU_MMCR + 16(r4) |
326 | ld r7, VCPU_SIAR(r4) | ||
327 | ld r8, VCPU_SDAR(r4) | ||
203 | mtspr SPRN_MMCR1, r5 | 328 | mtspr SPRN_MMCR1, r5 |
204 | mtspr SPRN_MMCRA, r6 | 329 | mtspr SPRN_MMCRA, r6 |
330 | mtspr SPRN_SIAR, r7 | ||
331 | mtspr SPRN_SDAR, r8 | ||
205 | mtspr SPRN_MMCR0, r3 | 332 | mtspr SPRN_MMCR0, r3 |
206 | isync | 333 | isync |
207 | 334 | ||
@@ -254,22 +381,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
254 | /* Save R1 in the PACA */ | 381 | /* Save R1 in the PACA */ |
255 | std r1, HSTATE_HOST_R1(r13) | 382 | std r1, HSTATE_HOST_R1(r13) |
256 | 383 | ||
257 | /* Increment yield count if they have a VPA */ | ||
258 | ld r3, VCPU_VPA(r4) | ||
259 | cmpdi r3, 0 | ||
260 | beq 25f | ||
261 | lwz r5, LPPACA_YIELDCOUNT(r3) | ||
262 | addi r5, r5, 1 | ||
263 | stw r5, LPPACA_YIELDCOUNT(r3) | ||
264 | li r6, 1 | ||
265 | stb r6, VCPU_VPA_DIRTY(r4) | ||
266 | 25: | ||
267 | /* Load up DAR and DSISR */ | 384 | /* Load up DAR and DSISR */ |
268 | ld r5, VCPU_DAR(r4) | 385 | ld r5, VCPU_DAR(r4) |
269 | lwz r6, VCPU_DSISR(r4) | 386 | lwz r6, VCPU_DSISR(r4) |
270 | mtspr SPRN_DAR, r5 | 387 | mtspr SPRN_DAR, r5 |
271 | mtspr SPRN_DSISR, r6 | 388 | mtspr SPRN_DSISR, r6 |
272 | 389 | ||
390 | li r6, KVM_GUEST_MODE_HOST_HV | ||
391 | stb r6, HSTATE_IN_GUEST(r13) | ||
392 | |||
273 | BEGIN_FTR_SECTION | 393 | BEGIN_FTR_SECTION |
274 | /* Restore AMR and UAMOR, set AMOR to all 1s */ | 394 | /* Restore AMR and UAMOR, set AMOR to all 1s */ |
275 | ld r5,VCPU_AMR(r4) | 395 | ld r5,VCPU_AMR(r4) |
@@ -343,7 +463,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
343 | bdnz 28b | 463 | bdnz 28b |
344 | ptesync | 464 | ptesync |
345 | 465 | ||
346 | 22: li r0,1 | 466 | /* Add timebase offset onto timebase */ |
467 | 22: ld r8,VCORE_TB_OFFSET(r5) | ||
468 | cmpdi r8,0 | ||
469 | beq 37f | ||
470 | mftb r6 /* current host timebase */ | ||
471 | add r8,r8,r6 | ||
472 | mtspr SPRN_TBU40,r8 /* update upper 40 bits */ | ||
473 | mftb r7 /* check if lower 24 bits overflowed */ | ||
474 | clrldi r6,r6,40 | ||
475 | clrldi r7,r7,40 | ||
476 | cmpld r7,r6 | ||
477 | bge 37f | ||
478 | addis r8,r8,0x100 /* if so, increment upper 40 bits */ | ||
479 | mtspr SPRN_TBU40,r8 | ||
480 | |||
481 | /* Load guest PCR value to select appropriate compat mode */ | ||
482 | 37: ld r7, VCORE_PCR(r5) | ||
483 | cmpdi r7, 0 | ||
484 | beq 38f | ||
485 | mtspr SPRN_PCR, r7 | ||
486 | 38: | ||
487 | li r0,1 | ||
347 | stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ | 488 | stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ |
348 | b 10f | 489 | b 10f |
349 | 490 | ||
@@ -353,12 +494,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
353 | beq 20b | 494 | beq 20b
354 | 495 | ||
355 | /* Set LPCR and RMOR. */ | 496 | /* Set LPCR and RMOR. */ |
356 | 10: ld r8,KVM_LPCR(r9) | 497 | 10: ld r8,VCORE_LPCR(r5) |
357 | mtspr SPRN_LPCR,r8 | 498 | mtspr SPRN_LPCR,r8 |
358 | ld r8,KVM_RMOR(r9) | 499 | ld r8,KVM_RMOR(r9) |
359 | mtspr SPRN_RMOR,r8 | 500 | mtspr SPRN_RMOR,r8 |
360 | isync | 501 | isync |
361 | 502 | ||
503 | /* Increment yield count if they have a VPA */ | ||
504 | ld r3, VCPU_VPA(r4) | ||
505 | cmpdi r3, 0 | ||
506 | beq 25f | ||
507 | lwz r5, LPPACA_YIELDCOUNT(r3) | ||
508 | addi r5, r5, 1 | ||
509 | stw r5, LPPACA_YIELDCOUNT(r3) | ||
510 | li r6, 1 | ||
511 | stb r6, VCPU_VPA_DIRTY(r4) | ||
512 | 25: | ||
362 | /* Check if HDEC expires soon */ | 513 | /* Check if HDEC expires soon */ |
363 | mfspr r3,SPRN_HDEC | 514 | mfspr r3,SPRN_HDEC |
364 | cmpwi r3,10 | 515 | cmpwi r3,10 |
@@ -405,7 +556,8 @@ toc_tlbie_lock: | |||
405 | bne 24b | 556 | bne 24b |
406 | isync | 557 | isync |
407 | 558 | ||
408 | ld r7,KVM_LPCR(r9) /* use kvm->arch.lpcr to store HID4 */ | 559 | ld r5,HSTATE_KVM_VCORE(r13) |
560 | ld r7,VCORE_LPCR(r5) /* use vcore->lpcr to store HID4 */ | ||
409 | li r0,0x18f | 561 | li r0,0x18f |
410 | rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */ | 562 | rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */ |
411 | or r0,r7,r0 | 563 | or r0,r7,r0 |
@@ -541,7 +693,7 @@ fast_guest_return: | |||
541 | mtspr SPRN_HSRR1,r11 | 693 | mtspr SPRN_HSRR1,r11 |
542 | 694 | ||
543 | /* Activate guest mode, so faults get handled by KVM */ | 695 | /* Activate guest mode, so faults get handled by KVM */ |
544 | li r9, KVM_GUEST_MODE_GUEST | 696 | li r9, KVM_GUEST_MODE_GUEST_HV |
545 | stb r9, HSTATE_IN_GUEST(r13) | 697 | stb r9, HSTATE_IN_GUEST(r13) |
546 | 698 | ||
547 | /* Enter guest */ | 699 | /* Enter guest */ |
@@ -550,13 +702,15 @@ BEGIN_FTR_SECTION | |||
550 | ld r5, VCPU_CFAR(r4) | 702 | ld r5, VCPU_CFAR(r4) |
551 | mtspr SPRN_CFAR, r5 | 703 | mtspr SPRN_CFAR, r5 |
552 | END_FTR_SECTION_IFSET(CPU_FTR_CFAR) | 704 | END_FTR_SECTION_IFSET(CPU_FTR_CFAR) |
705 | BEGIN_FTR_SECTION | ||
706 | ld r0, VCPU_PPR(r4) | ||
707 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | ||
553 | 708 | ||
554 | ld r5, VCPU_LR(r4) | 709 | ld r5, VCPU_LR(r4) |
555 | lwz r6, VCPU_CR(r4) | 710 | lwz r6, VCPU_CR(r4) |
556 | mtlr r5 | 711 | mtlr r5 |
557 | mtcr r6 | 712 | mtcr r6 |
558 | 713 | ||
559 | ld r0, VCPU_GPR(R0)(r4) | ||
560 | ld r1, VCPU_GPR(R1)(r4) | 714 | ld r1, VCPU_GPR(R1)(r4) |
561 | ld r2, VCPU_GPR(R2)(r4) | 715 | ld r2, VCPU_GPR(R2)(r4) |
562 | ld r3, VCPU_GPR(R3)(r4) | 716 | ld r3, VCPU_GPR(R3)(r4) |
@@ -570,6 +724,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) | |||
570 | ld r12, VCPU_GPR(R12)(r4) | 724 | ld r12, VCPU_GPR(R12)(r4) |
571 | ld r13, VCPU_GPR(R13)(r4) | 725 | ld r13, VCPU_GPR(R13)(r4) |
572 | 726 | ||
727 | BEGIN_FTR_SECTION | ||
728 | mtspr SPRN_PPR, r0 | ||
729 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | ||
730 | ld r0, VCPU_GPR(R0)(r4) | ||
573 | ld r4, VCPU_GPR(R4)(r4) | 731 | ld r4, VCPU_GPR(R4)(r4) |
574 | 732 | ||
575 | hrfid | 733 | hrfid |
@@ -584,8 +742,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) | |||
584 | /* | 742 | /* |
585 | * We come here from the first-level interrupt handlers. | 743 | * We come here from the first-level interrupt handlers. |
586 | */ | 744 | */ |
587 | .globl kvmppc_interrupt | 745 | .globl kvmppc_interrupt_hv |
588 | kvmppc_interrupt: | 746 | kvmppc_interrupt_hv: |
589 | /* | 747 | /* |
590 | * Register contents: | 748 | * Register contents: |
591 | * R12 = interrupt vector | 749 | * R12 = interrupt vector |
@@ -595,6 +753,19 @@ kvmppc_interrupt: | |||
595 | */ | 753 | */ |
596 | /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */ | 754 | /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */ |
597 | std r9, HSTATE_HOST_R2(r13) | 755 | std r9, HSTATE_HOST_R2(r13) |
756 | |||
757 | lbz r9, HSTATE_IN_GUEST(r13) | ||
758 | cmpwi r9, KVM_GUEST_MODE_HOST_HV | ||
759 | beq kvmppc_bad_host_intr | ||
760 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE | ||
761 | cmpwi r9, KVM_GUEST_MODE_GUEST | ||
762 | ld r9, HSTATE_HOST_R2(r13) | ||
763 | beq kvmppc_interrupt_pr | ||
764 | #endif | ||
765 | /* We're now back in the host but in guest MMU context */ | ||
766 | li r9, KVM_GUEST_MODE_HOST_HV | ||
767 | stb r9, HSTATE_IN_GUEST(r13) | ||
768 | |||
598 | ld r9, HSTATE_KVM_VCPU(r13) | 769 | ld r9, HSTATE_KVM_VCPU(r13) |
599 | 770 | ||
600 | /* Save registers */ | 771 | /* Save registers */ |
@@ -620,6 +791,10 @@ BEGIN_FTR_SECTION | |||
620 | ld r3, HSTATE_CFAR(r13) | 791 | ld r3, HSTATE_CFAR(r13) |
621 | std r3, VCPU_CFAR(r9) | 792 | std r3, VCPU_CFAR(r9) |
622 | END_FTR_SECTION_IFSET(CPU_FTR_CFAR) | 793 | END_FTR_SECTION_IFSET(CPU_FTR_CFAR) |
794 | BEGIN_FTR_SECTION | ||
795 | ld r4, HSTATE_PPR(r13) | ||
796 | std r4, VCPU_PPR(r9) | ||
797 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | ||
623 | 798 | ||
624 | /* Restore R1/R2 so we can handle faults */ | 799 | /* Restore R1/R2 so we can handle faults */ |
625 | ld r1, HSTATE_HOST_R1(r13) | 800 | ld r1, HSTATE_HOST_R1(r13) |
@@ -642,10 +817,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) | |||
642 | std r3, VCPU_GPR(R13)(r9) | 817 | std r3, VCPU_GPR(R13)(r9) |
643 | std r4, VCPU_LR(r9) | 818 | std r4, VCPU_LR(r9) |
644 | 819 | ||
645 | /* Unset guest mode */ | ||
646 | li r0, KVM_GUEST_MODE_NONE | ||
647 | stb r0, HSTATE_IN_GUEST(r13) | ||
648 | |||
649 | stw r12,VCPU_TRAP(r9) | 820 | stw r12,VCPU_TRAP(r9) |
650 | 821 | ||
651 | /* Save HEIR (HV emulation assist reg) in last_inst | 822 | /* Save HEIR (HV emulation assist reg) in last_inst |
@@ -696,46 +867,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) | |||
696 | * set, we know the host wants us out so let's do it now | 867 | * set, we know the host wants us out so let's do it now |
697 | */ | 868 | */ |
698 | do_ext_interrupt: | 869 | do_ext_interrupt: |
699 | lbz r0, HSTATE_HOST_IPI(r13) | 870 | bl kvmppc_read_intr |
700 | cmpwi r0, 0 | 871 | cmpdi r3, 0 |
701 | bne ext_interrupt_to_host | 872 | bgt ext_interrupt_to_host |
702 | |||
703 | /* Now read the interrupt from the ICP */ | ||
704 | ld r5, HSTATE_XICS_PHYS(r13) | ||
705 | li r7, XICS_XIRR | ||
706 | cmpdi r5, 0 | ||
707 | beq- ext_interrupt_to_host | ||
708 | lwzcix r3, r5, r7 | ||
709 | rlwinm. r0, r3, 0, 0xffffff | ||
710 | sync | ||
711 | beq 3f /* if nothing pending in the ICP */ | ||
712 | |||
713 | /* We found something in the ICP... | ||
714 | * | ||
715 | * If it's not an IPI, stash it in the PACA and return to | ||
716 | * the host, we don't (yet) handle directing real external | ||
717 | * interrupts directly to the guest | ||
718 | */ | ||
719 | cmpwi r0, XICS_IPI | ||
720 | bne ext_stash_for_host | ||
721 | |||
722 | /* It's an IPI, clear the MFRR and EOI it */ | ||
723 | li r0, 0xff | ||
724 | li r6, XICS_MFRR | ||
725 | stbcix r0, r5, r6 /* clear the IPI */ | ||
726 | stwcix r3, r5, r7 /* EOI it */ | ||
727 | sync | ||
728 | |||
729 | /* We need to re-check host IPI now in case it got set in the | ||
730 | * meantime. If it's clear, we bounce the interrupt to the | ||
731 | * guest | ||
732 | */ | ||
733 | lbz r0, HSTATE_HOST_IPI(r13) | ||
734 | cmpwi r0, 0 | ||
735 | bne- 1f | ||
736 | 873 | ||
737 | /* Allright, looks like an IPI for the guest, we need to set MER */ | 874 | /* Allright, looks like an IPI for the guest, we need to set MER */
738 | 3: | ||
739 | /* Check if any CPU is heading out to the host, if so head out too */ | 875 | /* Check if any CPU is heading out to the host, if so head out too */ |
740 | ld r5, HSTATE_KVM_VCORE(r13) | 876 | ld r5, HSTATE_KVM_VCORE(r13) |
741 | lwz r0, VCORE_ENTRY_EXIT(r5) | 877 | lwz r0, VCORE_ENTRY_EXIT(r5) |
@@ -764,27 +900,9 @@ do_ext_interrupt: | |||
764 | mtspr SPRN_LPCR, r8 | 900 | mtspr SPRN_LPCR, r8 |
765 | b fast_guest_return | 901 | b fast_guest_return |
766 | 902 | ||
767 | /* We raced with the host, we need to resend that IPI, bummer */ | ||
768 | 1: li r0, IPI_PRIORITY | ||
769 | stbcix r0, r5, r6 /* set the IPI */ | ||
770 | sync | ||
771 | b ext_interrupt_to_host | ||
772 | |||
773 | ext_stash_for_host: | ||
774 | /* It's not an IPI and it's for the host, stash it in the PACA | ||
775 | * before exit, it will be picked up by the host ICP driver | ||
776 | */ | ||
777 | stw r3, HSTATE_SAVED_XIRR(r13) | ||
778 | ext_interrupt_to_host: | 903 | ext_interrupt_to_host: |
779 | 904 | ||
780 | guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ | 905 | guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ |
781 | /* Save DEC */ | ||
782 | mfspr r5,SPRN_DEC | ||
783 | mftb r6 | ||
784 | extsw r5,r5 | ||
785 | add r5,r5,r6 | ||
786 | std r5,VCPU_DEC_EXPIRES(r9) | ||
787 | |||
788 | /* Save more register state */ | 906 | /* Save more register state */ |
789 | mfdar r6 | 907 | mfdar r6 |
790 | mfdsisr r7 | 908 | mfdsisr r7 |
@@ -954,7 +1072,30 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
954 | mtspr SPRN_SDR1,r6 /* switch to partition page table */ | 1072 | mtspr SPRN_SDR1,r6 /* switch to partition page table */ |
955 | mtspr SPRN_LPID,r7 | 1073 | mtspr SPRN_LPID,r7 |
956 | isync | 1074 | isync |
957 | li r0,0 | 1075 | |
1076 | /* Subtract timebase offset from timebase */ | ||
1077 | ld r8,VCORE_TB_OFFSET(r5) | ||
1078 | cmpdi r8,0 | ||
1079 | beq 17f | ||
1080 | mftb r6 /* current host timebase */ | ||
1081 | subf r8,r8,r6 | ||
1082 | mtspr SPRN_TBU40,r8 /* update upper 40 bits */ | ||
1083 | mftb r7 /* check if lower 24 bits overflowed */ | ||
1084 | clrldi r6,r6,40 | ||
1085 | clrldi r7,r7,40 | ||
1086 | cmpld r7,r6 | ||
1087 | bge 17f | ||
1088 | addis r8,r8,0x100 /* if so, increment upper 40 bits */ | ||
1089 | mtspr SPRN_TBU40,r8 | ||
1090 | |||
1091 | /* Reset PCR */ | ||
1092 | 17: ld r0, VCORE_PCR(r5) | ||
1093 | cmpdi r0, 0 | ||
1094 | beq 18f | ||
1095 | li r0, 0 | ||
1096 | mtspr SPRN_PCR, r0 | ||
1097 | 18: | ||
1098 | /* Signal secondary CPUs to continue */ | ||
958 | stb r0,VCORE_IN_GUEST(r5) | 1099 | stb r0,VCORE_IN_GUEST(r5) |
959 | lis r8,0x7fff /* MAX_INT@h */ | 1100 | lis r8,0x7fff /* MAX_INT@h */ |
960 | mtspr SPRN_HDEC,r8 | 1101 | mtspr SPRN_HDEC,r8 |
@@ -1052,6 +1193,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
1052 | 1: addi r8,r8,16 | 1193 | 1: addi r8,r8,16 |
1053 | .endr | 1194 | .endr |
1054 | 1195 | ||
1196 | /* Save DEC */ | ||
1197 | mfspr r5,SPRN_DEC | ||
1198 | mftb r6 | ||
1199 | extsw r5,r5 | ||
1200 | add r5,r5,r6 | ||
1201 | std r5,VCPU_DEC_EXPIRES(r9) | ||
1202 | |||
1055 | /* Save and reset AMR and UAMOR before turning on the MMU */ | 1203 | /* Save and reset AMR and UAMOR before turning on the MMU */ |
1056 | BEGIN_FTR_SECTION | 1204 | BEGIN_FTR_SECTION |
1057 | mfspr r5,SPRN_AMR | 1205 | mfspr r5,SPRN_AMR |
@@ -1062,6 +1210,10 @@ BEGIN_FTR_SECTION | |||
1062 | mtspr SPRN_AMR,r6 | 1210 | mtspr SPRN_AMR,r6 |
1063 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | 1211 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) |
1064 | 1212 | ||
1213 | /* Unset guest mode */ | ||
1214 | li r0, KVM_GUEST_MODE_NONE | ||
1215 | stb r0, HSTATE_IN_GUEST(r13) | ||
1216 | |||
1065 | /* Switch DSCR back to host value */ | 1217 | /* Switch DSCR back to host value */ |
1066 | BEGIN_FTR_SECTION | 1218 | BEGIN_FTR_SECTION |
1067 | mfspr r8, SPRN_DSCR | 1219 | mfspr r8, SPRN_DSCR |
@@ -1134,9 +1286,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
1134 | std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ | 1286 | std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ |
1135 | b 22f | 1287 | b 22f |
1136 | 21: mfspr r5, SPRN_MMCR1 | 1288 | 21: mfspr r5, SPRN_MMCR1 |
1289 | mfspr r7, SPRN_SIAR | ||
1290 | mfspr r8, SPRN_SDAR | ||
1137 | std r4, VCPU_MMCR(r9) | 1291 | std r4, VCPU_MMCR(r9) |
1138 | std r5, VCPU_MMCR + 8(r9) | 1292 | std r5, VCPU_MMCR + 8(r9) |
1139 | std r6, VCPU_MMCR + 16(r9) | 1293 | std r6, VCPU_MMCR + 16(r9) |
1294 | std r7, VCPU_SIAR(r9) | ||
1295 | std r8, VCPU_SDAR(r9) | ||
1140 | mfspr r3, SPRN_PMC1 | 1296 | mfspr r3, SPRN_PMC1 |
1141 | mfspr r4, SPRN_PMC2 | 1297 | mfspr r4, SPRN_PMC2 |
1142 | mfspr r5, SPRN_PMC3 | 1298 | mfspr r5, SPRN_PMC3 |
@@ -1158,103 +1314,30 @@ BEGIN_FTR_SECTION | |||
1158 | stw r11, VCPU_PMC + 28(r9) | 1314 | stw r11, VCPU_PMC + 28(r9) |
1159 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | 1315 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) |
1160 | 22: | 1316 | 22: |
1317 | ld r0, 112+PPC_LR_STKOFF(r1) | ||
1318 | addi r1, r1, 112 | ||
1319 | mtlr r0 | ||
1320 | blr | ||
1321 | secondary_too_late: | ||
1322 | ld r5,HSTATE_KVM_VCORE(r13) | ||
1323 | HMT_LOW | ||
1324 | 13: lbz r3,VCORE_IN_GUEST(r5) | ||
1325 | cmpwi r3,0 | ||
1326 | bne 13b | ||
1327 | HMT_MEDIUM | ||
1328 | li r0, KVM_GUEST_MODE_NONE | ||
1329 | stb r0, HSTATE_IN_GUEST(r13) | ||
1330 | ld r11,PACA_SLBSHADOWPTR(r13) | ||
1161 | 1331 | ||
1162 | /* Secondary threads go off to take a nap on POWER7 */ | 1332 | .rept SLB_NUM_BOLTED |
1163 | BEGIN_FTR_SECTION | 1333 | ld r5,SLBSHADOW_SAVEAREA(r11) |
1164 | lwz r0,VCPU_PTID(r9) | 1334 | ld r6,SLBSHADOW_SAVEAREA+8(r11) |
1165 | cmpwi r0,0 | 1335 | andis. r7,r5,SLB_ESID_V@h |
1166 | bne secondary_nap | 1336 | beq 1f |
1167 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | 1337 | slbmte r6,r5 |
1168 | 1338 | 1: addi r11,r11,16 | |
1169 | /* Restore host DABR and DABRX */ | 1339 | .endr |
1170 | ld r5,HSTATE_DABR(r13) | 1340 | b 22b |
1171 | li r6,7 | ||
1172 | mtspr SPRN_DABR,r5 | ||
1173 | mtspr SPRN_DABRX,r6 | ||
1174 | |||
1175 | /* Restore SPRG3 */ | ||
1176 | ld r3,PACA_SPRG3(r13) | ||
1177 | mtspr SPRN_SPRG3,r3 | ||
1178 | |||
1179 | /* | ||
1180 | * Reload DEC. HDEC interrupts were disabled when | ||
1181 | * we reloaded the host's LPCR value. | ||
1182 | */ | ||
1183 | ld r3, HSTATE_DECEXP(r13) | ||
1184 | mftb r4 | ||
1185 | subf r4, r4, r3 | ||
1186 | mtspr SPRN_DEC, r4 | ||
1187 | |||
1188 | /* Reload the host's PMU registers */ | ||
1189 | ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ | ||
1190 | lbz r4, LPPACA_PMCINUSE(r3) | ||
1191 | cmpwi r4, 0 | ||
1192 | beq 23f /* skip if not */ | ||
1193 | lwz r3, HSTATE_PMC(r13) | ||
1194 | lwz r4, HSTATE_PMC + 4(r13) | ||
1195 | lwz r5, HSTATE_PMC + 8(r13) | ||
1196 | lwz r6, HSTATE_PMC + 12(r13) | ||
1197 | lwz r8, HSTATE_PMC + 16(r13) | ||
1198 | lwz r9, HSTATE_PMC + 20(r13) | ||
1199 | BEGIN_FTR_SECTION | ||
1200 | lwz r10, HSTATE_PMC + 24(r13) | ||
1201 | lwz r11, HSTATE_PMC + 28(r13) | ||
1202 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | ||
1203 | mtspr SPRN_PMC1, r3 | ||
1204 | mtspr SPRN_PMC2, r4 | ||
1205 | mtspr SPRN_PMC3, r5 | ||
1206 | mtspr SPRN_PMC4, r6 | ||
1207 | mtspr SPRN_PMC5, r8 | ||
1208 | mtspr SPRN_PMC6, r9 | ||
1209 | BEGIN_FTR_SECTION | ||
1210 | mtspr SPRN_PMC7, r10 | ||
1211 | mtspr SPRN_PMC8, r11 | ||
1212 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | ||
1213 | ld r3, HSTATE_MMCR(r13) | ||
1214 | ld r4, HSTATE_MMCR + 8(r13) | ||
1215 | ld r5, HSTATE_MMCR + 16(r13) | ||
1216 | mtspr SPRN_MMCR1, r4 | ||
1217 | mtspr SPRN_MMCRA, r5 | ||
1218 | mtspr SPRN_MMCR0, r3 | ||
1219 | isync | ||
1220 | 23: | ||
1221 | /* | ||
1222 | * For external and machine check interrupts, we need | ||
1223 | * to call the Linux handler to process the interrupt. | ||
1224 | * We do that by jumping to absolute address 0x500 for | ||
1225 | * external interrupts, or the machine_check_fwnmi label | ||
1226 | * for machine checks (since firmware might have patched | ||
1227 | * the vector area at 0x200). The [h]rfid at the end of the | ||
1228 | * handler will return to the book3s_hv_interrupts.S code. | ||
1229 | * For other interrupts we do the rfid to get back | ||
1230 | * to the book3s_hv_interrupts.S code here. | ||
1231 | */ | ||
1232 | ld r8, HSTATE_VMHANDLER(r13) | ||
1233 | ld r7, HSTATE_HOST_MSR(r13) | ||
1234 | |||
1235 | cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK | ||
1236 | cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL | ||
1237 | BEGIN_FTR_SECTION | ||
1238 | beq 11f | ||
1239 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | ||
1240 | |||
1241 | /* RFI into the highmem handler, or branch to interrupt handler */ | ||
1242 | mfmsr r6 | ||
1243 | li r0, MSR_RI | ||
1244 | andc r6, r6, r0 | ||
1245 | mtmsrd r6, 1 /* Clear RI in MSR */ | ||
1246 | mtsrr0 r8 | ||
1247 | mtsrr1 r7 | ||
1248 | beqa 0x500 /* external interrupt (PPC970) */ | ||
1249 | beq cr1, 13f /* machine check */ | ||
1250 | RFI | ||
1251 | |||
1252 | /* On POWER7, we have external interrupts set to use HSRR0/1 */ | ||
1253 | 11: mtspr SPRN_HSRR0, r8 | ||
1254 | mtspr SPRN_HSRR1, r7 | ||
1255 | ba 0x500 | ||
1256 | |||
1257 | 13: b machine_check_fwnmi | ||
1258 | 1341 | ||
1259 | /* | 1342 | /* |
1260 | * Check whether an HDSI is an HPTE not found fault or something else. | 1343 | * Check whether an HDSI is an HPTE not found fault or something else. |
@@ -1333,7 +1416,7 @@ fast_interrupt_c_return: | |||
1333 | stw r8, VCPU_LAST_INST(r9) | 1416 | stw r8, VCPU_LAST_INST(r9) |
1334 | 1417 | ||
1335 | /* Unset guest mode. */ | 1418 | /* Unset guest mode. */ |
1336 | li r0, KVM_GUEST_MODE_NONE | 1419 | li r0, KVM_GUEST_MODE_HOST_HV |
1337 | stb r0, HSTATE_IN_GUEST(r13) | 1420 | stb r0, HSTATE_IN_GUEST(r13) |
1338 | b guest_exit_cont | 1421 | b guest_exit_cont |
1339 | 1422 | ||
@@ -1701,67 +1784,70 @@ machine_check_realmode: | |||
1701 | rotldi r11, r11, 63 | 1784 | rotldi r11, r11, 63 |
1702 | b fast_interrupt_c_return | 1785 | b fast_interrupt_c_return |
1703 | 1786 | ||
1704 | secondary_too_late: | 1787 | /* |
1705 | ld r5,HSTATE_KVM_VCORE(r13) | 1788 | * Determine what sort of external interrupt is pending (if any). |
1706 | HMT_LOW | 1789 | * Returns: |
1707 | 13: lbz r3,VCORE_IN_GUEST(r5) | 1790 | * 0 if no interrupt is pending |
1708 | cmpwi r3,0 | 1791 | * 1 if an interrupt is pending that needs to be handled by the host |
1709 | bne 13b | 1792 | * -1 if there was a guest wakeup IPI (which has now been cleared) |
1710 | HMT_MEDIUM | 1793 | */ |
1711 | ld r11,PACA_SLBSHADOWPTR(r13) | 1794 | kvmppc_read_intr: |
1712 | 1795 | /* see if a host IPI is pending */ | |
1713 | .rept SLB_NUM_BOLTED | 1796 | li r3, 1 |
1714 | ld r5,SLBSHADOW_SAVEAREA(r11) | 1797 | lbz r0, HSTATE_HOST_IPI(r13) |
1715 | ld r6,SLBSHADOW_SAVEAREA+8(r11) | 1798 | cmpwi r0, 0 |
1716 | andis. r7,r5,SLB_ESID_V@h | 1799 | bne 1f |
1717 | beq 1f | ||
1718 | slbmte r6,r5 | ||
1719 | 1: addi r11,r11,16 | ||
1720 | .endr | ||
1721 | 1800 | ||
1722 | secondary_nap: | 1801 | /* Now read the interrupt from the ICP */ |
1723 | /* Clear our vcpu pointer so we don't come back in early */ | 1802 | ld r6, HSTATE_XICS_PHYS(r13) |
1724 | li r0, 0 | ||
1725 | std r0, HSTATE_KVM_VCPU(r13) | ||
1726 | lwsync | ||
1727 | /* Clear any pending IPI - assume we're a secondary thread */ | ||
1728 | ld r5, HSTATE_XICS_PHYS(r13) | ||
1729 | li r7, XICS_XIRR | 1803 | li r7, XICS_XIRR |
1730 | lwzcix r3, r5, r7 /* ack any pending interrupt */ | 1804 | cmpdi r6, 0 |
1731 | rlwinm. r0, r3, 0, 0xffffff /* any pending? */ | 1805 | beq- 1f |
1732 | beq 37f | 1806 | lwzcix r0, r6, r7 |
1807 | rlwinm. r3, r0, 0, 0xffffff | ||
1733 | sync | 1808 | sync |
1734 | li r0, 0xff | 1809 | beq 1f /* if nothing pending in the ICP */ |
1735 | li r6, XICS_MFRR | ||
1736 | stbcix r0, r5, r6 /* clear the IPI */ | ||
1737 | stwcix r3, r5, r7 /* EOI it */ | ||
1738 | 37: sync | ||
1739 | 1810 | ||
1740 | /* increment the nap count and then go to nap mode */ | 1811 | /* We found something in the ICP... |
1741 | ld r4, HSTATE_KVM_VCORE(r13) | 1812 | * |
1742 | addi r4, r4, VCORE_NAP_COUNT | 1813 | * If it's not an IPI, stash it in the PACA and return to |
1743 | lwsync /* make previous updates visible */ | 1814 | * the host, we don't (yet) handle directing real external |
1744 | 51: lwarx r3, 0, r4 | 1815 | * interrupts directly to the guest |
1745 | addi r3, r3, 1 | 1816 | */ |
1746 | stwcx. r3, 0, r4 | 1817 | cmpwi r3, XICS_IPI /* if there is, is it an IPI? */ |
1747 | bne 51b | 1818 | li r3, 1 |
1819 | bne 42f | ||
1748 | 1820 | ||
1749 | kvm_no_guest: | 1821 | /* It's an IPI, clear the MFRR and EOI it */ |
1750 | li r0, KVM_HWTHREAD_IN_NAP | 1822 | li r3, 0xff |
1751 | stb r0, HSTATE_HWTHREAD_STATE(r13) | 1823 | li r8, XICS_MFRR |
1824 | stbcix r3, r6, r8 /* clear the IPI */ | ||
1825 | stwcix r0, r6, r7 /* EOI it */ | ||
1826 | sync | ||
1752 | 1827 | ||
1753 | li r3, LPCR_PECE0 | 1828 | /* We need to re-check host IPI now in case it got set in the |
1754 | mfspr r4, SPRN_LPCR | 1829 | * meantime. If it's clear, we bounce the interrupt to the |
1755 | rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 | 1830 | * guest |
1756 | mtspr SPRN_LPCR, r4 | 1831 | */ |
1757 | isync | 1832 | lbz r0, HSTATE_HOST_IPI(r13) |
1758 | std r0, HSTATE_SCRATCH0(r13) | 1833 | cmpwi r0, 0 |
1759 | ptesync | 1834 | bne- 43f |
1760 | ld r0, HSTATE_SCRATCH0(r13) | 1835 | |
1761 | 1: cmpd r0, r0 | 1836 | /* OK, it's an IPI for us */ |
1762 | bne 1b | 1837 | li r3, -1 |
1763 | nap | 1838 | 1: blr |
1764 | b . | 1839 | |
1840 | 42: /* It's not an IPI and it's for the host, stash it in the PACA | ||
1841 | * before exit, it will be picked up by the host ICP driver | ||
1842 | */ | ||
1843 | stw r0, HSTATE_SAVED_XIRR(r13) | ||
1844 | b 1b | ||
1845 | |||
1846 | 43: /* We raced with the host, we need to resend that IPI, bummer */ | ||
1847 | li r0, IPI_PRIORITY | ||
1848 | stbcix r0, r6, r8 /* set the IPI */ | ||
1849 | sync | ||
1850 | b 1b | ||
1765 | 1851 | ||
1766 | /* | 1852 | /* |
1767 | * Save away FP, VMX and VSX registers. | 1853 | * Save away FP, VMX and VSX registers. |
@@ -1879,3 +1965,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
1879 | lwz r7,VCPU_VRSAVE(r4) | 1965 | lwz r7,VCPU_VRSAVE(r4) |
1880 | mtspr SPRN_VRSAVE,r7 | 1966 | mtspr SPRN_VRSAVE,r7 |
1881 | blr | 1967 | blr |
1968 | |||
1969 | /* | ||
1970 | * We come here if we get any exception or interrupt while we are | ||
1971 | * executing host real mode code while in guest MMU context. | ||
1972 | * For now just spin, but we should do something better. | ||
1973 | */ | ||
1974 | kvmppc_bad_host_intr: | ||
1975 | b . | ||
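The rmhandlers changes above add per-vcore timebase offsets: the offset is added to the timebase on guest entry and subtracted on exit, in both cases via SPRN_TBU40, which replaces only the upper 40 bits while the low 24 bits keep ticking. The second mftb and compare catch a carry out of the low 24 bits between the two reads, in which case the upper bits are bumped by one unit (the addis of 0x100, i.e. 2^24). A hedged C rendering of that arithmetic follows; a plain variable stands in for the timebase SPR, which is an assumption of the sketch, so the wrap branch is shown for its logic and would only fire on real, ticking hardware.

	#include <stdint.h>

	static uint64_t tb;                     /* stands in for mftb/SPRN_TBU40 */

	/* mtspr SPRN_TBU40: replace the upper 40 bits, keep the low 24 */
	static void set_tb_upper40(uint64_t val)
	{
		tb = (val & ~0xffffffULL) | (tb & 0xffffffULL);
	}

	static void apply_tb_offset(int64_t offset)
	{
		uint64_t before = tb;           /* mftb */
		uint64_t target = before + (uint64_t)offset;

		set_tb_upper40(target);
		/* If the low 24 bits wrapped between the two reads, the
		 * upper 40 bits just written are one unit of 2^24 too
		 * small; rewrite them, as the addis 0x100 does above. */
		if ((tb & 0xffffffULL) < (before & 0xffffffULL))
			set_tb_upper40(target + (1ULL << 24));
	}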
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index 17cfae5497a3..f4dd041c14ea 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S | |||
@@ -26,8 +26,12 @@ | |||
26 | 26 | ||
27 | #if defined(CONFIG_PPC_BOOK3S_64) | 27 | #if defined(CONFIG_PPC_BOOK3S_64) |
28 | #define FUNC(name) GLUE(.,name) | 28 | #define FUNC(name) GLUE(.,name) |
29 | #define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU | ||
30 | |||
29 | #elif defined(CONFIG_PPC_BOOK3S_32) | 31 | #elif defined(CONFIG_PPC_BOOK3S_32) |
30 | #define FUNC(name) name | 32 | #define FUNC(name) name |
33 | #define GET_SHADOW_VCPU(reg) lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2) | ||
34 | |||
31 | #endif /* CONFIG_PPC_BOOK3S_XX */ | 35 | #endif /* CONFIG_PPC_BOOK3S_XX */ |
32 | 36 | ||
33 | #define VCPU_LOAD_NVGPRS(vcpu) \ | 37 | #define VCPU_LOAD_NVGPRS(vcpu) \ |
@@ -87,8 +91,14 @@ kvm_start_entry: | |||
87 | VCPU_LOAD_NVGPRS(r4) | 91 | VCPU_LOAD_NVGPRS(r4) |
88 | 92 | ||
89 | kvm_start_lightweight: | 93 | kvm_start_lightweight: |
94 | /* Copy registers into shadow vcpu so we can access them in real mode */ | ||
95 | GET_SHADOW_VCPU(r3) | ||
96 | bl FUNC(kvmppc_copy_to_svcpu) | ||
97 | nop | ||
98 | REST_GPR(4, r1) | ||
90 | 99 | ||
91 | #ifdef CONFIG_PPC_BOOK3S_64 | 100 | #ifdef CONFIG_PPC_BOOK3S_64 |
101 | /* Get the dcbz32 flag */ | ||
92 | PPC_LL r3, VCPU_HFLAGS(r4) | 102 | PPC_LL r3, VCPU_HFLAGS(r4) |
93 | rldicl r3, r3, 0, 63 /* r3 &= 1 */ | 103 | rldicl r3, r3, 0, 63 /* r3 &= 1 */ |
94 | stb r3, HSTATE_RESTORE_HID5(r13) | 104 | stb r3, HSTATE_RESTORE_HID5(r13) |
@@ -111,9 +121,6 @@ kvm_start_lightweight: | |||
111 | * | 121 | * |
112 | */ | 122 | */ |
113 | 123 | ||
114 | .global kvmppc_handler_highmem | ||
115 | kvmppc_handler_highmem: | ||
116 | |||
117 | /* | 124 | /* |
118 | * Register usage at this point: | 125 | * Register usage at this point: |
119 | * | 126 | * |
@@ -125,18 +132,31 @@ kvmppc_handler_highmem: | |||
125 | * | 132 | * |
126 | */ | 133 | */ |
127 | 134 | ||
128 | /* R7 = vcpu */ | 135 | /* Transfer reg values from shadow vcpu back to vcpu struct */ |
129 | PPC_LL r7, GPR4(r1) | 136 | /* On 64-bit, interrupts are still off at this point */ |
137 | PPC_LL r3, GPR4(r1) /* vcpu pointer */ | ||
138 | GET_SHADOW_VCPU(r4) | ||
139 | bl FUNC(kvmppc_copy_from_svcpu) | ||
140 | nop | ||
130 | 141 | ||
131 | #ifdef CONFIG_PPC_BOOK3S_64 | 142 | #ifdef CONFIG_PPC_BOOK3S_64 |
143 | /* Re-enable interrupts */ | ||
144 | ld r3, HSTATE_HOST_MSR(r13) | ||
145 | ori r3, r3, MSR_EE | ||
146 | MTMSR_EERI(r3) | ||
147 | |||
132 | /* | 148 | /* |
133 | * Reload kernel SPRG3 value. | 149 | * Reload kernel SPRG3 value. |
134 | * No need to save guest value as usermode can't modify SPRG3. | 150 | * No need to save guest value as usermode can't modify SPRG3. |
135 | */ | 151 | */ |
136 | ld r3, PACA_SPRG3(r13) | 152 | ld r3, PACA_SPRG3(r13) |
137 | mtspr SPRN_SPRG3, r3 | 153 | mtspr SPRN_SPRG3, r3 |
154 | |||
138 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 155 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
139 | 156 | ||
157 | /* R7 = vcpu */ | ||
158 | PPC_LL r7, GPR4(r1) | ||
159 | |||
140 | PPC_STL r14, VCPU_GPR(R14)(r7) | 160 | PPC_STL r14, VCPU_GPR(R14)(r7) |
141 | PPC_STL r15, VCPU_GPR(R15)(r7) | 161 | PPC_STL r15, VCPU_GPR(R15)(r7) |
142 | PPC_STL r16, VCPU_GPR(R16)(r7) | 162 | PPC_STL r16, VCPU_GPR(R16)(r7) |
@@ -161,7 +181,7 @@ kvmppc_handler_highmem: | |||
161 | 181 | ||
162 | /* Restore r3 (kvm_run) and r4 (vcpu) */ | 182 | /* Restore r3 (kvm_run) and r4 (vcpu) */ |
163 | REST_2GPRS(3, r1) | 183 | REST_2GPRS(3, r1) |
164 | bl FUNC(kvmppc_handle_exit) | 184 | bl FUNC(kvmppc_handle_exit_pr) |
165 | 185 | ||
166 | /* If RESUME_GUEST, get back in the loop */ | 186 | /* If RESUME_GUEST, get back in the loop */ |
167 | cmpwi r3, RESUME_GUEST | 187 | cmpwi r3, RESUME_GUEST
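With the highmem handler label gone, book3s_interrupts.S now brackets the real-mode excursion with explicit copies: kvmppc_copy_to_svcpu before entry, kvmppc_copy_from_svcpu on the way back (still with interrupts off on 64-bit), then kvmppc_handle_exit_pr decides whether to loop on RESUME_GUEST. Here is a C outline of that control flow; the enter_guest_realmode helper, the simplified types, and the RESUME_GUEST value are hypothetical stand-ins for illustration.

	/* Hedged C outline of the entry/exit path set up above. */
	struct vcpu;
	struct svcpu;

	extern void kvmppc_copy_to_svcpu(struct svcpu *s, struct vcpu *v);
	extern void kvmppc_copy_from_svcpu(struct vcpu *v, struct svcpu *s);
	extern int  enter_guest_realmode(struct svcpu *s);  /* stand-in */
	extern int  kvmppc_handle_exit_pr(void *run, struct vcpu *v,
					  int exit_nr);

	#define RESUME_GUEST 1          /* assumed value, for illustration */

	static int run_loop_sketch(void *run, struct vcpu *v, struct svcpu *s)
	{
		int r = RESUME_GUEST;

		while (r == RESUME_GUEST) {
			/* registers the real-mode code touches go into
			 * the shadow vcpu first */
			kvmppc_copy_to_svcpu(s, v);
			int exit_nr = enter_guest_realmode(s);
			/* on 64-bit, interrupts are still off here; copy
			 * state back before re-enabling them */
			kvmppc_copy_from_svcpu(v, s);
			r = kvmppc_handle_exit_pr(run, v, exit_nr);
		}
		return r;
	}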
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c index da8b13c4b776..5a1ab1250a05 100644 --- a/arch/powerpc/kvm/book3s_mmu_hpte.c +++ b/arch/powerpc/kvm/book3s_mmu_hpte.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <asm/mmu_context.h> | 28 | #include <asm/mmu_context.h> |
29 | #include <asm/hw_irq.h> | 29 | #include <asm/hw_irq.h> |
30 | 30 | ||
31 | #include "trace.h" | 31 | #include "trace_pr.h" |
32 | 32 | ||
33 | #define PTE_SIZE 12 | 33 | #define PTE_SIZE 12 |
34 | 34 | ||
@@ -56,6 +56,14 @@ static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage) | |||
56 | HPTEG_HASH_BITS_VPTE_LONG); | 56 | HPTEG_HASH_BITS_VPTE_LONG); |
57 | } | 57 | } |
58 | 58 | ||
59 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
60 | static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage) | ||
61 | { | ||
62 | return hash_64((vpage & 0xffffffff0ULL) >> 4, | ||
63 | HPTEG_HASH_BITS_VPTE_64K); | ||
64 | } | ||
65 | #endif | ||
66 | |||
59 | void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | 67 | void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) |
60 | { | 68 | { |
61 | u64 index; | 69 | u64 index; |
@@ -83,6 +91,15 @@ void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | |||
83 | hlist_add_head_rcu(&pte->list_vpte_long, | 91 | hlist_add_head_rcu(&pte->list_vpte_long, |
84 | &vcpu3s->hpte_hash_vpte_long[index]); | 92 | &vcpu3s->hpte_hash_vpte_long[index]); |
85 | 93 | ||
94 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
95 | /* Add to vPTE_64k list */ | ||
96 | index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage); | ||
97 | hlist_add_head_rcu(&pte->list_vpte_64k, | ||
98 | &vcpu3s->hpte_hash_vpte_64k[index]); | ||
99 | #endif | ||
100 | |||
101 | vcpu3s->hpte_cache_count++; | ||
102 | |||
86 | spin_unlock(&vcpu3s->mmu_lock); | 103 | spin_unlock(&vcpu3s->mmu_lock); |
87 | } | 104 | } |
88 | 105 | ||
@@ -113,10 +130,13 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | |||
113 | hlist_del_init_rcu(&pte->list_pte_long); | 130 | hlist_del_init_rcu(&pte->list_pte_long); |
114 | hlist_del_init_rcu(&pte->list_vpte); | 131 | hlist_del_init_rcu(&pte->list_vpte); |
115 | hlist_del_init_rcu(&pte->list_vpte_long); | 132 | hlist_del_init_rcu(&pte->list_vpte_long); |
133 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
134 | hlist_del_init_rcu(&pte->list_vpte_64k); | ||
135 | #endif | ||
136 | vcpu3s->hpte_cache_count--; | ||
116 | 137 | ||
117 | spin_unlock(&vcpu3s->mmu_lock); | 138 | spin_unlock(&vcpu3s->mmu_lock); |
118 | 139 | ||
119 | vcpu3s->hpte_cache_count--; | ||
120 | call_rcu(&pte->rcu_head, free_pte_rcu); | 140 | call_rcu(&pte->rcu_head, free_pte_rcu); |
121 | } | 141 | } |
122 | 142 | ||
@@ -219,6 +239,29 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp) | |||
219 | rcu_read_unlock(); | 239 | rcu_read_unlock(); |
220 | } | 240 | } |
221 | 241 | ||
242 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
243 | /* Flush with mask 0xffffffff0 */ | ||
244 | static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp) | ||
245 | { | ||
246 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | ||
247 | struct hlist_head *list; | ||
248 | struct hpte_cache *pte; | ||
249 | u64 vp_mask = 0xffffffff0ULL; | ||
250 | |||
251 | list = &vcpu3s->hpte_hash_vpte_64k[ | ||
252 | kvmppc_mmu_hash_vpte_64k(guest_vp)]; | ||
253 | |||
254 | rcu_read_lock(); | ||
255 | |||
256 | /* Check the list for matching entries and invalidate */ | ||
257 | hlist_for_each_entry_rcu(pte, list, list_vpte_64k) | ||
258 | if ((pte->pte.vpage & vp_mask) == guest_vp) | ||
259 | invalidate_pte(vcpu, pte); | ||
260 | |||
261 | rcu_read_unlock(); | ||
262 | } | ||
263 | #endif | ||
264 | |||
222 | /* Flush with mask 0xffffff000 */ | 265 | /* Flush with mask 0xffffff000 */ |
223 | static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) | 266 | static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) |
224 | { | 267 | { |
@@ -249,6 +292,11 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) | |||
249 | case 0xfffffffffULL: | 292 | case 0xfffffffffULL: |
250 | kvmppc_mmu_pte_vflush_short(vcpu, guest_vp); | 293 | kvmppc_mmu_pte_vflush_short(vcpu, guest_vp); |
251 | break; | 294 | break; |
295 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
296 | case 0xffffffff0ULL: | ||
297 | kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp); | ||
298 | break; | ||
299 | #endif | ||
252 | case 0xffffff000ULL: | 300 | case 0xffffff000ULL: |
253 | kvmppc_mmu_pte_vflush_long(vcpu, guest_vp); | 301 | kvmppc_mmu_pte_vflush_long(vcpu, guest_vp); |
254 | break; | 302 | break; |
@@ -285,15 +333,19 @@ struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu) | |||
285 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | 333 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
286 | struct hpte_cache *pte; | 334 | struct hpte_cache *pte; |
287 | 335 | ||
288 | pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL); | ||
289 | vcpu3s->hpte_cache_count++; | ||
290 | |||
291 | if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM) | 336 | if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM) |
292 | kvmppc_mmu_pte_flush_all(vcpu); | 337 | kvmppc_mmu_pte_flush_all(vcpu); |
293 | 338 | ||
339 | pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL); | ||
340 | |||
294 | return pte; | 341 | return pte; |
295 | } | 342 | } |
296 | 343 | ||
344 | void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte) | ||
345 | { | ||
346 | kmem_cache_free(hpte_cache, pte); | ||
347 | } | ||
348 | |||
297 | void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu) | 349 | void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu) |
298 | { | 350 | { |
299 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | 351 | kvmppc_mmu_pte_flush(vcpu, 0, 0); |
@@ -320,6 +372,10 @@ int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu) | |||
320 | ARRAY_SIZE(vcpu3s->hpte_hash_vpte)); | 372 | ARRAY_SIZE(vcpu3s->hpte_hash_vpte)); |
321 | kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long, | 373 | kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long, |
322 | ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long)); | 374 | ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long)); |
375 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
376 | kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k, | ||
377 | ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k)); | ||
378 | #endif | ||
323 | 379 | ||
324 | spin_lock_init(&vcpu3s->mmu_lock); | 380 | spin_lock_init(&vcpu3s->mmu_lock); |
325 | 381 | ||
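Two ordering fixes in the book3s_mmu_hpte.c hunks are easy to miss: hpte_cache_count is now incremented and decremented while mmu_lock is held, so the counter can no longer drift from the hash lists it describes, and kvmppc_mmu_hpte_cache_next() checks the limit (flushing if full) before allocating rather than counting an entry that may never be mapped. A minimal sketch of the locking rule, with a pthread mutex standing in for the kernel spinlock and the list operations elided:

	#include <pthread.h>

	struct cache {
		pthread_mutex_t lock;   /* plays the role of mmu_lock */
		int count;              /* number of entries on the lists */
	};

	static void cache_add(struct cache *c /* , entry */)
	{
		pthread_mutex_lock(&c->lock);
		/* ... hlist_add_head_rcu(...) on each hash chain ... */
		c->count++;             /* updated while the lists change */
		pthread_mutex_unlock(&c->lock);
	}

	static void cache_del(struct cache *c /* , entry */)
	{
		pthread_mutex_lock(&c->lock);
		/* ... hlist_del_init_rcu(...) from each hash chain ... */
		c->count--;             /* no longer after the unlock */
		pthread_mutex_unlock(&c->lock);
	}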
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 27db1e665959..df36cf2ed22b 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -40,8 +40,12 @@ | |||
40 | #include <linux/sched.h> | 40 | #include <linux/sched.h> |
41 | #include <linux/vmalloc.h> | 41 | #include <linux/vmalloc.h> |
42 | #include <linux/highmem.h> | 42 | #include <linux/highmem.h> |
43 | #include <linux/module.h> | ||
43 | 44 | ||
44 | #include "trace.h" | 45 | #include "book3s.h" |
46 | |||
47 | #define CREATE_TRACE_POINTS | ||
48 | #include "trace_pr.h" | ||
45 | 49 | ||
46 | /* #define EXIT_DEBUG */ | 50 | /* #define EXIT_DEBUG */ |
47 | /* #define DEBUG_EXT */ | 51 | /* #define DEBUG_EXT */ |
@@ -56,29 +60,25 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | |||
56 | #define HW_PAGE_SIZE PAGE_SIZE | 60 | #define HW_PAGE_SIZE PAGE_SIZE |
57 | #endif | 61 | #endif |
58 | 62 | ||
59 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 63 | static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) |
60 | { | 64 | { |
61 | #ifdef CONFIG_PPC_BOOK3S_64 | 65 | #ifdef CONFIG_PPC_BOOK3S_64 |
62 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 66 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
63 | memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); | 67 | memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); |
64 | memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu, | ||
65 | sizeof(get_paca()->shadow_vcpu)); | ||
66 | svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; | 68 | svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; |
67 | svcpu_put(svcpu); | 69 | svcpu_put(svcpu); |
68 | #endif | 70 | #endif |
69 | vcpu->cpu = smp_processor_id(); | 71 | vcpu->cpu = smp_processor_id(); |
70 | #ifdef CONFIG_PPC_BOOK3S_32 | 72 | #ifdef CONFIG_PPC_BOOK3S_32 |
71 | current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu; | 73 | current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; |
72 | #endif | 74 | #endif |
73 | } | 75 | } |
74 | 76 | ||
75 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | 77 | static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) |
76 | { | 78 | { |
77 | #ifdef CONFIG_PPC_BOOK3S_64 | 79 | #ifdef CONFIG_PPC_BOOK3S_64 |
78 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 80 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
79 | memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); | 81 | memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); |
80 | memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu, | ||
81 | sizeof(get_paca()->shadow_vcpu)); | ||
82 | to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; | 82 | to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; |
83 | svcpu_put(svcpu); | 83 | svcpu_put(svcpu); |
84 | #endif | 84 | #endif |
@@ -87,7 +87,61 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | |||
87 | vcpu->cpu = -1; | 87 | vcpu->cpu = -1; |
88 | } | 88 | } |
89 | 89 | ||
90 | int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) | 90 | /* Copy data needed by real-mode code from vcpu to shadow vcpu */ |
91 | void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, | ||
92 | struct kvm_vcpu *vcpu) | ||
93 | { | ||
94 | svcpu->gpr[0] = vcpu->arch.gpr[0]; | ||
95 | svcpu->gpr[1] = vcpu->arch.gpr[1]; | ||
96 | svcpu->gpr[2] = vcpu->arch.gpr[2]; | ||
97 | svcpu->gpr[3] = vcpu->arch.gpr[3]; | ||
98 | svcpu->gpr[4] = vcpu->arch.gpr[4]; | ||
99 | svcpu->gpr[5] = vcpu->arch.gpr[5]; | ||
100 | svcpu->gpr[6] = vcpu->arch.gpr[6]; | ||
101 | svcpu->gpr[7] = vcpu->arch.gpr[7]; | ||
102 | svcpu->gpr[8] = vcpu->arch.gpr[8]; | ||
103 | svcpu->gpr[9] = vcpu->arch.gpr[9]; | ||
104 | svcpu->gpr[10] = vcpu->arch.gpr[10]; | ||
105 | svcpu->gpr[11] = vcpu->arch.gpr[11]; | ||
106 | svcpu->gpr[12] = vcpu->arch.gpr[12]; | ||
107 | svcpu->gpr[13] = vcpu->arch.gpr[13]; | ||
108 | svcpu->cr = vcpu->arch.cr; | ||
109 | svcpu->xer = vcpu->arch.xer; | ||
110 | svcpu->ctr = vcpu->arch.ctr; | ||
111 | svcpu->lr = vcpu->arch.lr; | ||
112 | svcpu->pc = vcpu->arch.pc; | ||
113 | } | ||
114 | |||
115 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ | ||
116 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | ||
117 | struct kvmppc_book3s_shadow_vcpu *svcpu) | ||
118 | { | ||
119 | vcpu->arch.gpr[0] = svcpu->gpr[0]; | ||
120 | vcpu->arch.gpr[1] = svcpu->gpr[1]; | ||
121 | vcpu->arch.gpr[2] = svcpu->gpr[2]; | ||
122 | vcpu->arch.gpr[3] = svcpu->gpr[3]; | ||
123 | vcpu->arch.gpr[4] = svcpu->gpr[4]; | ||
124 | vcpu->arch.gpr[5] = svcpu->gpr[5]; | ||
125 | vcpu->arch.gpr[6] = svcpu->gpr[6]; | ||
126 | vcpu->arch.gpr[7] = svcpu->gpr[7]; | ||
127 | vcpu->arch.gpr[8] = svcpu->gpr[8]; | ||
128 | vcpu->arch.gpr[9] = svcpu->gpr[9]; | ||
129 | vcpu->arch.gpr[10] = svcpu->gpr[10]; | ||
130 | vcpu->arch.gpr[11] = svcpu->gpr[11]; | ||
131 | vcpu->arch.gpr[12] = svcpu->gpr[12]; | ||
132 | vcpu->arch.gpr[13] = svcpu->gpr[13]; | ||
133 | vcpu->arch.cr = svcpu->cr; | ||
134 | vcpu->arch.xer = svcpu->xer; | ||
135 | vcpu->arch.ctr = svcpu->ctr; | ||
136 | vcpu->arch.lr = svcpu->lr; | ||
137 | vcpu->arch.pc = svcpu->pc; | ||
138 | vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; | ||
139 | vcpu->arch.fault_dar = svcpu->fault_dar; | ||
140 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; | ||
141 | vcpu->arch.last_inst = svcpu->last_inst; | ||
142 | } | ||
143 | |||
144 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) | ||
91 | { | 145 | { |
92 | int r = 1; /* Indicate we want to get back into the guest */ | 146 | int r = 1; /* Indicate we want to get back into the guest */ |
93 | 147 | ||
@@ -100,44 +154,69 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) | |||
100 | } | 154 | } |
101 | 155 | ||
102 | /************* MMU Notifiers *************/ | 156 | /************* MMU Notifiers *************/ |
157 | static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start, | ||
158 | unsigned long end) | ||
159 | { | ||
160 | long i; | ||
161 | struct kvm_vcpu *vcpu; | ||
162 | struct kvm_memslots *slots; | ||
163 | struct kvm_memory_slot *memslot; | ||
164 | |||
165 | slots = kvm_memslots(kvm); | ||
166 | kvm_for_each_memslot(memslot, slots) { | ||
167 | unsigned long hva_start, hva_end; | ||
168 | gfn_t gfn, gfn_end; | ||
169 | |||
170 | hva_start = max(start, memslot->userspace_addr); | ||
171 | hva_end = min(end, memslot->userspace_addr + | ||
172 | (memslot->npages << PAGE_SHIFT)); | ||
173 | if (hva_start >= hva_end) | ||
174 | continue; | ||
175 | /* | ||
176 | * {gfn(page) | page intersects with [hva_start, hva_end)} = | ||
177 | * {gfn, gfn+1, ..., gfn_end-1}. | ||
178 | */ | ||
179 | gfn = hva_to_gfn_memslot(hva_start, memslot); | ||
180 | gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); | ||
181 | kvm_for_each_vcpu(i, vcpu, kvm) | ||
182 | kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT, | ||
183 | gfn_end << PAGE_SHIFT); | ||
184 | } | ||
185 | } | ||
103 | 186 | ||
104 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | 187 | static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva) |
105 | { | 188 | { |
106 | trace_kvm_unmap_hva(hva); | 189 | trace_kvm_unmap_hva(hva); |
107 | 190 | ||
108 | /* | 191 | do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE); |
109 | * Flush all shadow tlb entries everywhere. This is slow, but | ||
110 | * we are 100% sure that we catch the to be unmapped page | ||
111 | */ | ||
112 | kvm_flush_remote_tlbs(kvm); | ||
113 | 192 | ||
114 | return 0; | 193 | return 0; |
115 | } | 194 | } |
116 | 195 | ||
117 | int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) | 196 | static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start, |
197 | unsigned long end) | ||
118 | { | 198 | { |
119 | /* kvm_unmap_hva flushes everything anyways */ | 199 | do_kvm_unmap_hva(kvm, start, end); |
120 | kvm_unmap_hva(kvm, start); | ||
121 | 200 | ||
122 | return 0; | 201 | return 0; |
123 | } | 202 | } |
124 | 203 | ||
125 | int kvm_age_hva(struct kvm *kvm, unsigned long hva) | 204 | static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva) |
126 | { | 205 | { |
127 | /* XXX could be more clever ;) */ | 206 | /* XXX could be more clever ;) */ |
128 | return 0; | 207 | return 0; |
129 | } | 208 | } |
130 | 209 | ||
131 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) | 210 | static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva) |
132 | { | 211 | { |
133 | /* XXX could be more clever ;) */ | 212 | /* XXX could be more clever ;) */ |
134 | return 0; | 213 | return 0; |
135 | } | 214 | } |
136 | 215 | ||
137 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) | 216 | static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte) |
138 | { | 217 | { |
139 | /* The page will get remapped properly on its next fault */ | 218 | /* The page will get remapped properly on its next fault */ |
140 | kvm_unmap_hva(kvm, hva); | 219 | do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE); |
141 | } | 220 | } |
142 | 221 | ||
143 | /*****************************************/ | 222 | /*****************************************/ |
@@ -159,7 +238,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) | |||
159 | vcpu->arch.shadow_msr = smsr; | 238 | vcpu->arch.shadow_msr = smsr; |
160 | } | 239 | } |
161 | 240 | ||
162 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) | 241 | static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) |
163 | { | 242 | { |
164 | ulong old_msr = vcpu->arch.shared->msr; | 243 | ulong old_msr = vcpu->arch.shared->msr; |
165 | 244 | ||
@@ -219,7 +298,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) | |||
219 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); | 298 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
220 | } | 299 | } |
221 | 300 | ||
222 | void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) | 301 | void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) |
223 | { | 302 | { |
224 | u32 host_pvr; | 303 | u32 host_pvr; |
225 | 304 | ||
@@ -256,6 +335,23 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) | |||
256 | if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be")) | 335 | if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be")) |
257 | to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1); | 336 | to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1); |
258 | 337 | ||
338 | /* | ||
339 | * If they're asking for POWER6 or later, set the flag | ||
340 | * indicating that we can do multiple large page sizes | ||
341 | * and 1TB segments. | ||
342 | * Also set the flag that indicates that tlbie has the large | ||
343 | * page bit in the RB operand instead of the instruction. | ||
344 | */ | ||
345 | switch (PVR_VER(pvr)) { | ||
346 | case PVR_POWER6: | ||
347 | case PVR_POWER7: | ||
348 | case PVR_POWER7p: | ||
349 | case PVR_POWER8: | ||
350 | vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE | | ||
351 | BOOK3S_HFLAG_NEW_TLBIE; | ||
352 | break; | ||
353 | } | ||
354 | |||
259 | #ifdef CONFIG_PPC_BOOK3S_32 | 355 | #ifdef CONFIG_PPC_BOOK3S_32 |
260 | /* 32 bit Book3S always has 32 byte dcbz */ | 356 | /* 32 bit Book3S always has 32 byte dcbz */ |
261 | vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; | 357 | vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; |
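
BOOK3S_HFLAG_NEW_TLBIE records an ISA difference rather than a feature: on POWER6 and later the large-page bit of a tlbie travels in the RB register operand, while older CPUs encode it in the instruction itself. A hypothetical consumer of the flag is sketched below; the exact RB bit position is an illustrative assumption, not lifted from this patch.

        /* Illustrative only: how a tlbie emulation path might branch
         * on the new hflag.  The RB encoding shown is an assumption. */
        static unsigned long sketch_tlbie_rb(struct kvm_vcpu *vcpu,
                                             unsigned long va, bool large)
        {
                unsigned long rb = va & ~0xfffUL;   /* effective page address */

                if (large && (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE))
                        rb |= 1;        /* L bit travels in RB on POWER6+ */

                return rb;              /* pre-POWER6: L is in the instruction */
        }
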
@@ -334,6 +430,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
334 | ulong eaddr, int vec) | 430 | ulong eaddr, int vec) |
335 | { | 431 | { |
336 | bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE); | 432 | bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE); |
433 | bool iswrite = false; | ||
337 | int r = RESUME_GUEST; | 434 | int r = RESUME_GUEST; |
338 | int relocated; | 435 | int relocated; |
339 | int page_found = 0; | 436 | int page_found = 0; |
@@ -344,10 +441,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
344 | u64 vsid; | 441 | u64 vsid; |
345 | 442 | ||
346 | relocated = data ? dr : ir; | 443 | relocated = data ? dr : ir; |
444 | if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE)) | ||
445 | iswrite = true; | ||
347 | 446 | ||
348 | /* Resolve real address if translation turned on */ | 447 | /* Resolve real address if translation turned on */ |
349 | if (relocated) { | 448 | if (relocated) { |
350 | page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data); | 449 | page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite); |
351 | } else { | 450 | } else { |
352 | pte.may_execute = true; | 451 | pte.may_execute = true; |
353 | pte.may_read = true; | 452 | pte.may_read = true; |
@@ -355,6 +454,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
355 | pte.raddr = eaddr & KVM_PAM; | 454 | pte.raddr = eaddr & KVM_PAM; |
356 | pte.eaddr = eaddr; | 455 | pte.eaddr = eaddr; |
357 | pte.vpage = eaddr >> 12; | 456 | pte.vpage = eaddr >> 12; |
457 | pte.page_size = MMU_PAGE_64K; | ||
358 | } | 458 | } |
359 | 459 | ||
360 | switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { | 460 | switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { |
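
Passing iswrite down into the guest MMU's xlate() lets the translator distinguish a store through a read-only guest PTE (a protection fault, -EPERM) from a missing translation. A minimal sketch of the check an xlate() implementation could now perform; the helper name is hypothetical and not part of this patch.

        /* Hypothetical permission check inside an xlate() implementation */
        static int sketch_xlate_perm(struct kvmppc_pte *pte, bool iswrite)
        {
                if (iswrite && !pte->may_write)
                        return -EPERM;  /* store through a read-only guest PTE */
                if (!iswrite && !pte->may_read)
                        return -EPERM;
                return 0;               /* translation is usable */
        }
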
@@ -388,22 +488,18 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
388 | 488 | ||
389 | if (page_found == -ENOENT) { | 489 | if (page_found == -ENOENT) { |
390 | /* Page not found in guest PTE entries */ | 490 | /* Page not found in guest PTE entries */ |
391 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | ||
392 | vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); | 491 | vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); |
393 | vcpu->arch.shared->dsisr = svcpu->fault_dsisr; | 492 | vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr; |
394 | vcpu->arch.shared->msr |= | 493 | vcpu->arch.shared->msr |= |
395 | (svcpu->shadow_srr1 & 0x00000000f8000000ULL); | 494 | vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL; |
396 | svcpu_put(svcpu); | ||
397 | kvmppc_book3s_queue_irqprio(vcpu, vec); | 495 | kvmppc_book3s_queue_irqprio(vcpu, vec); |
398 | } else if (page_found == -EPERM) { | 496 | } else if (page_found == -EPERM) { |
399 | /* Storage protection */ | 497 | /* Storage protection */ |
400 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | ||
401 | vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); | 498 | vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); |
402 | vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE; | 499 | vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE; |
403 | vcpu->arch.shared->dsisr |= DSISR_PROTFAULT; | 500 | vcpu->arch.shared->dsisr |= DSISR_PROTFAULT; |
404 | vcpu->arch.shared->msr |= | 501 | vcpu->arch.shared->msr |= |
405 | svcpu->shadow_srr1 & 0x00000000f8000000ULL; | 502 | vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL; |
406 | svcpu_put(svcpu); | ||
407 | kvmppc_book3s_queue_irqprio(vcpu, vec); | 503 | kvmppc_book3s_queue_irqprio(vcpu, vec); |
408 | } else if (page_found == -EINVAL) { | 504 | } else if (page_found == -EINVAL) { |
409 | /* Page not found in guest SLB */ | 505 | /* Page not found in guest SLB */ |
@@ -411,12 +507,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
411 | kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); | 507 | kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); |
412 | } else if (!is_mmio && | 508 | } else if (!is_mmio && |
413 | kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { | 509 | kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { |
510 | if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { | ||
511 | /* | ||
512 | * There is already a host HPTE there, presumably | ||
513 | * a read-only one for a page the guest thinks | ||
514 | * is writable, so get rid of it first. | ||
515 | */ | ||
516 | kvmppc_mmu_unmap_page(vcpu, &pte); | ||
517 | } | ||
414 | /* The guest's PTE is not mapped yet. Map on the host */ | 518 | /* The guest's PTE is not mapped yet. Map on the host */ |
415 | kvmppc_mmu_map_page(vcpu, &pte); | 519 | kvmppc_mmu_map_page(vcpu, &pte, iswrite); |
416 | if (data) | 520 | if (data) |
417 | vcpu->stat.sp_storage++; | 521 | vcpu->stat.sp_storage++; |
418 | else if (vcpu->arch.mmu.is_dcbz32(vcpu) && | 522 | else if (vcpu->arch.mmu.is_dcbz32(vcpu) && |
419 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) | 523 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) |
420 | kvmppc_patch_dcbz(vcpu, &pte); | 524 | kvmppc_patch_dcbz(vcpu, &pte); |
421 | } else { | 525 | } else { |
422 | /* MMIO */ | 526 | /* MMIO */ |
@@ -619,13 +723,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) | |||
619 | 723 | ||
620 | if (lost_ext & MSR_FP) | 724 | if (lost_ext & MSR_FP) |
621 | kvmppc_load_up_fpu(); | 725 | kvmppc_load_up_fpu(); |
726 | #ifdef CONFIG_ALTIVEC | ||
622 | if (lost_ext & MSR_VEC) | 727 | if (lost_ext & MSR_VEC) |
623 | kvmppc_load_up_altivec(); | 728 | kvmppc_load_up_altivec(); |
729 | #endif | ||
624 | current->thread.regs->msr |= lost_ext; | 730 | current->thread.regs->msr |= lost_ext; |
625 | } | 731 | } |
626 | 732 | ||
627 | int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | 733 | int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, |
628 | unsigned int exit_nr) | 734 | unsigned int exit_nr) |
629 | { | 735 | { |
630 | int r = RESUME_HOST; | 736 | int r = RESUME_HOST; |
631 | int s; | 737 | int s; |
@@ -643,25 +749,32 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
643 | switch (exit_nr) { | 749 | switch (exit_nr) { |
644 | case BOOK3S_INTERRUPT_INST_STORAGE: | 750 | case BOOK3S_INTERRUPT_INST_STORAGE: |
645 | { | 751 | { |
646 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 752 | ulong shadow_srr1 = vcpu->arch.shadow_srr1; |
647 | ulong shadow_srr1 = svcpu->shadow_srr1; | ||
648 | vcpu->stat.pf_instruc++; | 753 | vcpu->stat.pf_instruc++; |
649 | 754 | ||
650 | #ifdef CONFIG_PPC_BOOK3S_32 | 755 | #ifdef CONFIG_PPC_BOOK3S_32 |
651 | /* We mark segments as unused when invalidating them, so treat | 756 | /* We mark segments as unused when invalidating them, so treat |
652 | * the respective fault as a segment fault. */ | 757 | * the respective fault as a segment fault. */ |
653 | if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) { | 758 | { |
654 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); | 759 | struct kvmppc_book3s_shadow_vcpu *svcpu; |
655 | r = RESUME_GUEST; | 760 | u32 sr; |
761 | |||
762 | svcpu = svcpu_get(vcpu); | ||
763 | sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]; | ||
656 | svcpu_put(svcpu); | 764 | svcpu_put(svcpu); |
657 | break; | 765 | if (sr == SR_INVALID) { |
766 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); | ||
767 | r = RESUME_GUEST; | ||
768 | break; | ||
769 | } | ||
658 | } | 770 | } |
659 | #endif | 771 | #endif |
660 | svcpu_put(svcpu); | ||
661 | 772 | ||
662 | /* only care about PTEG not found errors, but leave NX alone */ | 773 | /* only care about PTEG not found errors, but leave NX alone */ |
663 | if (shadow_srr1 & 0x40000000) { | 774 | if (shadow_srr1 & 0x40000000) { |
775 | int idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
664 | r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); | 776 | r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); |
777 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
665 | vcpu->stat.sp_instruc++; | 778 | vcpu->stat.sp_instruc++; |
666 | } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && | 779 | } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && |
667 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { | 780 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { |
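
The new srcu_read_lock/unlock pair is not incidental: kvmppc_handle_pagefault() now walks memslots (via kvmppc_visible_gfn() and the map/unmap paths), and KVM's memslot array is protected by SRCU. The usage pattern in isolation, restated as a hypothetical wrapper:

        /* Hypothetical wrapper showing the SRCU pattern used above: any
         * path that resolves guest addresses against memslots must hold
         * the srcu read lock for the duration of the lookup. */
        static int with_memslots(struct kvm_vcpu *vcpu,
                                 int (*fn)(struct kvm_vcpu *vcpu))
        {
                int idx, r;

                idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = fn(vcpu);
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                return r;
        }
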
@@ -682,25 +795,36 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
682 | case BOOK3S_INTERRUPT_DATA_STORAGE: | 795 | case BOOK3S_INTERRUPT_DATA_STORAGE: |
683 | { | 796 | { |
684 | ulong dar = kvmppc_get_fault_dar(vcpu); | 797 | ulong dar = kvmppc_get_fault_dar(vcpu); |
685 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 798 | u32 fault_dsisr = vcpu->arch.fault_dsisr; |
686 | u32 fault_dsisr = svcpu->fault_dsisr; | ||
687 | vcpu->stat.pf_storage++; | 799 | vcpu->stat.pf_storage++; |
688 | 800 | ||
689 | #ifdef CONFIG_PPC_BOOK3S_32 | 801 | #ifdef CONFIG_PPC_BOOK3S_32 |
690 | /* We mark segments as unused when invalidating them, so treat | 802 | /* We mark segments as unused when invalidating them, so treat |
691 | * the respective fault as a segment fault. */ | 803 | * the respective fault as a segment fault. */ |
692 | if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) { | 804 | { |
693 | kvmppc_mmu_map_segment(vcpu, dar); | 805 | struct kvmppc_book3s_shadow_vcpu *svcpu; |
694 | r = RESUME_GUEST; | 806 | u32 sr; |
807 | |||
808 | svcpu = svcpu_get(vcpu); | ||
809 | sr = svcpu->sr[dar >> SID_SHIFT]; | ||
695 | svcpu_put(svcpu); | 810 | svcpu_put(svcpu); |
696 | break; | 811 | if (sr == SR_INVALID) { |
812 | kvmppc_mmu_map_segment(vcpu, dar); | ||
813 | r = RESUME_GUEST; | ||
814 | break; | ||
815 | } | ||
697 | } | 816 | } |
698 | #endif | 817 | #endif |
699 | svcpu_put(svcpu); | ||
700 | 818 | ||
701 | /* The only case we need to handle is missing shadow PTEs */ | 819 | /* |
702 | if (fault_dsisr & DSISR_NOHPTE) { | 820 | * We need to handle missing shadow PTEs, and |
821 | * protection faults due to us mapping a page read-only | ||
822 | * when the guest thinks it is writable. | ||
823 | */ | ||
824 | if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) { | ||
825 | int idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
703 | r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); | 826 | r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); |
827 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
704 | } else { | 828 | } else { |
705 | vcpu->arch.shared->dar = dar; | 829 | vcpu->arch.shared->dar = dar; |
706 | vcpu->arch.shared->dsisr = fault_dsisr; | 830 | vcpu->arch.shared->dsisr = fault_dsisr; |
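
With writable-page tracking, a protection fault is no longer automatically the guest's problem: it may simply mean the host installed a read-only HPTE for a page the guest maps writable. The dispatch condition above, restated as a small predicate (sketch, not committed code):

        /* Restatement of the dispatch above: DSIs the PR code now tries
         * to resolve itself instead of reflecting to the guest. */
        static bool sketch_pr_handles_dsi(u32 fault_dsisr)
        {
                /* missing shadow HPTE, or a write to a host-side
                 * read-only mapping of a guest-writable page */
                return (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) != 0;
        }
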
@@ -743,13 +867,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
743 | case BOOK3S_INTERRUPT_H_EMUL_ASSIST: | 867 | case BOOK3S_INTERRUPT_H_EMUL_ASSIST: |
744 | { | 868 | { |
745 | enum emulation_result er; | 869 | enum emulation_result er; |
746 | struct kvmppc_book3s_shadow_vcpu *svcpu; | ||
747 | ulong flags; | 870 | ulong flags; |
748 | 871 | ||
749 | program_interrupt: | 872 | program_interrupt: |
750 | svcpu = svcpu_get(vcpu); | 873 | flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; |
751 | flags = svcpu->shadow_srr1 & 0x1f0000ull; | ||
752 | svcpu_put(svcpu); | ||
753 | 874 | ||
754 | if (vcpu->arch.shared->msr & MSR_PR) { | 875 | if (vcpu->arch.shared->msr & MSR_PR) { |
755 | #ifdef EXIT_DEBUG | 876 | #ifdef EXIT_DEBUG |
@@ -798,7 +919,7 @@ program_interrupt: | |||
798 | ulong cmd = kvmppc_get_gpr(vcpu, 3); | 919 | ulong cmd = kvmppc_get_gpr(vcpu, 3); |
799 | int i; | 920 | int i; |
800 | 921 | ||
801 | #ifdef CONFIG_KVM_BOOK3S_64_PR | 922 | #ifdef CONFIG_PPC_BOOK3S_64 |
802 | if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { | 923 | if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { |
803 | r = RESUME_GUEST; | 924 | r = RESUME_GUEST; |
804 | break; | 925 | break; |
@@ -881,9 +1002,7 @@ program_interrupt: | |||
881 | break; | 1002 | break; |
882 | default: | 1003 | default: |
883 | { | 1004 | { |
884 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 1005 | ulong shadow_srr1 = vcpu->arch.shadow_srr1; |
885 | ulong shadow_srr1 = svcpu->shadow_srr1; | ||
886 | svcpu_put(svcpu); | ||
887 | /* Ugh - bork here! What did we get? */ | 1006 | /* Ugh - bork here! What did we get? */ |
888 | printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", | 1007 | printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", |
889 | exit_nr, kvmppc_get_pc(vcpu), shadow_srr1); | 1008 | exit_nr, kvmppc_get_pc(vcpu), shadow_srr1); |
@@ -920,8 +1039,8 @@ program_interrupt: | |||
920 | return r; | 1039 | return r; |
921 | } | 1040 | } |
922 | 1041 | ||
923 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | 1042 | static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu, |
924 | struct kvm_sregs *sregs) | 1043 | struct kvm_sregs *sregs) |
925 | { | 1044 | { |
926 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | 1045 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
927 | int i; | 1046 | int i; |
@@ -947,13 +1066,13 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | |||
947 | return 0; | 1066 | return 0; |
948 | } | 1067 | } |
949 | 1068 | ||
950 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | 1069 | static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu, |
951 | struct kvm_sregs *sregs) | 1070 | struct kvm_sregs *sregs) |
952 | { | 1071 | { |
953 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | 1072 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
954 | int i; | 1073 | int i; |
955 | 1074 | ||
956 | kvmppc_set_pvr(vcpu, sregs->pvr); | 1075 | kvmppc_set_pvr_pr(vcpu, sregs->pvr); |
957 | 1076 | ||
958 | vcpu3s->sdr1 = sregs->u.s.sdr1; | 1077 | vcpu3s->sdr1 = sregs->u.s.sdr1; |
959 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { | 1078 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { |
@@ -983,7 +1102,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |||
983 | return 0; | 1102 | return 0; |
984 | } | 1103 | } |
985 | 1104 | ||
986 | int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) | 1105 | static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, |
1106 | union kvmppc_one_reg *val) | ||
987 | { | 1107 | { |
988 | int r = 0; | 1108 | int r = 0; |
989 | 1109 | ||
@@ -1012,7 +1132,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) | |||
1012 | return r; | 1132 | return r; |
1013 | } | 1133 | } |
1014 | 1134 | ||
1015 | int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) | 1135 | static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, |
1136 | union kvmppc_one_reg *val) | ||
1016 | { | 1137 | { |
1017 | int r = 0; | 1138 | int r = 0; |
1018 | 1139 | ||
@@ -1042,28 +1163,30 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) | |||
1042 | return r; | 1163 | return r; |
1043 | } | 1164 | } |
1044 | 1165 | ||
1045 | int kvmppc_core_check_processor_compat(void) | 1166 | static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, |
1046 | { | 1167 | unsigned int id) |
1047 | return 0; | ||
1048 | } | ||
1049 | |||
1050 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | ||
1051 | { | 1168 | { |
1052 | struct kvmppc_vcpu_book3s *vcpu_book3s; | 1169 | struct kvmppc_vcpu_book3s *vcpu_book3s; |
1053 | struct kvm_vcpu *vcpu; | 1170 | struct kvm_vcpu *vcpu; |
1054 | int err = -ENOMEM; | 1171 | int err = -ENOMEM; |
1055 | unsigned long p; | 1172 | unsigned long p; |
1056 | 1173 | ||
1057 | vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); | 1174 | vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); |
1058 | if (!vcpu_book3s) | 1175 | if (!vcpu) |
1059 | goto out; | 1176 | goto out; |
1060 | 1177 | ||
1061 | vcpu_book3s->shadow_vcpu = | 1178 | vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); |
1062 | kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL); | 1179 | if (!vcpu_book3s) |
1063 | if (!vcpu_book3s->shadow_vcpu) | ||
1064 | goto free_vcpu; | 1180 | goto free_vcpu; |
1181 | vcpu->arch.book3s = vcpu_book3s; | ||
1182 | |||
1183 | #ifdef CONFIG_KVM_BOOK3S_32 | ||
1184 | vcpu->arch.shadow_vcpu = | ||
1185 | kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); | ||
1186 | if (!vcpu->arch.shadow_vcpu) | ||
1187 | goto free_vcpu3s; | ||
1188 | #endif | ||
1065 | 1189 | ||
1066 | vcpu = &vcpu_book3s->vcpu; | ||
1067 | err = kvm_vcpu_init(vcpu, kvm, id); | 1190 | err = kvm_vcpu_init(vcpu, kvm, id); |
1068 | if (err) | 1191 | if (err) |
1069 | goto free_shadow_vcpu; | 1192 | goto free_shadow_vcpu; |
@@ -1076,13 +1199,19 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
1076 | vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096); | 1199 | vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096); |
1077 | 1200 | ||
1078 | #ifdef CONFIG_PPC_BOOK3S_64 | 1201 | #ifdef CONFIG_PPC_BOOK3S_64 |
1079 | /* default to book3s_64 (970fx) */ | 1202 | /* |
1203 | * Default to the same as the host if we're on sufficiently | ||
1204 | * recent machine that we have 1TB segments; | ||
1205 | * otherwise default to PPC970FX. | ||
1206 | */ | ||
1080 | vcpu->arch.pvr = 0x3C0301; | 1207 | vcpu->arch.pvr = 0x3C0301; |
1208 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) | ||
1209 | vcpu->arch.pvr = mfspr(SPRN_PVR); | ||
1081 | #else | 1210 | #else |
1082 | /* default to book3s_32 (750) */ | 1211 | /* default to book3s_32 (750) */ |
1083 | vcpu->arch.pvr = 0x84202; | 1212 | vcpu->arch.pvr = 0x84202; |
1084 | #endif | 1213 | #endif |
1085 | kvmppc_set_pvr(vcpu, vcpu->arch.pvr); | 1214 | kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); |
1086 | vcpu->arch.slb_nr = 64; | 1215 | vcpu->arch.slb_nr = 64; |
1087 | 1216 | ||
1088 | vcpu->arch.shadow_msr = MSR_USER64; | 1217 | vcpu->arch.shadow_msr = MSR_USER64; |
@@ -1096,24 +1225,31 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
1096 | uninit_vcpu: | 1225 | uninit_vcpu: |
1097 | kvm_vcpu_uninit(vcpu); | 1226 | kvm_vcpu_uninit(vcpu); |
1098 | free_shadow_vcpu: | 1227 | free_shadow_vcpu: |
1099 | kfree(vcpu_book3s->shadow_vcpu); | 1228 | #ifdef CONFIG_KVM_BOOK3S_32 |
1100 | free_vcpu: | 1229 | kfree(vcpu->arch.shadow_vcpu); |
1230 | free_vcpu3s: | ||
1231 | #endif | ||
1101 | vfree(vcpu_book3s); | 1232 | vfree(vcpu_book3s); |
1233 | free_vcpu: | ||
1234 | kmem_cache_free(kvm_vcpu_cache, vcpu); | ||
1102 | out: | 1235 | out: |
1103 | return ERR_PTR(err); | 1236 | return ERR_PTR(err); |
1104 | } | 1237 | } |
1105 | 1238 | ||
1106 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | 1239 | static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) |
1107 | { | 1240 | { |
1108 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | 1241 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); |
1109 | 1242 | ||
1110 | free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); | 1243 | free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); |
1111 | kvm_vcpu_uninit(vcpu); | 1244 | kvm_vcpu_uninit(vcpu); |
1112 | kfree(vcpu_book3s->shadow_vcpu); | 1245 | #ifdef CONFIG_KVM_BOOK3S_32 |
1246 | kfree(vcpu->arch.shadow_vcpu); | ||
1247 | #endif | ||
1113 | vfree(vcpu_book3s); | 1248 | vfree(vcpu_book3s); |
1249 | kmem_cache_free(kvm_vcpu_cache, vcpu); | ||
1114 | } | 1250 | } |
1115 | 1251 | ||
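
Both functions above follow the same discipline: the vcpu is now allocated first and freed last, with the book3s structure and the 32-bit-only shadow vcpu nested inside, so the error labels in the create path unwind in exactly the reverse order of allocation. The skeleton of that pattern, reduced to its shape (names and SIZE_BOOK3S are hypothetical placeholders):

        /* Hypothetical reduction of the allocate/unwind pairing above */
        static void *sketch_create(void)
        {
                void *vcpu, *book3s;

                vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
                if (!vcpu)
                        goto out;
                book3s = vzalloc(SIZE_BOOK3S);
                if (!book3s)
                        goto free_vcpu;         /* undo in reverse order */
                return vcpu;

        free_vcpu:
                kmem_cache_free(kvm_vcpu_cache, vcpu);
        out:
                return NULL;
        }
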
1116 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | 1252 | static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
1117 | { | 1253 | { |
1118 | int ret; | 1254 | int ret; |
1119 | double fpr[32][TS_FPRWIDTH]; | 1255 | double fpr[32][TS_FPRWIDTH]; |
@@ -1222,8 +1358,8 @@ out: | |||
1222 | /* | 1358 | /* |
1223 | * Get (and clear) the dirty memory log for a memory slot. | 1359 | * Get (and clear) the dirty memory log for a memory slot. |
1224 | */ | 1360 | */ |
1225 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | 1361 | static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm, |
1226 | struct kvm_dirty_log *log) | 1362 | struct kvm_dirty_log *log) |
1227 | { | 1363 | { |
1228 | struct kvm_memory_slot *memslot; | 1364 | struct kvm_memory_slot *memslot; |
1229 | struct kvm_vcpu *vcpu; | 1365 | struct kvm_vcpu *vcpu; |
@@ -1258,67 +1394,100 @@ out: | |||
1258 | return r; | 1394 | return r; |
1259 | } | 1395 | } |
1260 | 1396 | ||
1261 | #ifdef CONFIG_PPC64 | 1397 | static void kvmppc_core_flush_memslot_pr(struct kvm *kvm, |
1262 | int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) | 1398 | struct kvm_memory_slot *memslot) |
1263 | { | 1399 | { |
1264 | info->flags = KVM_PPC_1T_SEGMENTS; | 1400 | return; |
1265 | 1401 | } | |
1266 | /* SLB is always 64 entries */ | ||
1267 | info->slb_size = 64; | ||
1268 | |||
1269 | /* Standard 4k base page size segment */ | ||
1270 | info->sps[0].page_shift = 12; | ||
1271 | info->sps[0].slb_enc = 0; | ||
1272 | info->sps[0].enc[0].page_shift = 12; | ||
1273 | info->sps[0].enc[0].pte_enc = 0; | ||
1274 | |||
1275 | /* Standard 16M large page size segment */ | ||
1276 | info->sps[1].page_shift = 24; | ||
1277 | info->sps[1].slb_enc = SLB_VSID_L; | ||
1278 | info->sps[1].enc[0].page_shift = 24; | ||
1279 | info->sps[1].enc[0].pte_enc = 0; | ||
1280 | 1402 | ||
1403 | static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm, | ||
1404 | struct kvm_memory_slot *memslot, | ||
1405 | struct kvm_userspace_memory_region *mem) | ||
1406 | { | ||
1281 | return 0; | 1407 | return 0; |
1282 | } | 1408 | } |
1283 | #endif /* CONFIG_PPC64 */ | ||
1284 | 1409 | ||
1285 | void kvmppc_core_free_memslot(struct kvm_memory_slot *free, | 1410 | static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm, |
1286 | struct kvm_memory_slot *dont) | 1411 | struct kvm_userspace_memory_region *mem, |
1412 | const struct kvm_memory_slot *old) | ||
1287 | { | 1413 | { |
1414 | return; | ||
1288 | } | 1415 | } |
1289 | 1416 | ||
1290 | int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, | 1417 | static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free, |
1291 | unsigned long npages) | 1418 | struct kvm_memory_slot *dont) |
1292 | { | 1419 | { |
1293 | return 0; | 1420 | return; |
1294 | } | 1421 | } |
1295 | 1422 | ||
1296 | int kvmppc_core_prepare_memory_region(struct kvm *kvm, | 1423 | static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot, |
1297 | struct kvm_memory_slot *memslot, | 1424 | unsigned long npages) |
1298 | struct kvm_userspace_memory_region *mem) | ||
1299 | { | 1425 | { |
1300 | return 0; | 1426 | return 0; |
1301 | } | 1427 | } |
1302 | 1428 | ||
1303 | void kvmppc_core_commit_memory_region(struct kvm *kvm, | 1429 | |
1304 | struct kvm_userspace_memory_region *mem, | 1430 | #ifdef CONFIG_PPC64 |
1305 | const struct kvm_memory_slot *old) | 1431 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, |
1432 | struct kvm_ppc_smmu_info *info) | ||
1306 | { | 1433 | { |
1307 | } | 1434 | long int i; |
1435 | struct kvm_vcpu *vcpu; | ||
1436 | |||
1437 | info->flags = 0; | ||
1438 | |||
1439 | /* SLB is always 64 entries */ | ||
1440 | info->slb_size = 64; | ||
1441 | |||
1442 | /* Standard 4k base page size segment */ | ||
1443 | info->sps[0].page_shift = 12; | ||
1444 | info->sps[0].slb_enc = 0; | ||
1445 | info->sps[0].enc[0].page_shift = 12; | ||
1446 | info->sps[0].enc[0].pte_enc = 0; | ||
1447 | |||
1448 | /* | ||
1449 | * 64k large page size. | ||
1450 | * We only want to put this in if the CPUs we're emulating | ||
1451 | * support it, but unfortunately we don't have a vcpu easily | ||
1452 | * to hand here to test. Just pick the first vcpu, and if | ||
1453 | * that doesn't exist yet, report the minimum capability, | ||
1454 | * i.e., no 64k pages. | ||
1455 | * 1T segment support goes along with 64k pages. | ||
1456 | */ | ||
1457 | i = 1; | ||
1458 | vcpu = kvm_get_vcpu(kvm, 0); | ||
1459 | if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { | ||
1460 | info->flags = KVM_PPC_1T_SEGMENTS; | ||
1461 | info->sps[i].page_shift = 16; | ||
1462 | info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01; | ||
1463 | info->sps[i].enc[0].page_shift = 16; | ||
1464 | info->sps[i].enc[0].pte_enc = 1; | ||
1465 | ++i; | ||
1466 | } | ||
1467 | |||
1468 | /* Standard 16M large page size segment */ | ||
1469 | info->sps[i].page_shift = 24; | ||
1470 | info->sps[i].slb_enc = SLB_VSID_L; | ||
1471 | info->sps[i].enc[0].page_shift = 24; | ||
1472 | info->sps[i].enc[0].pte_enc = 0; | ||
1308 | 1473 | ||
1309 | void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) | 1474 | return 0; |
1475 | } | ||
1476 | #else | ||
1477 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, | ||
1478 | struct kvm_ppc_smmu_info *info) | ||
1310 | { | 1479 | { |
1480 | /* We should not get called */ | ||
1481 | BUG(); | ||
1311 | } | 1482 | } |
1483 | #endif /* CONFIG_PPC64 */ | ||
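
From userspace the result of this function is visible through the KVM_PPC_GET_SMMU_INFO vm ioctl. A sketch of how a VMM might read back the advertised geometry; error handling is trimmed, and it assumes a PowerPC host where <linux/kvm.h> defines the PPC-specific struct.

        /* Userspace sketch: dump the segment/page-size info advertised
         * by kvm_vm_ioctl_get_smmu_info_pr().  vm_fd is an open KVM VM
         * file descriptor. */
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        static void dump_smmu_info(int vm_fd)
        {
                struct kvm_ppc_smmu_info info;
                int i;

                if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0)
                        return;

                printf("slb_size=%u 1T-segments=%s\n", info.slb_size,
                       (info.flags & KVM_PPC_1T_SEGMENTS) ? "yes" : "no");
                for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++)
                        if (info.sps[i].page_shift)
                                printf("segment base page shift %u\n",
                                       info.sps[i].page_shift);
        }
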
1312 | 1484 | ||
1313 | static unsigned int kvm_global_user_count = 0; | 1485 | static unsigned int kvm_global_user_count = 0; |
1314 | static DEFINE_SPINLOCK(kvm_global_user_count_lock); | 1486 | static DEFINE_SPINLOCK(kvm_global_user_count_lock); |
1315 | 1487 | ||
1316 | int kvmppc_core_init_vm(struct kvm *kvm) | 1488 | static int kvmppc_core_init_vm_pr(struct kvm *kvm) |
1317 | { | 1489 | { |
1318 | #ifdef CONFIG_PPC64 | 1490 | mutex_init(&kvm->arch.hpt_mutex); |
1319 | INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); | ||
1320 | INIT_LIST_HEAD(&kvm->arch.rtas_tokens); | ||
1321 | #endif | ||
1322 | 1491 | ||
1323 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { | 1492 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { |
1324 | spin_lock(&kvm_global_user_count_lock); | 1493 | spin_lock(&kvm_global_user_count_lock); |
@@ -1329,7 +1498,7 @@ int kvmppc_core_init_vm(struct kvm *kvm) | |||
1329 | return 0; | 1498 | return 0; |
1330 | } | 1499 | } |
1331 | 1500 | ||
1332 | void kvmppc_core_destroy_vm(struct kvm *kvm) | 1501 | static void kvmppc_core_destroy_vm_pr(struct kvm *kvm) |
1333 | { | 1502 | { |
1334 | #ifdef CONFIG_PPC64 | 1503 | #ifdef CONFIG_PPC64 |
1335 | WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); | 1504 | WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); |
@@ -1344,26 +1513,81 @@ void kvmppc_core_destroy_vm(struct kvm *kvm) | |||
1344 | } | 1513 | } |
1345 | } | 1514 | } |
1346 | 1515 | ||
1347 | static int kvmppc_book3s_init(void) | 1516 | static int kvmppc_core_check_processor_compat_pr(void) |
1348 | { | 1517 | { |
1349 | int r; | 1518 | /* we are always compatible */ |
1519 | return 0; | ||
1520 | } | ||
1350 | 1521 | ||
1351 | r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0, | 1522 | static long kvm_arch_vm_ioctl_pr(struct file *filp, |
1352 | THIS_MODULE); | 1523 | unsigned int ioctl, unsigned long arg) |
1524 | { | ||
1525 | return -ENOTTY; | ||
1526 | } | ||
1353 | 1527 | ||
1354 | if (r) | 1528 | static struct kvmppc_ops kvm_ops_pr = { |
1529 | .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr, | ||
1530 | .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr, | ||
1531 | .get_one_reg = kvmppc_get_one_reg_pr, | ||
1532 | .set_one_reg = kvmppc_set_one_reg_pr, | ||
1533 | .vcpu_load = kvmppc_core_vcpu_load_pr, | ||
1534 | .vcpu_put = kvmppc_core_vcpu_put_pr, | ||
1535 | .set_msr = kvmppc_set_msr_pr, | ||
1536 | .vcpu_run = kvmppc_vcpu_run_pr, | ||
1537 | .vcpu_create = kvmppc_core_vcpu_create_pr, | ||
1538 | .vcpu_free = kvmppc_core_vcpu_free_pr, | ||
1539 | .check_requests = kvmppc_core_check_requests_pr, | ||
1540 | .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr, | ||
1541 | .flush_memslot = kvmppc_core_flush_memslot_pr, | ||
1542 | .prepare_memory_region = kvmppc_core_prepare_memory_region_pr, | ||
1543 | .commit_memory_region = kvmppc_core_commit_memory_region_pr, | ||
1544 | .unmap_hva = kvm_unmap_hva_pr, | ||
1545 | .unmap_hva_range = kvm_unmap_hva_range_pr, | ||
1546 | .age_hva = kvm_age_hva_pr, | ||
1547 | .test_age_hva = kvm_test_age_hva_pr, | ||
1548 | .set_spte_hva = kvm_set_spte_hva_pr, | ||
1549 | .mmu_destroy = kvmppc_mmu_destroy_pr, | ||
1550 | .free_memslot = kvmppc_core_free_memslot_pr, | ||
1551 | .create_memslot = kvmppc_core_create_memslot_pr, | ||
1552 | .init_vm = kvmppc_core_init_vm_pr, | ||
1553 | .destroy_vm = kvmppc_core_destroy_vm_pr, | ||
1554 | .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr, | ||
1555 | .emulate_op = kvmppc_core_emulate_op_pr, | ||
1556 | .emulate_mtspr = kvmppc_core_emulate_mtspr_pr, | ||
1557 | .emulate_mfspr = kvmppc_core_emulate_mfspr_pr, | ||
1558 | .fast_vcpu_kick = kvm_vcpu_kick, | ||
1559 | .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, | ||
1560 | }; | ||
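
This table is the heart of the restructuring: instead of one hard-wired set of kvmppc_core_* functions per build, each VM carries a kvmppc_ops pointer and generic code dispatches through it, so PR and HV can coexist in one kernel. The booke.c hunks later in this diff show the consumer side; in isolation it is simply:

        /* Consumer side of the ops table (mirrors the booke.c change
         * below): generic code indirects through the per-VM kvm_ops
         * pointer instead of calling a fixed kvmppc_core_* symbol. */
        static int sketch_set_sregs(struct kvm_vcpu *vcpu,
                                    struct kvm_sregs *sregs)
        {
                return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
        }
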
1561 | |||
1562 | |||
1563 | int kvmppc_book3s_init_pr(void) | ||
1564 | { | ||
1565 | int r; | ||
1566 | |||
1567 | r = kvmppc_core_check_processor_compat_pr(); | ||
1568 | if (r < 0) | ||
1355 | return r; | 1569 | return r; |
1356 | 1570 | ||
1357 | r = kvmppc_mmu_hpte_sysinit(); | 1571 | kvm_ops_pr.owner = THIS_MODULE; |
1572 | kvmppc_pr_ops = &kvm_ops_pr; | ||
1358 | 1573 | ||
1574 | r = kvmppc_mmu_hpte_sysinit(); | ||
1359 | return r; | 1575 | return r; |
1360 | } | 1576 | } |
1361 | 1577 | ||
1362 | static void kvmppc_book3s_exit(void) | 1578 | void kvmppc_book3s_exit_pr(void) |
1363 | { | 1579 | { |
1580 | kvmppc_pr_ops = NULL; | ||
1364 | kvmppc_mmu_hpte_sysexit(); | 1581 | kvmppc_mmu_hpte_sysexit(); |
1365 | kvm_exit(); | ||
1366 | } | 1582 | } |
1367 | 1583 | ||
1368 | module_init(kvmppc_book3s_init); | 1584 | /* |
1369 | module_exit(kvmppc_book3s_exit); | 1585 | * We only support separate modules for book3s 64 |
1586 | */ | ||
1587 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
1588 | |||
1589 | module_init(kvmppc_book3s_init_pr); | ||
1590 | module_exit(kvmppc_book3s_exit_pr); | ||
1591 | |||
1592 | MODULE_LICENSE("GPL"); | ||
1593 | #endif | ||
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index da0e0bc268bd..5efa97b993d8 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c | |||
@@ -21,6 +21,8 @@ | |||
21 | #include <asm/kvm_ppc.h> | 21 | #include <asm/kvm_ppc.h> |
22 | #include <asm/kvm_book3s.h> | 22 | #include <asm/kvm_book3s.h> |
23 | 23 | ||
24 | #define HPTE_SIZE 16 /* bytes per HPT entry */ | ||
25 | |||
24 | static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index) | 26 | static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index) |
25 | { | 27 | { |
26 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | 28 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); |
@@ -40,32 +42,41 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) | |||
40 | long pte_index = kvmppc_get_gpr(vcpu, 5); | 42 | long pte_index = kvmppc_get_gpr(vcpu, 5); |
41 | unsigned long pteg[2 * 8]; | 43 | unsigned long pteg[2 * 8]; |
42 | unsigned long pteg_addr, i, *hpte; | 44 | unsigned long pteg_addr, i, *hpte; |
45 | long int ret; | ||
43 | 46 | ||
47 | i = pte_index & 7; | ||
44 | pte_index &= ~7UL; | 48 | pte_index &= ~7UL; |
45 | pteg_addr = get_pteg_addr(vcpu, pte_index); | 49 | pteg_addr = get_pteg_addr(vcpu, pte_index); |
46 | 50 | ||
51 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); | ||
47 | copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)); | 52 | copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)); |
48 | hpte = pteg; | 53 | hpte = pteg; |
49 | 54 | ||
55 | ret = H_PTEG_FULL; | ||
50 | if (likely((flags & H_EXACT) == 0)) { | 56 | if (likely((flags & H_EXACT) == 0)) { |
51 | pte_index &= ~7UL; | ||
52 | for (i = 0; ; ++i) { | 57 | for (i = 0; ; ++i) { |
53 | if (i == 8) | 58 | if (i == 8) |
54 | return H_PTEG_FULL; | 59 | goto done; |
55 | if ((*hpte & HPTE_V_VALID) == 0) | 60 | if ((*hpte & HPTE_V_VALID) == 0) |
56 | break; | 61 | break; |
57 | hpte += 2; | 62 | hpte += 2; |
58 | } | 63 | } |
59 | } else { | 64 | } else { |
60 | i = kvmppc_get_gpr(vcpu, 5) & 7UL; | ||
61 | hpte += i * 2; | 65 | hpte += i * 2; |
66 | if (*hpte & HPTE_V_VALID) | ||
67 | goto done; | ||
62 | } | 68 | } |
63 | 69 | ||
64 | hpte[0] = kvmppc_get_gpr(vcpu, 6); | 70 | hpte[0] = kvmppc_get_gpr(vcpu, 6); |
65 | hpte[1] = kvmppc_get_gpr(vcpu, 7); | 71 | hpte[1] = kvmppc_get_gpr(vcpu, 7); |
66 | copy_to_user((void __user *)pteg_addr, pteg, sizeof(pteg)); | 72 | pteg_addr += i * HPTE_SIZE; |
67 | kvmppc_set_gpr(vcpu, 3, H_SUCCESS); | 73 | copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE); |
68 | kvmppc_set_gpr(vcpu, 4, pte_index | i); | 74 | kvmppc_set_gpr(vcpu, 4, pte_index | i); |
75 | ret = H_SUCCESS; | ||
76 | |||
77 | done: | ||
78 | mutex_unlock(&vcpu->kvm->arch.hpt_mutex); | ||
79 | kvmppc_set_gpr(vcpu, 3, ret); | ||
69 | 80 | ||
70 | return EMULATE_DONE; | 81 | return EMULATE_DONE; |
71 | } | 82 | } |
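
The H_ENTER handler works on one PTE group: 8 hash PTEs of HPTE_SIZE (16) bytes, i.e. two unsigned longs per entry. With H_EXACT the low three bits of pte_index name the slot and a valid entry there fails the call; otherwise the first invalid slot wins. The selection logic above, extracted for clarity (sketch only):

        /* Slot selection as performed above.  'pteg' holds 8 entries of
         * two unsigned longs each; returns the chosen slot, or -1 to
         * signal H_PTEG_FULL. */
        static long sketch_pick_hpte_slot(unsigned long *pteg,
                                          unsigned long flags,
                                          unsigned long pte_index)
        {
                long i;

                if (flags & H_EXACT) {
                        i = pte_index & 7;      /* caller names the exact slot */
                        return (pteg[i * 2] & HPTE_V_VALID) ? -1 : i;
                }

                for (i = 0; i < 8; i++)
                        if (!(pteg[i * 2] & HPTE_V_VALID))
                                return i;       /* first free slot */

                return -1;
        }
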
@@ -77,26 +88,31 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) | |||
77 | unsigned long avpn = kvmppc_get_gpr(vcpu, 6); | 88 | unsigned long avpn = kvmppc_get_gpr(vcpu, 6); |
78 | unsigned long v = 0, pteg, rb; | 89 | unsigned long v = 0, pteg, rb; |
79 | unsigned long pte[2]; | 90 | unsigned long pte[2]; |
91 | long int ret; | ||
80 | 92 | ||
81 | pteg = get_pteg_addr(vcpu, pte_index); | 93 | pteg = get_pteg_addr(vcpu, pte_index); |
94 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); | ||
82 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); | 95 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); |
83 | 96 | ||
97 | ret = H_NOT_FOUND; | ||
84 | if ((pte[0] & HPTE_V_VALID) == 0 || | 98 | if ((pte[0] & HPTE_V_VALID) == 0 || |
85 | ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) || | 99 | ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) || |
86 | ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) { | 100 | ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) |
87 | kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND); | 101 | goto done; |
88 | return EMULATE_DONE; | ||
89 | } | ||
90 | 102 | ||
91 | copy_to_user((void __user *)pteg, &v, sizeof(v)); | 103 | copy_to_user((void __user *)pteg, &v, sizeof(v)); |
92 | 104 | ||
93 | rb = compute_tlbie_rb(pte[0], pte[1], pte_index); | 105 | rb = compute_tlbie_rb(pte[0], pte[1], pte_index); |
94 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); | 106 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); |
95 | 107 | ||
96 | kvmppc_set_gpr(vcpu, 3, H_SUCCESS); | 108 | ret = H_SUCCESS; |
97 | kvmppc_set_gpr(vcpu, 4, pte[0]); | 109 | kvmppc_set_gpr(vcpu, 4, pte[0]); |
98 | kvmppc_set_gpr(vcpu, 5, pte[1]); | 110 | kvmppc_set_gpr(vcpu, 5, pte[1]); |
99 | 111 | ||
112 | done: | ||
113 | mutex_unlock(&vcpu->kvm->arch.hpt_mutex); | ||
114 | kvmppc_set_gpr(vcpu, 3, ret); | ||
115 | |||
100 | return EMULATE_DONE; | 116 | return EMULATE_DONE; |
101 | } | 117 | } |
102 | 118 | ||
@@ -124,6 +140,7 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) | |||
124 | int paramnr = 4; | 140 | int paramnr = 4; |
125 | int ret = H_SUCCESS; | 141 | int ret = H_SUCCESS; |
126 | 142 | ||
143 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); | ||
127 | for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) { | 144 | for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) { |
128 | unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i)); | 145 | unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i)); |
129 | unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1); | 146 | unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1); |
@@ -172,6 +189,7 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) | |||
172 | } | 189 | } |
173 | kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); | 190 | kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); |
174 | } | 191 | } |
192 | mutex_unlock(&vcpu->kvm->arch.hpt_mutex); | ||
175 | kvmppc_set_gpr(vcpu, 3, ret); | 193 | kvmppc_set_gpr(vcpu, 3, ret); |
176 | 194 | ||
177 | return EMULATE_DONE; | 195 | return EMULATE_DONE; |
@@ -184,15 +202,16 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) | |||
184 | unsigned long avpn = kvmppc_get_gpr(vcpu, 6); | 202 | unsigned long avpn = kvmppc_get_gpr(vcpu, 6); |
185 | unsigned long rb, pteg, r, v; | 203 | unsigned long rb, pteg, r, v; |
186 | unsigned long pte[2]; | 204 | unsigned long pte[2]; |
205 | long int ret; | ||
187 | 206 | ||
188 | pteg = get_pteg_addr(vcpu, pte_index); | 207 | pteg = get_pteg_addr(vcpu, pte_index); |
208 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); | ||
189 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); | 209 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); |
190 | 210 | ||
211 | ret = H_NOT_FOUND; | ||
191 | if ((pte[0] & HPTE_V_VALID) == 0 || | 212 | if ((pte[0] & HPTE_V_VALID) == 0 || |
192 | ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) { | 213 | ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) |
193 | kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND); | 214 | goto done; |
194 | return EMULATE_DONE; | ||
195 | } | ||
196 | 215 | ||
197 | v = pte[0]; | 216 | v = pte[0]; |
198 | r = pte[1]; | 217 | r = pte[1]; |
@@ -207,8 +226,11 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) | |||
207 | rb = compute_tlbie_rb(v, r, pte_index); | 226 | rb = compute_tlbie_rb(v, r, pte_index); |
208 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); | 227 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); |
209 | copy_to_user((void __user *)pteg, pte, sizeof(pte)); | 228 | copy_to_user((void __user *)pteg, pte, sizeof(pte)); |
229 | ret = H_SUCCESS; | ||
210 | 230 | ||
211 | kvmppc_set_gpr(vcpu, 3, H_SUCCESS); | 231 | done: |
232 | mutex_unlock(&vcpu->kvm->arch.hpt_mutex); | ||
233 | kvmppc_set_gpr(vcpu, 3, ret); | ||
212 | 234 | ||
213 | return EMULATE_DONE; | 235 | return EMULATE_DONE; |
214 | } | 236 | } |
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index 8f7633e3afb8..a38c4c9edab8 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S | |||
@@ -38,32 +38,6 @@ | |||
38 | 38 | ||
39 | #define FUNC(name) GLUE(.,name) | 39 | #define FUNC(name) GLUE(.,name) |
40 | 40 | ||
41 | .globl kvmppc_skip_interrupt | ||
42 | kvmppc_skip_interrupt: | ||
43 | /* | ||
44 | * Here all GPRs are unchanged from when the interrupt happened | ||
45 | * except for r13, which is saved in SPRG_SCRATCH0. | ||
46 | */ | ||
47 | mfspr r13, SPRN_SRR0 | ||
48 | addi r13, r13, 4 | ||
49 | mtspr SPRN_SRR0, r13 | ||
50 | GET_SCRATCH0(r13) | ||
51 | rfid | ||
52 | b . | ||
53 | |||
54 | .globl kvmppc_skip_Hinterrupt | ||
55 | kvmppc_skip_Hinterrupt: | ||
56 | /* | ||
57 | * Here all GPRs are unchanged from when the interrupt happened | ||
58 | * except for r13, which is saved in SPRG_SCRATCH0. | ||
59 | */ | ||
60 | mfspr r13, SPRN_HSRR0 | ||
61 | addi r13, r13, 4 | ||
62 | mtspr SPRN_HSRR0, r13 | ||
63 | GET_SCRATCH0(r13) | ||
64 | hrfid | ||
65 | b . | ||
66 | |||
67 | #elif defined(CONFIG_PPC_BOOK3S_32) | 41 | #elif defined(CONFIG_PPC_BOOK3S_32) |
68 | 42 | ||
69 | #define FUNC(name) name | 43 | #define FUNC(name) name |
@@ -179,11 +153,15 @@ _GLOBAL(kvmppc_entry_trampoline) | |||
179 | 153 | ||
180 | li r6, MSR_IR | MSR_DR | 154 | li r6, MSR_IR | MSR_DR |
181 | andc r6, r5, r6 /* Clear DR and IR in MSR value */ | 155 | andc r6, r5, r6 /* Clear DR and IR in MSR value */ |
156 | #ifdef CONFIG_PPC_BOOK3S_32 | ||
182 | /* | 157 | /* |
183 | * Set EE in HOST_MSR so that it's enabled when we get into our | 158 | * Set EE in HOST_MSR so that it's enabled when we get into our |
184 | * C exit handler function | 159 | * C exit handler function. On 64-bit we delay enabling |
160 | * interrupts until we have finished transferring stuff | ||
161 | * to or from the PACA. | ||
185 | */ | 162 | */ |
186 | ori r5, r5, MSR_EE | 163 | ori r5, r5, MSR_EE |
164 | #endif | ||
187 | mtsrr0 r7 | 165 | mtsrr0 r7 |
188 | mtsrr1 r6 | 166 | mtsrr1 r6 |
189 | RFI | 167 | RFI |
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index 3219ba895246..cf95cdef73c9 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c | |||
@@ -260,6 +260,7 @@ fail: | |||
260 | */ | 260 | */ |
261 | return rc; | 261 | return rc; |
262 | } | 262 | } |
263 | EXPORT_SYMBOL_GPL(kvmppc_rtas_hcall); | ||
263 | 264 | ||
264 | void kvmppc_rtas_tokens_free(struct kvm *kvm) | 265 | void kvmppc_rtas_tokens_free(struct kvm *kvm) |
265 | { | 266 | { |
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 1abe4788191a..bc50c97751d3 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S | |||
@@ -161,8 +161,8 @@ kvmppc_handler_trampoline_enter_end: | |||
161 | .global kvmppc_handler_trampoline_exit | 161 | .global kvmppc_handler_trampoline_exit |
162 | kvmppc_handler_trampoline_exit: | 162 | kvmppc_handler_trampoline_exit: |
163 | 163 | ||
164 | .global kvmppc_interrupt | 164 | .global kvmppc_interrupt_pr |
165 | kvmppc_interrupt: | 165 | kvmppc_interrupt_pr: |
166 | 166 | ||
167 | /* Register usage at this point: | 167 | /* Register usage at this point: |
168 | * | 168 | * |
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index a3a5cb8ee7ea..02a17dcf1610 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c | |||
@@ -818,7 +818,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req) | |||
818 | } | 818 | } |
819 | 819 | ||
820 | /* Check for real mode returning too hard */ | 820 | /* Check for real mode returning too hard */ |
821 | if (xics->real_mode) | 821 | if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm)) |
822 | return kvmppc_xics_rm_complete(vcpu, req); | 822 | return kvmppc_xics_rm_complete(vcpu, req); |
823 | 823 | ||
824 | switch (req) { | 824 | switch (req) { |
@@ -840,6 +840,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req) | |||
840 | 840 | ||
841 | return rc; | 841 | return rc; |
842 | } | 842 | } |
843 | EXPORT_SYMBOL_GPL(kvmppc_xics_hcall); | ||
843 | 844 | ||
844 | 845 | ||
845 | /* -- Initialisation code etc. -- */ | 846 | /* -- Initialisation code etc. -- */ |
@@ -1250,13 +1251,13 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type) | |||
1250 | 1251 | ||
1251 | xics_debugfs_init(xics); | 1252 | xics_debugfs_init(xics); |
1252 | 1253 | ||
1253 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 1254 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
1254 | if (cpu_has_feature(CPU_FTR_ARCH_206)) { | 1255 | if (cpu_has_feature(CPU_FTR_ARCH_206)) { |
1255 | /* Enable real mode support */ | 1256 | /* Enable real mode support */ |
1256 | xics->real_mode = ENABLE_REALMODE; | 1257 | xics->real_mode = ENABLE_REALMODE; |
1257 | xics->real_mode_dbg = DEBUG_REALMODE; | 1258 | xics->real_mode_dbg = DEBUG_REALMODE; |
1258 | } | 1259 | } |
1259 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ | 1260 | #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ |
1260 | 1261 | ||
1261 | return 0; | 1262 | return 0; |
1262 | } | 1263 | } |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 17722d82f1d1..15d0149511eb 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -40,7 +40,9 @@ | |||
40 | 40 | ||
41 | #include "timing.h" | 41 | #include "timing.h" |
42 | #include "booke.h" | 42 | #include "booke.h" |
43 | #include "trace.h" | 43 | |
44 | #define CREATE_TRACE_POINTS | ||
45 | #include "trace_booke.h" | ||
44 | 46 | ||
45 | unsigned long kvmppc_booke_handlers; | 47 | unsigned long kvmppc_booke_handlers; |
46 | 48 | ||
@@ -133,6 +135,29 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu) | |||
133 | #endif | 135 | #endif |
134 | } | 136 | } |
135 | 137 | ||
138 | static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu) | ||
139 | { | ||
140 | /* Synchronize guest's desire to get debug interrupts into shadow MSR */ | ||
141 | #ifndef CONFIG_KVM_BOOKE_HV | ||
142 | vcpu->arch.shadow_msr &= ~MSR_DE; | ||
143 | vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE; | ||
144 | #endif | ||
145 | |||
146 | /* Force enable debug interrupts when user space wants to debug */ | ||
147 | if (vcpu->guest_debug) { | ||
148 | #ifdef CONFIG_KVM_BOOKE_HV | ||
149 | /* | ||
150 | * Since there is no shadow MSR, sync MSR_DE into the guest | ||
151 | * visible MSR. | ||
152 | */ | ||
153 | vcpu->arch.shared->msr |= MSR_DE; | ||
154 | #else | ||
155 | vcpu->arch.shadow_msr |= MSR_DE; | ||
156 | vcpu->arch.shared->msr &= ~MSR_DE; | ||
157 | #endif | ||
158 | } | ||
159 | } | ||
160 | |||
136 | /* | 161 | /* |
137 | * Helper function for "full" MSR writes. No need to call this if only | 162 | * Helper function for "full" MSR writes. No need to call this if only |
138 | * EE/CE/ME/DE/RI are changing. | 163 | * EE/CE/ME/DE/RI are changing. |
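
kvmppc_vcpu_sync_debug() above resolves a three-way tug over MSR_DE: the guest's wish, the shadow MSR actually loaded into hardware, and userspace's need to receive debug events. Its non-HV effect on the shadow MSR, restated as a pure function (sketch; the real code additionally hides MSR_DE from the guest-visible MSR while debugging):

        /* Pure-function restatement of the non-HV MSR_DE logic above:
         * hardware tracks the guest's MSR_DE unless userspace is
         * debugging, in which case MSR_DE is forced on behind the
         * guest's back. */
        static u32 sketch_sync_de(u32 shadow_msr, u32 guest_msr,
                                  bool guest_debug)
        {
                shadow_msr = (shadow_msr & ~MSR_DE) | (guest_msr & MSR_DE);
                if (guest_debug)
                        shadow_msr |= MSR_DE;
                return shadow_msr;
        }
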
@@ -150,6 +175,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) | |||
150 | kvmppc_mmu_msr_notify(vcpu, old_msr); | 175 | kvmppc_mmu_msr_notify(vcpu, old_msr); |
151 | kvmppc_vcpu_sync_spe(vcpu); | 176 | kvmppc_vcpu_sync_spe(vcpu); |
152 | kvmppc_vcpu_sync_fpu(vcpu); | 177 | kvmppc_vcpu_sync_fpu(vcpu); |
178 | kvmppc_vcpu_sync_debug(vcpu); | ||
153 | } | 179 | } |
154 | 180 | ||
155 | static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, | 181 | static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, |
@@ -655,6 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) | |||
655 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | 681 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
656 | { | 682 | { |
657 | int ret, s; | 683 | int ret, s; |
684 | struct thread_struct thread; | ||
658 | #ifdef CONFIG_PPC_FPU | 685 | #ifdef CONFIG_PPC_FPU |
659 | unsigned int fpscr; | 686 | unsigned int fpscr; |
660 | int fpexc_mode; | 687 | int fpexc_mode; |
@@ -696,6 +723,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
696 | kvmppc_load_guest_fp(vcpu); | 723 | kvmppc_load_guest_fp(vcpu); |
697 | #endif | 724 | #endif |
698 | 725 | ||
726 | /* Switch to guest debug context */ | ||
727 | thread.debug = vcpu->arch.shadow_dbg_reg; | ||
728 | switch_booke_debug_regs(&thread); | ||
729 | thread.debug = current->thread.debug; | ||
730 | current->thread.debug = vcpu->arch.shadow_dbg_reg; | ||
731 | |||
699 | kvmppc_fix_ee_before_entry(); | 732 | kvmppc_fix_ee_before_entry(); |
700 | 733 | ||
701 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); | 734 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); |
@@ -703,6 +736,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
703 | /* No need for kvm_guest_exit. It's done in handle_exit. | 736 | /* No need for kvm_guest_exit. It's done in handle_exit. |
704 | We also get here with interrupts enabled. */ | 737 | We also get here with interrupts enabled. */ |
705 | 738 | ||
739 | /* Switch back to user space debug context */ | ||
740 | switch_booke_debug_regs(&thread); | ||
741 | current->thread.debug = thread.debug; | ||
742 | |||
706 | #ifdef CONFIG_PPC_FPU | 743 | #ifdef CONFIG_PPC_FPU |
707 | kvmppc_save_guest_fp(vcpu); | 744 | kvmppc_save_guest_fp(vcpu); |
708 | 745 | ||
@@ -758,6 +795,30 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
758 | } | 795 | } |
759 | } | 796 | } |
760 | 797 | ||
798 | static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu) | ||
799 | { | ||
800 | struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg); | ||
801 | u32 dbsr = vcpu->arch.dbsr; | ||
802 | |||
803 | run->debug.arch.status = 0; | ||
804 | run->debug.arch.address = vcpu->arch.pc; | ||
805 | |||
806 | if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) { | ||
807 | run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT; | ||
808 | } else { | ||
809 | if (dbsr & (DBSR_DAC1W | DBSR_DAC2W)) | ||
810 | run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE; | ||
811 | else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R)) | ||
812 | run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ; | ||
813 | if (dbsr & (DBSR_DAC1R | DBSR_DAC1W)) | ||
814 | run->debug.arch.address = dbg_reg->dac1; | ||
815 | else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W)) | ||
816 | run->debug.arch.address = dbg_reg->dac2; | ||
817 | } | ||
818 | |||
819 | return RESUME_HOST; | ||
820 | } | ||
821 | |||
761 | static void kvmppc_fill_pt_regs(struct pt_regs *regs) | 822 | static void kvmppc_fill_pt_regs(struct pt_regs *regs) |
762 | { | 823 | { |
763 | ulong r1, ip, msr, lr; | 824 | ulong r1, ip, msr, lr; |
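
kvmppc_handle_debug() turns raw DBSR bits into the architecture-neutral debug block that a debugger-hosting VMM consumes on KVM_EXIT_DEBUG. A userspace sketch of that consumer; it assumes 'run' is the vcpu's mmap'ed kvm_run and that the PPC uapi headers supply the KVMPPC_DEBUG_* constants seen above.

        /* Userspace sketch: interpret the exit state filled in by
         * kvmppc_handle_debug(). */
        #include <stdio.h>
        #include <linux/kvm.h>

        static void report_debug_exit(struct kvm_run *run)
        {
                unsigned long long addr = run->debug.arch.address;

                if (run->debug.arch.status & KVMPPC_DEBUG_BREAKPOINT)
                        printf("breakpoint hit at 0x%llx\n", addr);
                else if (run->debug.arch.status & KVMPPC_DEBUG_WATCH_WRITE)
                        printf("write watchpoint at 0x%llx\n", addr);
                else if (run->debug.arch.status & KVMPPC_DEBUG_WATCH_READ)
                        printf("read watchpoint at 0x%llx\n", addr);
        }
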
@@ -818,6 +879,11 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu, | |||
818 | case BOOKE_INTERRUPT_CRITICAL: | 879 | case BOOKE_INTERRUPT_CRITICAL: |
819 | unknown_exception(®s); | 880 | unknown_exception(®s); |
820 | break; | 881 | break; |
882 | case BOOKE_INTERRUPT_DEBUG: | ||
883 | /* Save DBSR before preemption is enabled */ | ||
884 | vcpu->arch.dbsr = mfspr(SPRN_DBSR); | ||
885 | kvmppc_clear_dbsr(); | ||
886 | break; | ||
821 | } | 887 | } |
822 | } | 888 | } |
823 | 889 | ||
@@ -1135,18 +1201,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
1135 | } | 1201 | } |
1136 | 1202 | ||
1137 | case BOOKE_INTERRUPT_DEBUG: { | 1203 | case BOOKE_INTERRUPT_DEBUG: { |
1138 | u32 dbsr; | 1204 | r = kvmppc_handle_debug(run, vcpu); |
1139 | 1205 | if (r == RESUME_HOST) | |
1140 | vcpu->arch.pc = mfspr(SPRN_CSRR0); | 1206 | run->exit_reason = KVM_EXIT_DEBUG; |
1141 | |||
1142 | /* clear IAC events in DBSR register */ | ||
1143 | dbsr = mfspr(SPRN_DBSR); | ||
1144 | dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4; | ||
1145 | mtspr(SPRN_DBSR, dbsr); | ||
1146 | |||
1147 | run->exit_reason = KVM_EXIT_DEBUG; | ||
1148 | kvmppc_account_exit(vcpu, DEBUG_EXITS); | 1207 | kvmppc_account_exit(vcpu, DEBUG_EXITS); |
1149 | r = RESUME_HOST; | ||
1150 | break; | 1208 | break; |
1151 | } | 1209 | } |
1152 | 1210 | ||
@@ -1197,7 +1255,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1197 | kvmppc_set_msr(vcpu, 0); | 1255 | kvmppc_set_msr(vcpu, 0); |
1198 | 1256 | ||
1199 | #ifndef CONFIG_KVM_BOOKE_HV | 1257 | #ifndef CONFIG_KVM_BOOKE_HV |
1200 | vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; | 1258 | vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS; |
1201 | vcpu->arch.shadow_pid = 1; | 1259 | vcpu->arch.shadow_pid = 1; |
1202 | vcpu->arch.shared->msr = 0; | 1260 | vcpu->arch.shared->msr = 0; |
1203 | #endif | 1261 | #endif |
@@ -1359,7 +1417,7 @@ static int set_sregs_arch206(struct kvm_vcpu *vcpu, | |||
1359 | return 0; | 1417 | return 0; |
1360 | } | 1418 | } |
1361 | 1419 | ||
1362 | void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 1420 | int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) |
1363 | { | 1421 | { |
1364 | sregs->u.e.features |= KVM_SREGS_E_IVOR; | 1422 | sregs->u.e.features |= KVM_SREGS_E_IVOR; |
1365 | 1423 | ||
@@ -1379,6 +1437,7 @@ void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
1379 | sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; | 1437 | sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; |
1380 | sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; | 1438 | sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; |
1381 | sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; | 1439 | sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; |
1440 | return 0; | ||
1382 | } | 1441 | } |
1383 | 1442 | ||
1384 | int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 1443 | int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) |
@@ -1413,8 +1472,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | |||
1413 | 1472 | ||
1414 | get_sregs_base(vcpu, sregs); | 1473 | get_sregs_base(vcpu, sregs); |
1415 | get_sregs_arch206(vcpu, sregs); | 1474 | get_sregs_arch206(vcpu, sregs); |
1416 | kvmppc_core_get_sregs(vcpu, sregs); | 1475 | return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); |
1417 | return 0; | ||
1418 | } | 1476 | } |
1419 | 1477 | ||
1420 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | 1478 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
@@ -1433,7 +1491,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |||
1433 | if (ret < 0) | 1491 | if (ret < 0) |
1434 | return ret; | 1492 | return ret; |
1435 | 1493 | ||
1436 | return kvmppc_core_set_sregs(vcpu, sregs); | 1494 | return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); |
1437 | } | 1495 | } |
1438 | 1496 | ||
1439 | int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | 1497 | int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) |
@@ -1441,7 +1499,6 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | |||
1441 | int r = 0; | 1499 | int r = 0; |
1442 | union kvmppc_one_reg val; | 1500 | union kvmppc_one_reg val; |
1443 | int size; | 1501 | int size; |
1444 | long int i; | ||
1445 | 1502 | ||
1446 | size = one_reg_size(reg->id); | 1503 | size = one_reg_size(reg->id); |
1447 | if (size > sizeof(val)) | 1504 | if (size > sizeof(val)) |
@@ -1449,16 +1506,24 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | |||
1449 | 1506 | ||
1450 | switch (reg->id) { | 1507 | switch (reg->id) { |
1451 | case KVM_REG_PPC_IAC1: | 1508 | case KVM_REG_PPC_IAC1: |
1509 | val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1); | ||
1510 | break; | ||
1452 | case KVM_REG_PPC_IAC2: | 1511 | case KVM_REG_PPC_IAC2: |
1512 | val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2); | ||
1513 | break; | ||
1514 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
1453 | case KVM_REG_PPC_IAC3: | 1515 | case KVM_REG_PPC_IAC3: |
1516 | val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3); | ||
1517 | break; | ||
1454 | case KVM_REG_PPC_IAC4: | 1518 | case KVM_REG_PPC_IAC4: |
1455 | i = reg->id - KVM_REG_PPC_IAC1; | 1519 | val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4); |
1456 | val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac[i]); | ||
1457 | break; | 1520 | break; |
1521 | #endif | ||
1458 | case KVM_REG_PPC_DAC1: | 1522 | case KVM_REG_PPC_DAC1: |
1523 | val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1); | ||
1524 | break; | ||
1459 | case KVM_REG_PPC_DAC2: | 1525 | case KVM_REG_PPC_DAC2: |
1460 | i = reg->id - KVM_REG_PPC_DAC1; | 1526 | val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2); |
1461 | val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac[i]); | ||
1462 | break; | 1527 | break; |
1463 | case KVM_REG_PPC_EPR: { | 1528 | case KVM_REG_PPC_EPR: { |
1464 | u32 epr = get_guest_epr(vcpu); | 1529 | u32 epr = get_guest_epr(vcpu); |
@@ -1477,10 +1542,13 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | |||
1477 | val = get_reg_val(reg->id, vcpu->arch.tsr); | 1542 | val = get_reg_val(reg->id, vcpu->arch.tsr); |
1478 | break; | 1543 | break; |
1479 | case KVM_REG_PPC_DEBUG_INST: | 1544 | case KVM_REG_PPC_DEBUG_INST: |
1480 | val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV); | 1545 | val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG); |
1546 | break; | ||
1547 | case KVM_REG_PPC_VRSAVE: | ||
1548 | val = get_reg_val(reg->id, vcpu->arch.vrsave); | ||
1481 | break; | 1549 | break; |
1482 | default: | 1550 | default: |
1483 | r = kvmppc_get_one_reg(vcpu, reg->id, &val); | 1551 | r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val); |
1484 | break; | 1552 | break; |
1485 | } | 1553 | } |
1486 | 1554 | ||
@@ -1498,7 +1566,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | |||
1498 | int r = 0; | 1566 | int r = 0; |
1499 | union kvmppc_one_reg val; | 1567 | union kvmppc_one_reg val; |
1500 | int size; | 1568 | int size; |
1501 | long int i; | ||
1502 | 1569 | ||
1503 | size = one_reg_size(reg->id); | 1570 | size = one_reg_size(reg->id); |
1504 | if (size > sizeof(val)) | 1571 | if (size > sizeof(val)) |
@@ -1509,16 +1576,24 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | |||
1509 | 1576 | ||
1510 | switch (reg->id) { | 1577 | switch (reg->id) { |
1511 | case KVM_REG_PPC_IAC1: | 1578 | case KVM_REG_PPC_IAC1: |
1579 | vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val); | ||
1580 | break; | ||
1512 | case KVM_REG_PPC_IAC2: | 1581 | case KVM_REG_PPC_IAC2: |
1582 | vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val); | ||
1583 | break; | ||
1584 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
1513 | case KVM_REG_PPC_IAC3: | 1585 | case KVM_REG_PPC_IAC3: |
1586 | vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val); | ||
1587 | break; | ||
1514 | case KVM_REG_PPC_IAC4: | 1588 | case KVM_REG_PPC_IAC4: |
1515 | i = reg->id - KVM_REG_PPC_IAC1; | 1589 | vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val); |
1516 | vcpu->arch.dbg_reg.iac[i] = set_reg_val(reg->id, val); | ||
1517 | break; | 1590 | break; |
1591 | #endif | ||
1518 | case KVM_REG_PPC_DAC1: | 1592 | case KVM_REG_PPC_DAC1: |
1593 | vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val); | ||
1594 | break; | ||
1519 | case KVM_REG_PPC_DAC2: | 1595 | case KVM_REG_PPC_DAC2: |
1520 | i = reg->id - KVM_REG_PPC_DAC1; | 1596 | vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val); |
1521 | vcpu->arch.dbg_reg.dac[i] = set_reg_val(reg->id, val); | ||
1522 | break; | 1597 | break; |
1523 | case KVM_REG_PPC_EPR: { | 1598 | case KVM_REG_PPC_EPR: { |
1524 | u32 new_epr = set_reg_val(reg->id, val); | 1599 | u32 new_epr = set_reg_val(reg->id, val); |
@@ -1552,20 +1627,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | |||
1552 | kvmppc_set_tcr(vcpu, tcr); | 1627 | kvmppc_set_tcr(vcpu, tcr); |
1553 | break; | 1628 | break; |
1554 | } | 1629 | } |
1630 | case KVM_REG_PPC_VRSAVE: | ||
1631 | vcpu->arch.vrsave = set_reg_val(reg->id, val); | ||
1632 | break; | ||
1555 | default: | 1633 | default: |
1556 | r = kvmppc_set_one_reg(vcpu, reg->id, &val); | 1634 | r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val); |
1557 | break; | 1635 | break; |
1558 | } | 1636 | } |
1559 | 1637 | ||
1560 | return r; | 1638 | return r; |
1561 | } | 1639 | } |
1562 | 1640 | ||
1563 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | ||
1564 | struct kvm_guest_debug *dbg) | ||
1565 | { | ||
1566 | return -EINVAL; | ||
1567 | } | ||
1568 | |||
1569 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | 1641 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
1570 | { | 1642 | { |
1571 | return -ENOTSUPP; | 1643 | return -ENOTSUPP; |
@@ -1590,12 +1662,12 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) | |||
1590 | return -ENOTSUPP; | 1662 | return -ENOTSUPP; |
1591 | } | 1663 | } |
1592 | 1664 | ||
1593 | void kvmppc_core_free_memslot(struct kvm_memory_slot *free, | 1665 | void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, |
1594 | struct kvm_memory_slot *dont) | 1666 | struct kvm_memory_slot *dont) |
1595 | { | 1667 | { |
1596 | } | 1668 | } |
1597 | 1669 | ||
1598 | int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, | 1670 | int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, |
1599 | unsigned long npages) | 1671 | unsigned long npages) |
1600 | { | 1672 | { |
1601 | return 0; | 1673 | return 0; |
@@ -1671,6 +1743,157 @@ void kvmppc_decrementer_func(unsigned long data) | |||
1671 | kvmppc_set_tsr_bits(vcpu, TSR_DIS); | 1743 | kvmppc_set_tsr_bits(vcpu, TSR_DIS); |
1672 | } | 1744 | } |
1673 | 1745 | ||
1746 | static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg, | ||
1747 | uint64_t addr, int index) | ||
1748 | { | ||
1749 | switch (index) { | ||
1750 | case 0: | ||
1751 | dbg_reg->dbcr0 |= DBCR0_IAC1; | ||
1752 | dbg_reg->iac1 = addr; | ||
1753 | break; | ||
1754 | case 1: | ||
1755 | dbg_reg->dbcr0 |= DBCR0_IAC2; | ||
1756 | dbg_reg->iac2 = addr; | ||
1757 | break; | ||
1758 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | ||
1759 | case 2: | ||
1760 | dbg_reg->dbcr0 |= DBCR0_IAC3; | ||
1761 | dbg_reg->iac3 = addr; | ||
1762 | break; | ||
1763 | case 3: | ||
1764 | dbg_reg->dbcr0 |= DBCR0_IAC4; | ||
1765 | dbg_reg->iac4 = addr; | ||
1766 | break; | ||
1767 | #endif | ||
1768 | default: | ||
1769 | return -EINVAL; | ||
1770 | } | ||
1771 | |||
1772 | dbg_reg->dbcr0 |= DBCR0_IDM; | ||
1773 | return 0; | ||
1774 | } | ||
1775 | |||
1776 | static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr, | ||
1777 | int type, int index) | ||
1778 | { | ||
1779 | switch (index) { | ||
1780 | case 0: | ||
1781 | if (type & KVMPPC_DEBUG_WATCH_READ) | ||
1782 | dbg_reg->dbcr0 |= DBCR0_DAC1R; | ||
1783 | if (type & KVMPPC_DEBUG_WATCH_WRITE) | ||
1784 | dbg_reg->dbcr0 |= DBCR0_DAC1W; | ||
1785 | dbg_reg->dac1 = addr; | ||
1786 | break; | ||
1787 | case 1: | ||
1788 | if (type & KVMPPC_DEBUG_WATCH_READ) | ||
1789 | dbg_reg->dbcr0 |= DBCR0_DAC2R; | ||
1790 | if (type & KVMPPC_DEBUG_WATCH_WRITE) | ||
1791 | dbg_reg->dbcr0 |= DBCR0_DAC2W; | ||
1792 | dbg_reg->dac2 = addr; | ||
1793 | break; | ||
1794 | default: | ||
1795 | return -EINVAL; | ||
1796 | } | ||
1797 | |||
1798 | dbg_reg->dbcr0 |= DBCR0_IDM; | ||
1799 | return 0; | ||
1800 | } | ||
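[note] The two helpers above hand out IAC/DAC slots in order and accumulate enable bits in the shadow DBCR0, always OR-ing in DBCR0_IDM so internal debug mode is armed. An illustrative call sequence (addresses and indices made up for the example; the functions are static to booke.c, so this shows how the set_guest_debug loop below uses them, not a public API):

struct debug_reg dbg = { 0 };

kvmppc_booke_add_breakpoint(&dbg, 0x1000, 0);		/* sets DBCR0_IAC1 */
kvmppc_booke_add_breakpoint(&dbg, 0x2000, 1);		/* sets DBCR0_IAC2 */
kvmppc_booke_add_watchpoint(&dbg, 0x3000,
			    KVMPPC_DEBUG_WATCH_WRITE, 0);	/* sets DBCR0_DAC1W */
/* dbg.dbcr0 == DBCR0_IAC1 | DBCR0_IAC2 | DBCR0_DAC1W | DBCR0_IDM */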
1801 | void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set) | ||
1802 | { | ||
1803 | /* XXX: Add similar MSR protection for BookE-PR */ | ||
1804 | #ifdef CONFIG_KVM_BOOKE_HV | ||
1805 | BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP)); | ||
1806 | if (set) { | ||
1807 | if (prot_bitmap & MSR_UCLE) | ||
1808 | vcpu->arch.shadow_msrp |= MSRP_UCLEP; | ||
1809 | if (prot_bitmap & MSR_DE) | ||
1810 | vcpu->arch.shadow_msrp |= MSRP_DEP; | ||
1811 | if (prot_bitmap & MSR_PMM) | ||
1812 | vcpu->arch.shadow_msrp |= MSRP_PMMP; | ||
1813 | } else { | ||
1814 | if (prot_bitmap & MSR_UCLE) | ||
1815 | vcpu->arch.shadow_msrp &= ~MSRP_UCLEP; | ||
1816 | if (prot_bitmap & MSR_DE) | ||
1817 | vcpu->arch.shadow_msrp &= ~MSRP_DEP; | ||
1818 | if (prot_bitmap & MSR_PMM) | ||
1819 | vcpu->arch.shadow_msrp &= ~MSRP_PMMP; | ||
1820 | } | ||
1821 | #endif | ||
1822 | } | ||
1823 | |||
1824 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | ||
1825 | struct kvm_guest_debug *dbg) | ||
1826 | { | ||
1827 | struct debug_reg *dbg_reg; | ||
1828 | int n, b = 0, w = 0; | ||
1829 | |||
1830 | if (!(dbg->control & KVM_GUESTDBG_ENABLE)) { | ||
1831 | vcpu->arch.shadow_dbg_reg.dbcr0 = 0; | ||
1832 | vcpu->guest_debug = 0; | ||
1833 | kvm_guest_protect_msr(vcpu, MSR_DE, false); | ||
1834 | return 0; | ||
1835 | } | ||
1836 | |||
1837 | kvm_guest_protect_msr(vcpu, MSR_DE, true); | ||
1838 | vcpu->guest_debug = dbg->control; | ||
1839 | vcpu->arch.shadow_dbg_reg.dbcr0 = 0; | ||
1840 | /* Set DBCR0_EDM in the guest-visible DBCR0 register. */ | ||
1841 | vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM; | ||
1842 | |||
1843 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | ||
1844 | vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC; | ||
1845 | |||
1846 | /* Code below handles only HW breakpoints */ | ||
1847 | dbg_reg = &(vcpu->arch.shadow_dbg_reg); | ||
1848 | |||
1849 | #ifdef CONFIG_KVM_BOOKE_HV | ||
1850 | /* | ||
1851 | * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1; | ||
1852 | * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0. | ||
1853 | */ | ||
1854 | dbg_reg->dbcr1 = 0; | ||
1855 | dbg_reg->dbcr2 = 0; | ||
1856 | #else | ||
1857 | /* | ||
1858 | * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1. | ||
1859 | * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR | ||
1860 | * is set. | ||
1861 | */ | ||
1862 | dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US | | ||
1863 | DBCR1_IAC4US; | ||
1864 | dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; | ||
1865 | #endif | ||
1866 | |||
1867 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) | ||
1868 | return 0; | ||
1869 | |||
1870 | for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) { | ||
1871 | uint64_t addr = dbg->arch.bp[n].addr; | ||
1872 | uint32_t type = dbg->arch.bp[n].type; | ||
1873 | |||
1874 | if (type == KVMPPC_DEBUG_NONE) | ||
1875 | continue; | ||
1876 | |||
1877 | if (type & ~(KVMPPC_DEBUG_WATCH_READ | | ||
1878 | KVMPPC_DEBUG_WATCH_WRITE | | ||
1879 | KVMPPC_DEBUG_BREAKPOINT)) | ||
1880 | return -EINVAL; | ||
1881 | |||
1882 | if (type & KVMPPC_DEBUG_BREAKPOINT) { | ||
1883 | /* Setting H/W breakpoint */ | ||
1884 | if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++)) | ||
1885 | return -EINVAL; | ||
1886 | } else { | ||
1887 | /* Setting H/W watchpoint */ | ||
1888 | if (kvmppc_booke_add_watchpoint(dbg_reg, addr, | ||
1889 | type, w++)) | ||
1890 | return -EINVAL; | ||
1891 | } | ||
1892 | } | ||
1893 | |||
1894 | return 0; | ||
1895 | } | ||
1896 | |||
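[note] With that, user space can arm the stub through the standard ioctl. A hedged sketch (vcpu_fd and the address are assumptions; error handling elided):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: arm one hardware instruction breakpoint via the handler above.
 * On a hit, KVM_RUN returns with run->exit_reason == KVM_EXIT_DEBUG. */
static int set_hw_breakpoint(int vcpu_fd, uint64_t addr)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
	dbg.arch.bp[0].addr = addr;
	dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}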
1674 | void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 1897 | void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
1675 | { | 1898 | { |
1676 | vcpu->cpu = smp_processor_id(); | 1899 | vcpu->cpu = smp_processor_id(); |
@@ -1681,6 +1904,44 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu) | |||
1681 | { | 1904 | { |
1682 | current->thread.kvm_vcpu = NULL; | 1905 | current->thread.kvm_vcpu = NULL; |
1683 | vcpu->cpu = -1; | 1906 | vcpu->cpu = -1; |
1907 | |||
1908 | /* Clear pending debug event in DBSR */ | ||
1909 | kvmppc_clear_dbsr(); | ||
1910 | } | ||
1911 | |||
1912 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) | ||
1913 | { | ||
1914 | vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu); | ||
1915 | } | ||
1916 | |||
1917 | int kvmppc_core_init_vm(struct kvm *kvm) | ||
1918 | { | ||
1919 | return kvm->arch.kvm_ops->init_vm(kvm); | ||
1920 | } | ||
1921 | |||
1922 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | ||
1923 | { | ||
1924 | return kvm->arch.kvm_ops->vcpu_create(kvm, id); | ||
1925 | } | ||
1926 | |||
1927 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | ||
1928 | { | ||
1929 | vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu); | ||
1930 | } | ||
1931 | |||
1932 | void kvmppc_core_destroy_vm(struct kvm *kvm) | ||
1933 | { | ||
1934 | kvm->arch.kvm_ops->destroy_vm(kvm); | ||
1935 | } | ||
1936 | |||
1937 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
1938 | { | ||
1939 | vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu); | ||
1940 | } | ||
1941 | |||
1942 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | ||
1943 | { | ||
1944 | vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu); | ||
1684 | } | 1945 | } |
1685 | 1946 | ||
1686 | int __init kvmppc_booke_init(void) | 1947 | int __init kvmppc_booke_init(void) |
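[note] The block of one-line wrappers above is the heart of this series: generic code no longer binds at link time to a single flavor's kvmppc_core_* implementation but dispatches through the ops table installed at VM creation. A reduced model of the pattern (all names here are illustrative, not the kernel's):

/* Each VM carries a pointer to the ops of whichever flavor created it;
 * generic entry points become one-line indirect calls. */
struct vm;

struct vm_ops {
	int  (*init_vm)(struct vm *vm);
	void (*destroy_vm)(struct vm *vm);
};

struct vm {
	const struct vm_ops *ops;
};

static int generic_init_vm(struct vm *vm)
{
	return vm->ops->init_vm(vm);	/* flavor chosen when the VM was made */
}

This is what allows the PR and HV implementations (and, on booke, the e500 flavors) to be built as separate modules against one core.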
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index 5fd1ba693579..09bfd9bc7cf8 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h | |||
@@ -99,6 +99,30 @@ enum int_class { | |||
99 | 99 | ||
100 | void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); | 100 | void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); |
101 | 101 | ||
102 | extern void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu); | ||
103 | extern int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
104 | unsigned int inst, int *advance); | ||
105 | extern int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, | ||
106 | ulong spr_val); | ||
107 | extern int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, | ||
108 | ulong *spr_val); | ||
109 | extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu); | ||
110 | extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, | ||
111 | struct kvm_vcpu *vcpu, | ||
112 | unsigned int inst, int *advance); | ||
113 | extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, | ||
114 | ulong spr_val); | ||
115 | extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, | ||
116 | ulong *spr_val); | ||
125 | |||
102 | /* | 126 | /* |
103 | * Load up guest vcpu FP state if it's needed. | 127 | * Load up guest vcpu FP state if it's needed. |
104 | * It also set the MSR_FP in thread so that host know | 128 | * It also set the MSR_FP in thread so that host know |
@@ -129,4 +153,9 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu) | |||
129 | giveup_fpu(current); | 153 | giveup_fpu(current); |
130 | #endif | 154 | #endif |
131 | } | 155 | } |
156 | |||
157 | static inline void kvmppc_clear_dbsr(void) | ||
158 | { | ||
159 | mtspr(SPRN_DBSR, mfspr(SPRN_DBSR)); | ||
160 | } | ||
132 | #endif /* __KVM_BOOKE_H__ */ | 161 | #endif /* __KVM_BOOKE_H__ */ |
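[note] kvmppc_clear_dbsr() relies on the Book E rule that DBSR is write-one-to-clear: writing back the value just read acknowledges exactly the events that were observed, leaving any bit that sets in between untouched. Expanded for clarity:

unsigned long dbsr = mfspr(SPRN_DBSR);	/* snapshot pending debug events */
mtspr(SPRN_DBSR, dbsr);			/* W1C: clears only the observed bits */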
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index ce6b73c29612..497b142f651c 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c | |||
@@ -305,7 +305,7 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) | |||
305 | { | 305 | { |
306 | } | 306 | } |
307 | 307 | ||
308 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 308 | static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu) |
309 | { | 309 | { |
310 | kvmppc_booke_vcpu_load(vcpu, cpu); | 310 | kvmppc_booke_vcpu_load(vcpu, cpu); |
311 | 311 | ||
@@ -313,7 +313,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
313 | kvmppc_e500_recalc_shadow_pid(to_e500(vcpu)); | 313 | kvmppc_e500_recalc_shadow_pid(to_e500(vcpu)); |
314 | } | 314 | } |
315 | 315 | ||
316 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | 316 | static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu) |
317 | { | 317 | { |
318 | #ifdef CONFIG_SPE | 318 | #ifdef CONFIG_SPE |
319 | if (vcpu->arch.shadow_msr & MSR_SPE) | 319 | if (vcpu->arch.shadow_msr & MSR_SPE) |
@@ -367,7 +367,8 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | |||
367 | return 0; | 367 | return 0; |
368 | } | 368 | } |
369 | 369 | ||
370 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 370 | static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu, |
371 | struct kvm_sregs *sregs) | ||
371 | { | 372 | { |
372 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 373 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
373 | 374 | ||
@@ -388,9 +389,11 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
388 | 389 | ||
389 | kvmppc_get_sregs_ivor(vcpu, sregs); | 390 | kvmppc_get_sregs_ivor(vcpu, sregs); |
390 | kvmppc_get_sregs_e500_tlb(vcpu, sregs); | 391 | kvmppc_get_sregs_e500_tlb(vcpu, sregs); |
392 | return 0; | ||
391 | } | 393 | } |
392 | 394 | ||
393 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 395 | static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu, |
396 | struct kvm_sregs *sregs) | ||
394 | { | 397 | { |
395 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 398 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
396 | int ret; | 399 | int ret; |
@@ -425,21 +428,22 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
425 | return kvmppc_set_sregs_ivor(vcpu, sregs); | 428 | return kvmppc_set_sregs_ivor(vcpu, sregs); |
426 | } | 429 | } |
427 | 430 | ||
428 | int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, | 431 | static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id, |
429 | union kvmppc_one_reg *val) | 432 | union kvmppc_one_reg *val) |
430 | { | 433 | { |
431 | int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); | 434 | int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); |
432 | return r; | 435 | return r; |
433 | } | 436 | } |
434 | 437 | ||
435 | int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, | 438 | static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id, |
436 | union kvmppc_one_reg *val) | 439 | union kvmppc_one_reg *val) |
437 | { | 440 | { |
438 | int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); | 441 | int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); |
439 | return r; | 442 | return r; |
440 | } | 443 | } |
441 | 444 | ||
442 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | 445 | static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm, |
446 | unsigned int id) | ||
443 | { | 447 | { |
444 | struct kvmppc_vcpu_e500 *vcpu_e500; | 448 | struct kvmppc_vcpu_e500 *vcpu_e500; |
445 | struct kvm_vcpu *vcpu; | 449 | struct kvm_vcpu *vcpu; |
@@ -481,7 +485,7 @@ out: | |||
481 | return ERR_PTR(err); | 485 | return ERR_PTR(err); |
482 | } | 486 | } |
483 | 487 | ||
484 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | 488 | static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu) |
485 | { | 489 | { |
486 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 490 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
487 | 491 | ||
@@ -492,15 +496,32 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
492 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); | 496 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); |
493 | } | 497 | } |
494 | 498 | ||
495 | int kvmppc_core_init_vm(struct kvm *kvm) | 499 | static int kvmppc_core_init_vm_e500(struct kvm *kvm) |
496 | { | 500 | { |
497 | return 0; | 501 | return 0; |
498 | } | 502 | } |
499 | 503 | ||
500 | void kvmppc_core_destroy_vm(struct kvm *kvm) | 504 | static void kvmppc_core_destroy_vm_e500(struct kvm *kvm) |
501 | { | 505 | { |
502 | } | 506 | } |
503 | 507 | ||
508 | static struct kvmppc_ops kvm_ops_e500 = { | ||
509 | .get_sregs = kvmppc_core_get_sregs_e500, | ||
510 | .set_sregs = kvmppc_core_set_sregs_e500, | ||
511 | .get_one_reg = kvmppc_get_one_reg_e500, | ||
512 | .set_one_reg = kvmppc_set_one_reg_e500, | ||
513 | .vcpu_load = kvmppc_core_vcpu_load_e500, | ||
514 | .vcpu_put = kvmppc_core_vcpu_put_e500, | ||
515 | .vcpu_create = kvmppc_core_vcpu_create_e500, | ||
516 | .vcpu_free = kvmppc_core_vcpu_free_e500, | ||
517 | .mmu_destroy = kvmppc_mmu_destroy_e500, | ||
518 | .init_vm = kvmppc_core_init_vm_e500, | ||
519 | .destroy_vm = kvmppc_core_destroy_vm_e500, | ||
520 | .emulate_op = kvmppc_core_emulate_op_e500, | ||
521 | .emulate_mtspr = kvmppc_core_emulate_mtspr_e500, | ||
522 | .emulate_mfspr = kvmppc_core_emulate_mfspr_e500, | ||
523 | }; | ||
524 | |||
504 | static int __init kvmppc_e500_init(void) | 525 | static int __init kvmppc_e500_init(void) |
505 | { | 526 | { |
506 | int r, i; | 527 | int r, i; |
@@ -512,11 +533,11 @@ static int __init kvmppc_e500_init(void) | |||
512 | 533 | ||
513 | r = kvmppc_core_check_processor_compat(); | 534 | r = kvmppc_core_check_processor_compat(); |
514 | if (r) | 535 | if (r) |
515 | return r; | 536 | goto err_out; |
516 | 537 | ||
517 | r = kvmppc_booke_init(); | 538 | r = kvmppc_booke_init(); |
518 | if (r) | 539 | if (r) |
519 | return r; | 540 | goto err_out; |
520 | 541 | ||
521 | /* copy extra E500 exception handlers */ | 542 | /* copy extra E500 exception handlers */ |
522 | ivor[0] = mfspr(SPRN_IVOR32); | 543 | ivor[0] = mfspr(SPRN_IVOR32); |
@@ -534,11 +555,19 @@ static int __init kvmppc_e500_init(void) | |||
534 | flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + | 555 | flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + |
535 | ivor[max_ivor] + handler_len); | 556 | ivor[max_ivor] + handler_len); |
536 | 557 | ||
537 | return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); | 558 | r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); |
559 | if (r) | ||
560 | goto err_out; | ||
561 | kvm_ops_e500.owner = THIS_MODULE; | ||
562 | kvmppc_pr_ops = &kvm_ops_e500; | ||
563 | |||
564 | err_out: | ||
565 | return r; | ||
538 | } | 566 | } |
539 | 567 | ||
540 | static void __exit kvmppc_e500_exit(void) | 568 | static void __exit kvmppc_e500_exit(void) |
541 | { | 569 | { |
570 | kvmppc_pr_ops = NULL; | ||
542 | kvmppc_booke_exit(); | 571 | kvmppc_booke_exit(); |
543 | } | 572 | } |
544 | 573 | ||
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index c2e5e98453a6..4fd9650eb018 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h | |||
@@ -117,7 +117,7 @@ static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu) | |||
117 | #define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW) | 117 | #define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW) |
118 | #define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW) | 118 | #define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW) |
119 | #define MAS2_ATTRIB_MASK \ | 119 | #define MAS2_ATTRIB_MASK \ |
120 | (MAS2_X0 | MAS2_X1) | 120 | (MAS2_X0 | MAS2_X1 | MAS2_E | MAS2_G) |
121 | #define MAS3_ATTRIB_MASK \ | 121 | #define MAS3_ATTRIB_MASK \ |
122 | (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \ | 122 | (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \ |
123 | | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) | 123 | | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) |
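[note] Widening MAS2_ATTRIB_MASK lets the guest's endianness (E) and guarded (G) attributes survive into the shadow TLB entry instead of being silently dropped; previously only the implementation-specific X0/X1 bits passed the filter. A standalone illustration of how such a whitelist is applied (the helper and the EPN mask value are for the example only):

#include <stdint.h>

#define EX_MAS2_EPN	0xfffff000ULL	/* effective page number field, 4K pages */

/* Illustrative only: the EPN comes from the mapping being built, while
 * attribute bits are filtered through the whitelist mask. */
static uint64_t shadow_mas2(uint64_t epn, uint64_t guest_mas2, uint64_t mask)
{
	return (epn & EX_MAS2_EPN) | (guest_mas2 & mask);
}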
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index b10a01243abd..89b7f821f6c4 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #define XOP_TLBRE 946 | 26 | #define XOP_TLBRE 946 |
27 | #define XOP_TLBWE 978 | 27 | #define XOP_TLBWE 978 |
28 | #define XOP_TLBILX 18 | 28 | #define XOP_TLBILX 18 |
29 | #define XOP_EHPRIV 270 | ||
29 | 30 | ||
30 | #ifdef CONFIG_KVM_E500MC | 31 | #ifdef CONFIG_KVM_E500MC |
31 | static int dbell2prio(ulong param) | 32 | static int dbell2prio(ulong param) |
@@ -82,8 +83,28 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) | |||
82 | } | 83 | } |
83 | #endif | 84 | #endif |
84 | 85 | ||
85 | int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | 86 | static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu, |
86 | unsigned int inst, int *advance) | 87 | unsigned int inst, int *advance) |
88 | { | ||
89 | int emulated = EMULATE_DONE; | ||
90 | |||
91 | switch (get_oc(inst)) { | ||
92 | case EHPRIV_OC_DEBUG: | ||
93 | run->exit_reason = KVM_EXIT_DEBUG; | ||
94 | run->debug.arch.address = vcpu->arch.pc; | ||
95 | run->debug.arch.status = 0; | ||
96 | kvmppc_account_exit(vcpu, DEBUG_EXITS); | ||
97 | emulated = EMULATE_EXIT_USER; | ||
98 | *advance = 0; | ||
99 | break; | ||
100 | default: | ||
101 | emulated = EMULATE_FAIL; | ||
102 | } | ||
103 | return emulated; | ||
104 | } | ||
105 | |||
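[note] ehpriv is the embedded-hypervisor "exit to hypervisor" instruction; its 15-bit OC field spans the RT/RA/RB slots and selects a software-defined service, and the debug stub claims EHPRIV_OC_DEBUG to bounce the vcpu out to user space. The get_oc() accessor used above is added elsewhere in this series; reproduced from memory (worth verifying against asm/disassemble.h):

/* OC occupies the 15 bits above the minor opcode in the instruction image */
static inline unsigned int get_oc(u32 inst)
{
	return (inst >> 11) & 0x7fff;
}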
106 | int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
107 | unsigned int inst, int *advance) | ||
87 | { | 108 | { |
88 | int emulated = EMULATE_DONE; | 109 | int emulated = EMULATE_DONE; |
89 | int ra = get_ra(inst); | 110 | int ra = get_ra(inst); |
@@ -130,6 +151,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
130 | emulated = kvmppc_e500_emul_tlbivax(vcpu, ea); | 151 | emulated = kvmppc_e500_emul_tlbivax(vcpu, ea); |
131 | break; | 152 | break; |
132 | 153 | ||
154 | case XOP_EHPRIV: | ||
155 | emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst, | ||
156 | advance); | ||
157 | break; | ||
158 | |||
133 | default: | 159 | default: |
134 | emulated = EMULATE_FAIL; | 160 | emulated = EMULATE_FAIL; |
135 | } | 161 | } |
@@ -146,7 +172,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
146 | return emulated; | 172 | return emulated; |
147 | } | 173 | } |
148 | 174 | ||
149 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | 175 | int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) |
150 | { | 176 | { |
151 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 177 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
152 | int emulated = EMULATE_DONE; | 178 | int emulated = EMULATE_DONE; |
@@ -237,7 +263,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | |||
237 | return emulated; | 263 | return emulated; |
238 | } | 264 | } |
239 | 265 | ||
240 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) | 266 | int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) |
241 | { | 267 | { |
242 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 268 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
243 | int emulated = EMULATE_DONE; | 269 | int emulated = EMULATE_DONE; |
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c index 6d6f153b6c1d..ebca6b88ea5e 100644 --- a/arch/powerpc/kvm/e500_mmu.c +++ b/arch/powerpc/kvm/e500_mmu.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #include <asm/kvm_ppc.h> | 32 | #include <asm/kvm_ppc.h> |
33 | 33 | ||
34 | #include "e500.h" | 34 | #include "e500.h" |
35 | #include "trace.h" | 35 | #include "trace_booke.h" |
36 | #include "timing.h" | 36 | #include "timing.h" |
37 | #include "e500_mmu_host.h" | 37 | #include "e500_mmu_host.h" |
38 | 38 | ||
@@ -536,7 +536,7 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index, | |||
536 | return get_tlb_raddr(gtlbe) | (eaddr & pgmask); | 536 | return get_tlb_raddr(gtlbe) | (eaddr & pgmask); |
537 | } | 537 | } |
538 | 538 | ||
539 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) | 539 | void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu) |
540 | { | 540 | { |
541 | } | 541 | } |
542 | 542 | ||
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index c65593abae8e..ecf2247b13be 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c | |||
@@ -32,10 +32,11 @@ | |||
32 | #include <asm/kvm_ppc.h> | 32 | #include <asm/kvm_ppc.h> |
33 | 33 | ||
34 | #include "e500.h" | 34 | #include "e500.h" |
35 | #include "trace.h" | ||
36 | #include "timing.h" | 35 | #include "timing.h" |
37 | #include "e500_mmu_host.h" | 36 | #include "e500_mmu_host.h" |
38 | 37 | ||
38 | #include "trace_booke.h" | ||
39 | |||
39 | #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) | 40 | #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) |
40 | 41 | ||
41 | static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; | 42 | static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; |
@@ -253,6 +254,9 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, | |||
253 | ref->pfn = pfn; | 254 | ref->pfn = pfn; |
254 | ref->flags |= E500_TLB_VALID; | 255 | ref->flags |= E500_TLB_VALID; |
255 | 256 | ||
257 | /* Mark the page accessed */ | ||
258 | kvm_set_pfn_accessed(pfn); | ||
259 | |||
256 | if (tlbe_is_writable(gtlbe)) | 260 | if (tlbe_is_writable(gtlbe)) |
257 | kvm_set_pfn_dirty(pfn); | 261 | kvm_set_pfn_dirty(pfn); |
258 | } | 262 | } |
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 19c8379575f7..4132cd2fc171 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c | |||
@@ -110,7 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) | |||
110 | 110 | ||
111 | static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); | 111 | static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); |
112 | 112 | ||
113 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 113 | static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu) |
114 | { | 114 | { |
115 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 115 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
116 | 116 | ||
@@ -147,7 +147,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
147 | kvmppc_load_guest_fp(vcpu); | 147 | kvmppc_load_guest_fp(vcpu); |
148 | } | 148 | } |
149 | 149 | ||
150 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | 150 | static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu) |
151 | { | 151 | { |
152 | vcpu->arch.eplc = mfspr(SPRN_EPLC); | 152 | vcpu->arch.eplc = mfspr(SPRN_EPLC); |
153 | vcpu->arch.epsc = mfspr(SPRN_EPSC); | 153 | vcpu->arch.epsc = mfspr(SPRN_EPSC); |
@@ -204,7 +204,8 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | |||
204 | return 0; | 204 | return 0; |
205 | } | 205 | } |
206 | 206 | ||
207 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 207 | static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu, |
208 | struct kvm_sregs *sregs) | ||
208 | { | 209 | { |
209 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 210 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
210 | 211 | ||
@@ -224,10 +225,11 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
224 | sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]; | 225 | sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]; |
225 | sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]; | 226 | sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]; |
226 | 227 | ||
227 | kvmppc_get_sregs_ivor(vcpu, sregs); | 228 | return kvmppc_get_sregs_ivor(vcpu, sregs); |
228 | } | 229 | } |
229 | 230 | ||
230 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 231 | static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu, |
232 | struct kvm_sregs *sregs) | ||
231 | { | 233 | { |
232 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 234 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
233 | int ret; | 235 | int ret; |
@@ -260,21 +262,22 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
260 | return kvmppc_set_sregs_ivor(vcpu, sregs); | 262 | return kvmppc_set_sregs_ivor(vcpu, sregs); |
261 | } | 263 | } |
262 | 264 | ||
263 | int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, | 265 | static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, |
264 | union kvmppc_one_reg *val) | 266 | union kvmppc_one_reg *val) |
265 | { | 267 | { |
266 | int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); | 268 | int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); |
267 | return r; | 269 | return r; |
268 | } | 270 | } |
269 | 271 | ||
270 | int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, | 272 | static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, |
271 | union kvmppc_one_reg *val) | 273 | union kvmppc_one_reg *val) |
272 | { | 274 | { |
273 | int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val); | 275 | int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val); |
274 | return r; | 276 | return r; |
275 | } | 277 | } |
276 | 278 | ||
277 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | 279 | static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm, |
280 | unsigned int id) | ||
278 | { | 281 | { |
279 | struct kvmppc_vcpu_e500 *vcpu_e500; | 282 | struct kvmppc_vcpu_e500 *vcpu_e500; |
280 | struct kvm_vcpu *vcpu; | 283 | struct kvm_vcpu *vcpu; |
@@ -315,7 +318,7 @@ out: | |||
315 | return ERR_PTR(err); | 318 | return ERR_PTR(err); |
316 | } | 319 | } |
317 | 320 | ||
318 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | 321 | static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu) |
319 | { | 322 | { |
320 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 323 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
321 | 324 | ||
@@ -325,7 +328,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
325 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); | 328 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); |
326 | } | 329 | } |
327 | 330 | ||
328 | int kvmppc_core_init_vm(struct kvm *kvm) | 331 | static int kvmppc_core_init_vm_e500mc(struct kvm *kvm) |
329 | { | 332 | { |
330 | int lpid; | 333 | int lpid; |
331 | 334 | ||
@@ -337,27 +340,52 @@ int kvmppc_core_init_vm(struct kvm *kvm) | |||
337 | return 0; | 340 | return 0; |
338 | } | 341 | } |
339 | 342 | ||
340 | void kvmppc_core_destroy_vm(struct kvm *kvm) | 343 | static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm) |
341 | { | 344 | { |
342 | kvmppc_free_lpid(kvm->arch.lpid); | 345 | kvmppc_free_lpid(kvm->arch.lpid); |
343 | } | 346 | } |
344 | 347 | ||
348 | static struct kvmppc_ops kvm_ops_e500mc = { | ||
349 | .get_sregs = kvmppc_core_get_sregs_e500mc, | ||
350 | .set_sregs = kvmppc_core_set_sregs_e500mc, | ||
351 | .get_one_reg = kvmppc_get_one_reg_e500mc, | ||
352 | .set_one_reg = kvmppc_set_one_reg_e500mc, | ||
353 | .vcpu_load = kvmppc_core_vcpu_load_e500mc, | ||
354 | .vcpu_put = kvmppc_core_vcpu_put_e500mc, | ||
355 | .vcpu_create = kvmppc_core_vcpu_create_e500mc, | ||
356 | .vcpu_free = kvmppc_core_vcpu_free_e500mc, | ||
357 | .mmu_destroy = kvmppc_mmu_destroy_e500, | ||
358 | .init_vm = kvmppc_core_init_vm_e500mc, | ||
359 | .destroy_vm = kvmppc_core_destroy_vm_e500mc, | ||
360 | .emulate_op = kvmppc_core_emulate_op_e500, | ||
361 | .emulate_mtspr = kvmppc_core_emulate_mtspr_e500, | ||
362 | .emulate_mfspr = kvmppc_core_emulate_mfspr_e500, | ||
363 | }; | ||
364 | |||
345 | static int __init kvmppc_e500mc_init(void) | 365 | static int __init kvmppc_e500mc_init(void) |
346 | { | 366 | { |
347 | int r; | 367 | int r; |
348 | 368 | ||
349 | r = kvmppc_booke_init(); | 369 | r = kvmppc_booke_init(); |
350 | if (r) | 370 | if (r) |
351 | return r; | 371 | goto err_out; |
352 | 372 | ||
353 | kvmppc_init_lpid(64); | 373 | kvmppc_init_lpid(64); |
354 | kvmppc_claim_lpid(0); /* host */ | 374 | kvmppc_claim_lpid(0); /* host */ |
355 | 375 | ||
356 | return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); | 376 | r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); |
377 | if (r) | ||
378 | goto err_out; | ||
379 | kvm_ops_e500mc.owner = THIS_MODULE; | ||
380 | kvmppc_pr_ops = &kvm_ops_e500mc; | ||
381 | |||
382 | err_out: | ||
383 | return r; | ||
357 | } | 384 | } |
358 | 385 | ||
359 | static void __exit kvmppc_e500mc_exit(void) | 386 | static void __exit kvmppc_e500mc_exit(void) |
360 | { | 387 | { |
388 | kvmppc_pr_ops = NULL; | ||
361 | kvmppc_booke_exit(); | 389 | kvmppc_booke_exit(); |
362 | } | 390 | } |
363 | 391 | ||
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 751cd45f65a0..2f9a0873b44f 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -130,8 +130,8 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
130 | case SPRN_PIR: break; | 130 | case SPRN_PIR: break; |
131 | 131 | ||
132 | default: | 132 | default: |
133 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, | 133 | emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn, |
134 | spr_val); | 134 | spr_val); |
135 | if (emulated == EMULATE_FAIL) | 135 | if (emulated == EMULATE_FAIL) |
136 | printk(KERN_INFO "mtspr: unknown spr " | 136 | printk(KERN_INFO "mtspr: unknown spr " |
137 | "0x%x\n", sprn); | 137 | "0x%x\n", sprn); |
@@ -191,8 +191,8 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
191 | spr_val = kvmppc_get_dec(vcpu, get_tb()); | 191 | spr_val = kvmppc_get_dec(vcpu, get_tb()); |
192 | break; | 192 | break; |
193 | default: | 193 | default: |
194 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, | 194 | emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn, |
195 | &spr_val); | 195 | &spr_val); |
196 | if (unlikely(emulated == EMULATE_FAIL)) { | 196 | if (unlikely(emulated == EMULATE_FAIL)) { |
197 | printk(KERN_INFO "mfspr: unknown spr " | 197 | printk(KERN_INFO "mfspr: unknown spr " |
198 | "0x%x\n", sprn); | 198 | "0x%x\n", sprn); |
@@ -464,7 +464,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
464 | } | 464 | } |
465 | 465 | ||
466 | if (emulated == EMULATE_FAIL) { | 466 | if (emulated == EMULATE_FAIL) { |
467 | emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); | 467 | emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst, |
468 | &advance); | ||
468 | if (emulated == EMULATE_AGAIN) { | 469 | if (emulated == EMULATE_AGAIN) { |
469 | advance = 0; | 470 | advance = 0; |
470 | } else if (emulated == EMULATE_FAIL) { | 471 | } else if (emulated == EMULATE_FAIL) { |
@@ -483,3 +484,4 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
483 | 484 | ||
484 | return emulated; | 485 | return emulated; |
485 | } | 486 | } |
487 | EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction); | ||
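[note] After this change the shared emulator in emulate.c decodes only architecture-generic instructions; anything it rejects is offered to the flavor's emulate_op hook before EMULATE_FAIL reaches the caller. A condensed model of the chain (emulate_generic stands in for the shared decoder; not the kernel's exact control flow):

static int emulate(struct kvm_run *run, struct kvm_vcpu *vcpu,
		   unsigned int inst)
{
	int advance = 1;
	int ret = emulate_generic(run, vcpu, inst, &advance);	/* shared decoder */

	if (ret == EMULATE_FAIL)	/* not generic: ask the flavor's hook */
		ret = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu,
							  inst, &advance);
	return ret;
}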
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 07c0106fab76..9ae97686e9f4 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/file.h> | 28 | #include <linux/file.h> |
29 | #include <linux/module.h> | ||
29 | #include <asm/cputable.h> | 30 | #include <asm/cputable.h> |
30 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
31 | #include <asm/kvm_ppc.h> | 32 | #include <asm/kvm_ppc.h> |
@@ -39,6 +40,12 @@ | |||
39 | #define CREATE_TRACE_POINTS | 40 | #define CREATE_TRACE_POINTS |
40 | #include "trace.h" | 41 | #include "trace.h" |
41 | 42 | ||
43 | struct kvmppc_ops *kvmppc_hv_ops; | ||
44 | EXPORT_SYMBOL_GPL(kvmppc_hv_ops); | ||
45 | struct kvmppc_ops *kvmppc_pr_ops; | ||
46 | EXPORT_SYMBOL_GPL(kvmppc_pr_ops); | ||
47 | |||
48 | |||
42 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) | 49 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) |
43 | { | 50 | { |
44 | return !!(v->arch.pending_exceptions) || | 51 | return !!(v->arch.pending_exceptions) || |
@@ -50,7 +57,6 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) | |||
50 | return 1; | 57 | return 1; |
51 | } | 58 | } |
52 | 59 | ||
53 | #ifndef CONFIG_KVM_BOOK3S_64_HV | ||
54 | /* | 60 | /* |
55 | * Common checks before entering the guest world. Call with interrupts | 61 | * Common checks before entering the guest world. Call with interrupts |
56 | * disabled. | 62 | * disabled. |
@@ -125,7 +131,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) | |||
125 | 131 | ||
126 | return r; | 132 | return r; |
127 | } | 133 | } |
128 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ | 134 | EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter); |
129 | 135 | ||
130 | int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) | 136 | int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) |
131 | { | 137 | { |
@@ -179,6 +185,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) | |||
179 | 185 | ||
180 | return r; | 186 | return r; |
181 | } | 187 | } |
188 | EXPORT_SYMBOL_GPL(kvmppc_kvm_pv); | ||
182 | 189 | ||
183 | int kvmppc_sanity_check(struct kvm_vcpu *vcpu) | 190 | int kvmppc_sanity_check(struct kvm_vcpu *vcpu) |
184 | { | 191 | { |
@@ -192,11 +199,9 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu) | |||
192 | if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) | 199 | if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) |
193 | goto out; | 200 | goto out; |
194 | 201 | ||
195 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
196 | /* HV KVM can only do PAPR mode for now */ | 202 | /* HV KVM can only do PAPR mode for now */ |
197 | if (!vcpu->arch.papr_enabled) | 203 | if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) |
198 | goto out; | 204 | goto out; |
199 | #endif | ||
200 | 205 | ||
201 | #ifdef CONFIG_KVM_BOOKE_HV | 206 | #ifdef CONFIG_KVM_BOOKE_HV |
202 | if (!cpu_has_feature(CPU_FTR_EMB_HV)) | 207 | if (!cpu_has_feature(CPU_FTR_EMB_HV)) |
@@ -209,6 +214,7 @@ out: | |||
209 | vcpu->arch.sane = r; | 214 | vcpu->arch.sane = r; |
210 | return r ? 0 : -EINVAL; | 215 | return r ? 0 : -EINVAL; |
211 | } | 216 | } |
217 | EXPORT_SYMBOL_GPL(kvmppc_sanity_check); | ||
212 | 218 | ||
213 | int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) | 219 | int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) |
214 | { | 220 | { |
@@ -243,6 +249,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
243 | 249 | ||
244 | return r; | 250 | return r; |
245 | } | 251 | } |
252 | EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio); | ||
246 | 253 | ||
247 | int kvm_arch_hardware_enable(void *garbage) | 254 | int kvm_arch_hardware_enable(void *garbage) |
248 | { | 255 | { |
@@ -269,10 +276,35 @@ void kvm_arch_check_processor_compat(void *rtn) | |||
269 | 276 | ||
270 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | 277 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) |
271 | { | 278 | { |
272 | if (type) | 279 | struct kvmppc_ops *kvm_ops = NULL; |
273 | return -EINVAL; | 280 | /* |
274 | 281 | * If both HV and PR are enabled, the default is HV | |
282 | */ | ||
283 | if (type == 0) { | ||
284 | if (kvmppc_hv_ops) | ||
285 | kvm_ops = kvmppc_hv_ops; | ||
286 | else | ||
287 | kvm_ops = kvmppc_pr_ops; | ||
288 | if (!kvm_ops) | ||
289 | goto err_out; | ||
290 | } else if (type == KVM_VM_PPC_HV) { | ||
291 | if (!kvmppc_hv_ops) | ||
292 | goto err_out; | ||
293 | kvm_ops = kvmppc_hv_ops; | ||
294 | } else if (type == KVM_VM_PPC_PR) { | ||
295 | if (!kvmppc_pr_ops) | ||
296 | goto err_out; | ||
297 | kvm_ops = kvmppc_pr_ops; | ||
298 | } else | ||
299 | goto err_out; | ||
300 | |||
301 | if (kvm_ops->owner && !try_module_get(kvm_ops->owner)) | ||
302 | return -ENOENT; | ||
303 | |||
304 | kvm->arch.kvm_ops = kvm_ops; | ||
275 | return kvmppc_core_init_vm(kvm); | 305 | return kvmppc_core_init_vm(kvm); |
306 | err_out: | ||
307 | return -EINVAL; | ||
276 | } | 308 | } |
277 | 309 | ||
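[note] kvm_arch_init_vm() now consults the VM-type argument of KVM_CREATE_VM: 0 keeps the old behaviour (HV preferred when both flavors are loaded), while KVM_VM_PPC_HV and KVM_VM_PPC_PR force a flavor and fail if it is absent. A user-space sketch (error handling elided):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: explicitly request a PR-mode VM; returns the VM fd or -1. */
static int create_pr_vm(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	if (kvm_fd < 0)
		return -1;
	return ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_PR);
}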
278 | void kvm_arch_destroy_vm(struct kvm *kvm) | 310 | void kvm_arch_destroy_vm(struct kvm *kvm) |
@@ -292,6 +324,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm) | |||
292 | kvmppc_core_destroy_vm(kvm); | 324 | kvmppc_core_destroy_vm(kvm); |
293 | 325 | ||
294 | mutex_unlock(&kvm->lock); | 326 | mutex_unlock(&kvm->lock); |
327 | |||
328 | /* drop the module reference */ | ||
329 | module_put(kvm->arch.kvm_ops->owner); | ||
295 | } | 330 | } |
296 | 331 | ||
297 | void kvm_arch_sync_events(struct kvm *kvm) | 332 | void kvm_arch_sync_events(struct kvm *kvm) |
@@ -301,6 +336,10 @@ void kvm_arch_sync_events(struct kvm *kvm) | |||
301 | int kvm_dev_ioctl_check_extension(long ext) | 336 | int kvm_dev_ioctl_check_extension(long ext) |
302 | { | 337 | { |
303 | int r; | 338 | int r; |
339 | /* FIXME!! | ||
340 | * Should some of this be a vm ioctl? Is it possible now? | ||
341 | */ | ||
342 | int hv_enabled = kvmppc_hv_ops ? 1 : 0; | ||
304 | 343 | ||
305 | switch (ext) { | 344 | switch (ext) { |
306 | #ifdef CONFIG_BOOKE | 345 | #ifdef CONFIG_BOOKE |
@@ -320,22 +359,26 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
320 | case KVM_CAP_DEVICE_CTRL: | 359 | case KVM_CAP_DEVICE_CTRL: |
321 | r = 1; | 360 | r = 1; |
322 | break; | 361 | break; |
323 | #ifndef CONFIG_KVM_BOOK3S_64_HV | ||
324 | case KVM_CAP_PPC_PAIRED_SINGLES: | 362 | case KVM_CAP_PPC_PAIRED_SINGLES: |
325 | case KVM_CAP_PPC_OSI: | 363 | case KVM_CAP_PPC_OSI: |
326 | case KVM_CAP_PPC_GET_PVINFO: | 364 | case KVM_CAP_PPC_GET_PVINFO: |
327 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) | 365 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
328 | case KVM_CAP_SW_TLB: | 366 | case KVM_CAP_SW_TLB: |
329 | #endif | 367 | #endif |
330 | #ifdef CONFIG_KVM_MPIC | 368 | /* We support this only for PR */ |
331 | case KVM_CAP_IRQ_MPIC: | 369 | r = !hv_enabled; |
332 | #endif | ||
333 | r = 1; | ||
334 | break; | 370 | break; |
371 | #ifdef CONFIG_KVM_MMIO | ||
335 | case KVM_CAP_COALESCED_MMIO: | 372 | case KVM_CAP_COALESCED_MMIO: |
336 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | 373 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
337 | break; | 374 | break; |
338 | #endif | 375 | #endif |
376 | #ifdef CONFIG_KVM_MPIC | ||
377 | case KVM_CAP_IRQ_MPIC: | ||
378 | r = 1; | ||
379 | break; | ||
380 | #endif | ||
381 | |||
339 | #ifdef CONFIG_PPC_BOOK3S_64 | 382 | #ifdef CONFIG_PPC_BOOK3S_64 |
340 | case KVM_CAP_SPAPR_TCE: | 383 | case KVM_CAP_SPAPR_TCE: |
341 | case KVM_CAP_PPC_ALLOC_HTAB: | 384 | case KVM_CAP_PPC_ALLOC_HTAB: |
@@ -346,32 +389,37 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
346 | r = 1; | 389 | r = 1; |
347 | break; | 390 | break; |
348 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 391 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
349 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 392 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
350 | case KVM_CAP_PPC_SMT: | 393 | case KVM_CAP_PPC_SMT: |
351 | r = threads_per_core; | 394 | if (hv_enabled) |
395 | r = threads_per_core; | ||
396 | else | ||
397 | r = 0; | ||
352 | break; | 398 | break; |
353 | case KVM_CAP_PPC_RMA: | 399 | case KVM_CAP_PPC_RMA: |
354 | r = 1; | 400 | r = hv_enabled; |
355 | /* PPC970 requires an RMA */ | 401 | /* PPC970 requires an RMA */ |
356 | if (cpu_has_feature(CPU_FTR_ARCH_201)) | 402 | if (r && cpu_has_feature(CPU_FTR_ARCH_201)) |
357 | r = 2; | 403 | r = 2; |
358 | break; | 404 | break; |
359 | #endif | 405 | #endif |
360 | case KVM_CAP_SYNC_MMU: | 406 | case KVM_CAP_SYNC_MMU: |
361 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 407 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
362 | r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0; | 408 | if (hv_enabled) |
409 | r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0; | ||
410 | else | ||
411 | r = 0; | ||
363 | #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER) | 412 | #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
364 | r = 1; | 413 | r = 1; |
365 | #else | 414 | #else |
366 | r = 0; | 415 | r = 0; |
367 | break; | ||
368 | #endif | 416 | #endif |
369 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 417 | break; |
418 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | ||
370 | case KVM_CAP_PPC_HTAB_FD: | 419 | case KVM_CAP_PPC_HTAB_FD: |
371 | r = 1; | 420 | r = hv_enabled; |
372 | break; | 421 | break; |
373 | #endif | 422 | #endif |
374 | break; | ||
375 | case KVM_CAP_NR_VCPUS: | 423 | case KVM_CAP_NR_VCPUS: |
376 | /* | 424 | /* |
377 | * Recommending a number of CPUs is somewhat arbitrary; we | 425 | * Recommending a number of CPUs is somewhat arbitrary; we |
@@ -379,11 +427,10 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
379 | * will have secondary threads "offline"), and for other KVM | 427 | * will have secondary threads "offline"), and for other KVM |
380 | * implementations just count online CPUs. | 428 | * implementations just count online CPUs. |
381 | */ | 429 | */ |
382 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 430 | if (hv_enabled) |
383 | r = num_present_cpus(); | 431 | r = num_present_cpus(); |
384 | #else | 432 | else |
385 | r = num_online_cpus(); | 433 | r = num_online_cpus(); |
386 | #endif | ||
387 | break; | 434 | break; |
388 | case KVM_CAP_MAX_VCPUS: | 435 | case KVM_CAP_MAX_VCPUS: |
389 | r = KVM_MAX_VCPUS; | 436 | r = KVM_MAX_VCPUS; |
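[note] The capability answers above now depend on which flavor module is loaded rather than on compile-time config alone. From user space the probe is unchanged; a sketch (kvm_fd is an open /dev/kvm descriptor, an assumption here):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: KVM_CAP_PPC_SMT reports 0 when only PR is available and
 * threads-per-core when the HV flavor is loaded. */
static int query_smt_threads(int kvm_fd)
{
	return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
}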
@@ -407,15 +454,16 @@ long kvm_arch_dev_ioctl(struct file *filp, | |||
407 | return -EINVAL; | 454 | return -EINVAL; |
408 | } | 455 | } |
409 | 456 | ||
410 | void kvm_arch_free_memslot(struct kvm_memory_slot *free, | 457 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, |
411 | struct kvm_memory_slot *dont) | 458 | struct kvm_memory_slot *dont) |
412 | { | 459 | { |
413 | kvmppc_core_free_memslot(free, dont); | 460 | kvmppc_core_free_memslot(kvm, free, dont); |
414 | } | 461 | } |
415 | 462 | ||
416 | int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) | 463 | int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, |
464 | unsigned long npages) | ||
417 | { | 465 | { |
418 | return kvmppc_core_create_memslot(slot, npages); | 466 | return kvmppc_core_create_memslot(kvm, slot, npages); |
419 | } | 467 | } |
420 | 468 | ||
421 | void kvm_arch_memslots_updated(struct kvm *kvm) | 469 | void kvm_arch_memslots_updated(struct kvm *kvm) |
@@ -659,6 +707,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
659 | 707 | ||
660 | return EMULATE_DO_MMIO; | 708 | return EMULATE_DO_MMIO; |
661 | } | 709 | } |
710 | EXPORT_SYMBOL_GPL(kvmppc_handle_load); | ||
662 | 711 | ||
663 | /* Same as above, but sign extends */ | 712 | /* Same as above, but sign extends */ |
664 | int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, | 713 | int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, |
@@ -720,6 +769,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
720 | 769 | ||
721 | return EMULATE_DO_MMIO; | 770 | return EMULATE_DO_MMIO; |
722 | } | 771 | } |
772 | EXPORT_SYMBOL_GPL(kvmppc_handle_store); | ||
723 | 773 | ||
724 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | 774 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) |
725 | { | 775 | { |
@@ -1024,52 +1074,12 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
1024 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); | 1074 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); |
1025 | goto out; | 1075 | goto out; |
1026 | } | 1076 | } |
1027 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
1028 | |||
1029 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
1030 | case KVM_ALLOCATE_RMA: { | ||
1031 | struct kvm_allocate_rma rma; | ||
1032 | struct kvm *kvm = filp->private_data; | ||
1033 | |||
1034 | r = kvm_vm_ioctl_allocate_rma(kvm, &rma); | ||
1035 | if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma))) | ||
1036 | r = -EFAULT; | ||
1037 | break; | ||
1038 | } | ||
1039 | |||
1040 | case KVM_PPC_ALLOCATE_HTAB: { | ||
1041 | u32 htab_order; | ||
1042 | |||
1043 | r = -EFAULT; | ||
1044 | if (get_user(htab_order, (u32 __user *)argp)) | ||
1045 | break; | ||
1046 | r = kvmppc_alloc_reset_hpt(kvm, &htab_order); | ||
1047 | if (r) | ||
1048 | break; | ||
1049 | r = -EFAULT; | ||
1050 | if (put_user(htab_order, (u32 __user *)argp)) | ||
1051 | break; | ||
1052 | r = 0; | ||
1053 | break; | ||
1054 | } | ||
1055 | |||
1056 | case KVM_PPC_GET_HTAB_FD: { | ||
1057 | struct kvm_get_htab_fd ghf; | ||
1058 | |||
1059 | r = -EFAULT; | ||
1060 | if (copy_from_user(&ghf, argp, sizeof(ghf))) | ||
1061 | break; | ||
1062 | r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf); | ||
1063 | break; | ||
1064 | } | ||
1065 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ | ||
1066 | |||
1067 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
1068 | case KVM_PPC_GET_SMMU_INFO: { | 1077 | case KVM_PPC_GET_SMMU_INFO: { |
1069 | struct kvm_ppc_smmu_info info; | 1078 | struct kvm_ppc_smmu_info info; |
1079 | struct kvm *kvm = filp->private_data; | ||
1070 | 1080 | ||
1071 | memset(&info, 0, sizeof(info)); | 1081 | memset(&info, 0, sizeof(info)); |
1072 | r = kvm_vm_ioctl_get_smmu_info(kvm, &info); | 1082 | r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); |
1073 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) | 1083 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) |
1074 | r = -EFAULT; | 1084 | r = -EFAULT; |
1075 | break; | 1085 | break; |
@@ -1080,11 +1090,15 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
1080 | r = kvm_vm_ioctl_rtas_define_token(kvm, argp); | 1090 | r = kvm_vm_ioctl_rtas_define_token(kvm, argp); |
1081 | break; | 1091 | break; |
1082 | } | 1092 | } |
1083 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 1093 | default: { |
1094 | struct kvm *kvm = filp->private_data; | ||
1095 | r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); | ||
1096 | } | ||
1097 | #else /* CONFIG_PPC_BOOK3S_64 */ | ||
1084 | default: | 1098 | default: |
1085 | r = -ENOTTY; | 1099 | r = -ENOTTY; |
1100 | #endif | ||
1086 | } | 1101 | } |
1087 | |||
1088 | out: | 1102 | out: |
1089 | return r; | 1103 | return r; |
1090 | } | 1104 | } |
@@ -1106,22 +1120,26 @@ long kvmppc_alloc_lpid(void) | |||
1106 | 1120 | ||
1107 | return lpid; | 1121 | return lpid; |
1108 | } | 1122 | } |
1123 | EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid); | ||
1109 | 1124 | ||
1110 | void kvmppc_claim_lpid(long lpid) | 1125 | void kvmppc_claim_lpid(long lpid) |
1111 | { | 1126 | { |
1112 | set_bit(lpid, lpid_inuse); | 1127 | set_bit(lpid, lpid_inuse); |
1113 | } | 1128 | } |
1129 | EXPORT_SYMBOL_GPL(kvmppc_claim_lpid); | ||
1114 | 1130 | ||
1115 | void kvmppc_free_lpid(long lpid) | 1131 | void kvmppc_free_lpid(long lpid) |
1116 | { | 1132 | { |
1117 | clear_bit(lpid, lpid_inuse); | 1133 | clear_bit(lpid, lpid_inuse); |
1118 | } | 1134 | } |
1135 | EXPORT_SYMBOL_GPL(kvmppc_free_lpid); | ||
1119 | 1136 | ||
1120 | void kvmppc_init_lpid(unsigned long nr_lpids_param) | 1137 | void kvmppc_init_lpid(unsigned long nr_lpids_param) |
1121 | { | 1138 | { |
1122 | nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); | 1139 | nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); |
1123 | memset(lpid_inuse, 0, sizeof(lpid_inuse)); | 1140 | memset(lpid_inuse, 0, sizeof(lpid_inuse)); |
1124 | } | 1141 | } |
1142 | EXPORT_SYMBOL_GPL(kvmppc_init_lpid); | ||
1125 | 1143 | ||
1126 | int kvm_arch_init(void *opaque) | 1144 | int kvm_arch_init(void *opaque) |
1127 | { | 1145 | { |
@@ -1130,4 +1148,5 @@ int kvm_arch_init(void *opaque) | |||
1130 | 1148 | ||
1131 | void kvm_arch_exit(void) | 1149 | void kvm_arch_exit(void) |
1132 | { | 1150 | { |
1151 | |||
1133 | } | 1152 | } |
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h index e326489a5420..2e0e67ef3544 100644 --- a/arch/powerpc/kvm/trace.h +++ b/arch/powerpc/kvm/trace.h | |||
@@ -31,126 +31,6 @@ TRACE_EVENT(kvm_ppc_instr, | |||
31 | __entry->inst, __entry->pc, __entry->emulate) | 31 | __entry->inst, __entry->pc, __entry->emulate) |
32 | ); | 32 | ); |
33 | 33 | ||
34 | #ifdef CONFIG_PPC_BOOK3S | ||
35 | #define kvm_trace_symbol_exit \ | ||
36 | {0x100, "SYSTEM_RESET"}, \ | ||
37 | {0x200, "MACHINE_CHECK"}, \ | ||
38 | {0x300, "DATA_STORAGE"}, \ | ||
39 | {0x380, "DATA_SEGMENT"}, \ | ||
40 | {0x400, "INST_STORAGE"}, \ | ||
41 | {0x480, "INST_SEGMENT"}, \ | ||
42 | {0x500, "EXTERNAL"}, \ | ||
43 | {0x501, "EXTERNAL_LEVEL"}, \ | ||
44 | {0x502, "EXTERNAL_HV"}, \ | ||
45 | {0x600, "ALIGNMENT"}, \ | ||
46 | {0x700, "PROGRAM"}, \ | ||
47 | {0x800, "FP_UNAVAIL"}, \ | ||
48 | {0x900, "DECREMENTER"}, \ | ||
49 | {0x980, "HV_DECREMENTER"}, \ | ||
50 | {0xc00, "SYSCALL"}, \ | ||
51 | {0xd00, "TRACE"}, \ | ||
52 | {0xe00, "H_DATA_STORAGE"}, \ | ||
53 | {0xe20, "H_INST_STORAGE"}, \ | ||
54 | {0xe40, "H_EMUL_ASSIST"}, \ | ||
55 | {0xf00, "PERFMON"}, \ | ||
56 | {0xf20, "ALTIVEC"}, \ | ||
57 | {0xf40, "VSX"} | ||
58 | #else | ||
59 | #define kvm_trace_symbol_exit \ | ||
60 | {0, "CRITICAL"}, \ | ||
61 | {1, "MACHINE_CHECK"}, \ | ||
62 | {2, "DATA_STORAGE"}, \ | ||
63 | {3, "INST_STORAGE"}, \ | ||
64 | {4, "EXTERNAL"}, \ | ||
65 | {5, "ALIGNMENT"}, \ | ||
66 | {6, "PROGRAM"}, \ | ||
67 | {7, "FP_UNAVAIL"}, \ | ||
68 | {8, "SYSCALL"}, \ | ||
69 | {9, "AP_UNAVAIL"}, \ | ||
70 | {10, "DECREMENTER"}, \ | ||
71 | {11, "FIT"}, \ | ||
72 | {12, "WATCHDOG"}, \ | ||
73 | {13, "DTLB_MISS"}, \ | ||
74 | {14, "ITLB_MISS"}, \ | ||
75 | {15, "DEBUG"}, \ | ||
76 | {32, "SPE_UNAVAIL"}, \ | ||
77 | {33, "SPE_FP_DATA"}, \ | ||
78 | {34, "SPE_FP_ROUND"}, \ | ||
79 | {35, "PERFORMANCE_MONITOR"}, \ | ||
80 | {36, "DOORBELL"}, \ | ||
81 | {37, "DOORBELL_CRITICAL"}, \ | ||
82 | {38, "GUEST_DBELL"}, \ | ||
83 | {39, "GUEST_DBELL_CRIT"}, \ | ||
84 | {40, "HV_SYSCALL"}, \ | ||
85 | {41, "HV_PRIV"} | ||
86 | #endif | ||
87 | |||
88 | TRACE_EVENT(kvm_exit, | ||
89 | TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu), | ||
90 | TP_ARGS(exit_nr, vcpu), | ||
91 | |||
92 | TP_STRUCT__entry( | ||
93 | __field( unsigned int, exit_nr ) | ||
94 | __field( unsigned long, pc ) | ||
95 | __field( unsigned long, msr ) | ||
96 | __field( unsigned long, dar ) | ||
97 | #ifdef CONFIG_KVM_BOOK3S_PR | ||
98 | __field( unsigned long, srr1 ) | ||
99 | #endif | ||
100 | __field( unsigned long, last_inst ) | ||
101 | ), | ||
102 | |||
103 | TP_fast_assign( | ||
104 | #ifdef CONFIG_KVM_BOOK3S_PR | ||
105 | struct kvmppc_book3s_shadow_vcpu *svcpu; | ||
106 | #endif | ||
107 | __entry->exit_nr = exit_nr; | ||
108 | __entry->pc = kvmppc_get_pc(vcpu); | ||
109 | __entry->dar = kvmppc_get_fault_dar(vcpu); | ||
110 | __entry->msr = vcpu->arch.shared->msr; | ||
111 | #ifdef CONFIG_KVM_BOOK3S_PR | ||
112 | svcpu = svcpu_get(vcpu); | ||
113 | __entry->srr1 = svcpu->shadow_srr1; | ||
114 | svcpu_put(svcpu); | ||
115 | #endif | ||
116 | __entry->last_inst = vcpu->arch.last_inst; | ||
117 | ), | ||
118 | |||
119 | TP_printk("exit=%s" | ||
120 | " | pc=0x%lx" | ||
121 | " | msr=0x%lx" | ||
122 | " | dar=0x%lx" | ||
123 | #ifdef CONFIG_KVM_BOOK3S_PR | ||
124 | " | srr1=0x%lx" | ||
125 | #endif | ||
126 | " | last_inst=0x%lx" | ||
127 | , | ||
128 | __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit), | ||
129 | __entry->pc, | ||
130 | __entry->msr, | ||
131 | __entry->dar, | ||
132 | #ifdef CONFIG_KVM_BOOK3S_PR | ||
133 | __entry->srr1, | ||
134 | #endif | ||
135 | __entry->last_inst | ||
136 | ) | ||
137 | ); | ||
138 | |||
139 | TRACE_EVENT(kvm_unmap_hva, | ||
140 | TP_PROTO(unsigned long hva), | ||
141 | TP_ARGS(hva), | ||
142 | |||
143 | TP_STRUCT__entry( | ||
144 | __field( unsigned long, hva ) | ||
145 | ), | ||
146 | |||
147 | TP_fast_assign( | ||
148 | __entry->hva = hva; | ||
149 | ), | ||
150 | |||
151 | TP_printk("unmap hva 0x%lx\n", __entry->hva) | ||
152 | ); | ||
153 | |||
154 | TRACE_EVENT(kvm_stlb_inval, | 34 | TRACE_EVENT(kvm_stlb_inval, |
155 | TP_PROTO(unsigned int stlb_index), | 35 | TP_PROTO(unsigned int stlb_index), |
156 | TP_ARGS(stlb_index), | 36 | TP_ARGS(stlb_index), |
@@ -236,315 +116,6 @@ TRACE_EVENT(kvm_check_requests, | |||
236 | __entry->cpu_nr, __entry->requests) | 116 | __entry->cpu_nr, __entry->requests) |
237 | ); | 117 | ); |
238 | 118 | ||
239 | |||
240 | /************************************************************************* | ||
241 | * Book3S trace points * | ||
242 | *************************************************************************/ | ||
243 | |||
244 | #ifdef CONFIG_KVM_BOOK3S_PR | ||
245 | |||
246 | TRACE_EVENT(kvm_book3s_reenter, | ||
247 | TP_PROTO(int r, struct kvm_vcpu *vcpu), | ||
248 | TP_ARGS(r, vcpu), | ||
249 | |||
250 | TP_STRUCT__entry( | ||
251 | __field( unsigned int, r ) | ||
252 | __field( unsigned long, pc ) | ||
253 | ), | ||
254 | |||
255 | TP_fast_assign( | ||
256 | __entry->r = r; | ||
257 | __entry->pc = kvmppc_get_pc(vcpu); | ||
258 | ), | ||
259 | |||
260 | TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc) | ||
261 | ); | ||
262 | |||
263 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
264 | |||
265 | TRACE_EVENT(kvm_book3s_64_mmu_map, | ||
266 | TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr, | ||
267 | struct kvmppc_pte *orig_pte), | ||
268 | TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte), | ||
269 | |||
270 | TP_STRUCT__entry( | ||
271 | __field( unsigned char, flag_w ) | ||
272 | __field( unsigned char, flag_x ) | ||
273 | __field( unsigned long, eaddr ) | ||
274 | __field( unsigned long, hpteg ) | ||
275 | __field( unsigned long, va ) | ||
276 | __field( unsigned long long, vpage ) | ||
277 | __field( unsigned long, hpaddr ) | ||
278 | ), | ||
279 | |||
280 | TP_fast_assign( | ||
281 | __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w'; | ||
282 | __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x'; | ||
283 | __entry->eaddr = orig_pte->eaddr; | ||
284 | __entry->hpteg = hpteg; | ||
285 | __entry->va = va; | ||
286 | __entry->vpage = orig_pte->vpage; | ||
287 | __entry->hpaddr = hpaddr; | ||
288 | ), | ||
289 | |||
290 | TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx", | ||
291 | __entry->flag_w, __entry->flag_x, __entry->eaddr, | ||
292 | __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr) | ||
293 | ); | ||
294 | |||
295 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
296 | |||
297 | TRACE_EVENT(kvm_book3s_mmu_map, | ||
298 | TP_PROTO(struct hpte_cache *pte), | ||
299 | TP_ARGS(pte), | ||
300 | |||
301 | TP_STRUCT__entry( | ||
302 | __field( u64, host_vpn ) | ||
303 | __field( u64, pfn ) | ||
304 | __field( ulong, eaddr ) | ||
305 | __field( u64, vpage ) | ||
306 | __field( ulong, raddr ) | ||
307 | __field( int, flags ) | ||
308 | ), | ||
309 | |||
310 | TP_fast_assign( | ||
311 | __entry->host_vpn = pte->host_vpn; | ||
312 | __entry->pfn = pte->pfn; | ||
313 | __entry->eaddr = pte->pte.eaddr; | ||
314 | __entry->vpage = pte->pte.vpage; | ||
315 | __entry->raddr = pte->pte.raddr; | ||
316 | __entry->flags = (pte->pte.may_read ? 0x4 : 0) | | ||
317 | (pte->pte.may_write ? 0x2 : 0) | | ||
318 | (pte->pte.may_execute ? 0x1 : 0); | ||
319 | ), | ||
320 | |||
321 | TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", | ||
322 | __entry->host_vpn, __entry->pfn, __entry->eaddr, | ||
323 | __entry->vpage, __entry->raddr, __entry->flags) | ||
324 | ); | ||
325 | |||
326 | TRACE_EVENT(kvm_book3s_mmu_invalidate, | ||
327 | TP_PROTO(struct hpte_cache *pte), | ||
328 | TP_ARGS(pte), | ||
329 | |||
330 | TP_STRUCT__entry( | ||
331 | __field( u64, host_vpn ) | ||
332 | __field( u64, pfn ) | ||
333 | __field( ulong, eaddr ) | ||
334 | __field( u64, vpage ) | ||
335 | __field( ulong, raddr ) | ||
336 | __field( int, flags ) | ||
337 | ), | ||
338 | |||
339 | TP_fast_assign( | ||
340 | __entry->host_vpn = pte->host_vpn; | ||
341 | __entry->pfn = pte->pfn; | ||
342 | __entry->eaddr = pte->pte.eaddr; | ||
343 | __entry->vpage = pte->pte.vpage; | ||
344 | __entry->raddr = pte->pte.raddr; | ||
345 | __entry->flags = (pte->pte.may_read ? 0x4 : 0) | | ||
346 | (pte->pte.may_write ? 0x2 : 0) | | ||
347 | (pte->pte.may_execute ? 0x1 : 0); | ||
348 | ), | ||
349 | |||
350 | TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", | ||
351 | __entry->host_vpn, __entry->pfn, __entry->eaddr, | ||
352 | __entry->vpage, __entry->raddr, __entry->flags) | ||
353 | ); | ||
354 | |||
355 | TRACE_EVENT(kvm_book3s_mmu_flush, | ||
356 | TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1, | ||
357 | unsigned long long p2), | ||
358 | TP_ARGS(type, vcpu, p1, p2), | ||
359 | |||
360 | TP_STRUCT__entry( | ||
361 | __field( int, count ) | ||
362 | __field( unsigned long long, p1 ) | ||
363 | __field( unsigned long long, p2 ) | ||
364 | __field( const char *, type ) | ||
365 | ), | ||
366 | |||
367 | TP_fast_assign( | ||
368 | __entry->count = to_book3s(vcpu)->hpte_cache_count; | ||
369 | __entry->p1 = p1; | ||
370 | __entry->p2 = p2; | ||
371 | __entry->type = type; | ||
372 | ), | ||
373 | |||
374 | TP_printk("Flush %d %sPTEs: %llx - %llx", | ||
375 | __entry->count, __entry->type, __entry->p1, __entry->p2) | ||
376 | ); | ||
377 | |||
378 | TRACE_EVENT(kvm_book3s_slb_found, | ||
379 | TP_PROTO(unsigned long long gvsid, unsigned long long hvsid), | ||
380 | TP_ARGS(gvsid, hvsid), | ||
381 | |||
382 | TP_STRUCT__entry( | ||
383 | __field( unsigned long long, gvsid ) | ||
384 | __field( unsigned long long, hvsid ) | ||
385 | ), | ||
386 | |||
387 | TP_fast_assign( | ||
388 | __entry->gvsid = gvsid; | ||
389 | __entry->hvsid = hvsid; | ||
390 | ), | ||
391 | |||
392 | TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid) | ||
393 | ); | ||
394 | |||
395 | TRACE_EVENT(kvm_book3s_slb_fail, | ||
396 | TP_PROTO(u16 sid_map_mask, unsigned long long gvsid), | ||
397 | TP_ARGS(sid_map_mask, gvsid), | ||
398 | |||
399 | TP_STRUCT__entry( | ||
400 | __field( unsigned short, sid_map_mask ) | ||
401 | __field( unsigned long long, gvsid ) | ||
402 | ), | ||
403 | |||
404 | TP_fast_assign( | ||
405 | __entry->sid_map_mask = sid_map_mask; | ||
406 | __entry->gvsid = gvsid; | ||
407 | ), | ||
408 | |||
409 | TP_printk("%x/%x: %llx", __entry->sid_map_mask, | ||
410 | SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid) | ||
411 | ); | ||
412 | |||
413 | TRACE_EVENT(kvm_book3s_slb_map, | ||
414 | TP_PROTO(u16 sid_map_mask, unsigned long long gvsid, | ||
415 | unsigned long long hvsid), | ||
416 | TP_ARGS(sid_map_mask, gvsid, hvsid), | ||
417 | |||
418 | TP_STRUCT__entry( | ||
419 | __field( unsigned short, sid_map_mask ) | ||
420 | __field( unsigned long long, guest_vsid ) | ||
421 | __field( unsigned long long, host_vsid ) | ||
422 | ), | ||
423 | |||
424 | TP_fast_assign( | ||
425 | __entry->sid_map_mask = sid_map_mask; | ||
426 | __entry->guest_vsid = gvsid; | ||
427 | __entry->host_vsid = hvsid; | ||
428 | ), | ||
429 | |||
430 | TP_printk("%x: %llx -> %llx", __entry->sid_map_mask, | ||
431 | __entry->guest_vsid, __entry->host_vsid) | ||
432 | ); | ||
433 | |||
434 | TRACE_EVENT(kvm_book3s_slbmte, | ||
435 | TP_PROTO(u64 slb_vsid, u64 slb_esid), | ||
436 | TP_ARGS(slb_vsid, slb_esid), | ||
437 | |||
438 | TP_STRUCT__entry( | ||
439 | __field( u64, slb_vsid ) | ||
440 | __field( u64, slb_esid ) | ||
441 | ), | ||
442 | |||
443 | TP_fast_assign( | ||
444 | __entry->slb_vsid = slb_vsid; | ||
445 | __entry->slb_esid = slb_esid; | ||
446 | ), | ||
447 | |||
448 | TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid) | ||
449 | ); | ||
450 | |||
451 | #endif /* CONFIG_KVM_BOOK3S_PR */ | ||
452 | |||
453 | |||
454 | /************************************************************************* | ||
455 | * Book3E trace points * | ||
456 | *************************************************************************/ | ||
457 | |||
458 | #ifdef CONFIG_BOOKE | ||
459 | |||
460 | TRACE_EVENT(kvm_booke206_stlb_write, | ||
461 | TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3), | ||
462 | TP_ARGS(mas0, mas8, mas1, mas2, mas7_3), | ||
463 | |||
464 | TP_STRUCT__entry( | ||
465 | __field( __u32, mas0 ) | ||
466 | __field( __u32, mas8 ) | ||
467 | __field( __u32, mas1 ) | ||
468 | __field( __u64, mas2 ) | ||
469 | __field( __u64, mas7_3 ) | ||
470 | ), | ||
471 | |||
472 | TP_fast_assign( | ||
473 | __entry->mas0 = mas0; | ||
474 | __entry->mas8 = mas8; | ||
475 | __entry->mas1 = mas1; | ||
476 | __entry->mas2 = mas2; | ||
477 | __entry->mas7_3 = mas7_3; | ||
478 | ), | ||
479 | |||
480 | TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx", | ||
481 | __entry->mas0, __entry->mas8, __entry->mas1, | ||
482 | __entry->mas2, __entry->mas7_3) | ||
483 | ); | ||
484 | |||
485 | TRACE_EVENT(kvm_booke206_gtlb_write, | ||
486 | TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3), | ||
487 | TP_ARGS(mas0, mas1, mas2, mas7_3), | ||
488 | |||
489 | TP_STRUCT__entry( | ||
490 | __field( __u32, mas0 ) | ||
491 | __field( __u32, mas1 ) | ||
492 | __field( __u64, mas2 ) | ||
493 | __field( __u64, mas7_3 ) | ||
494 | ), | ||
495 | |||
496 | TP_fast_assign( | ||
497 | __entry->mas0 = mas0; | ||
498 | __entry->mas1 = mas1; | ||
499 | __entry->mas2 = mas2; | ||
500 | __entry->mas7_3 = mas7_3; | ||
501 | ), | ||
502 | |||
503 | TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx", | ||
504 | __entry->mas0, __entry->mas1, | ||
505 | __entry->mas2, __entry->mas7_3) | ||
506 | ); | ||
507 | |||
508 | TRACE_EVENT(kvm_booke206_ref_release, | ||
509 | TP_PROTO(__u64 pfn, __u32 flags), | ||
510 | TP_ARGS(pfn, flags), | ||
511 | |||
512 | TP_STRUCT__entry( | ||
513 | __field( __u64, pfn ) | ||
514 | __field( __u32, flags ) | ||
515 | ), | ||
516 | |||
517 | TP_fast_assign( | ||
518 | __entry->pfn = pfn; | ||
519 | __entry->flags = flags; | ||
520 | ), | ||
521 | |||
522 | TP_printk("pfn=%llx flags=%x", | ||
523 | __entry->pfn, __entry->flags) | ||
524 | ); | ||
525 | |||
526 | TRACE_EVENT(kvm_booke_queue_irqprio, | ||
527 | TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority), | ||
528 | TP_ARGS(vcpu, priority), | ||
529 | |||
530 | TP_STRUCT__entry( | ||
531 | __field( __u32, cpu_nr ) | ||
532 | __field( __u32, priority ) | ||
533 | __field( unsigned long, pending ) | ||
534 | ), | ||
535 | |||
536 | TP_fast_assign( | ||
537 | __entry->cpu_nr = vcpu->vcpu_id; | ||
538 | __entry->priority = priority; | ||
539 | __entry->pending = vcpu->arch.pending_exceptions; | ||
540 | ), | ||
541 | |||
542 | TP_printk("vcpu=%x prio=%x pending=%lx", | ||
543 | __entry->cpu_nr, __entry->priority, __entry->pending) | ||
544 | ); | ||
545 | |||
546 | #endif | ||
547 | |||
548 | #endif /* _TRACE_KVM_H */ | 119 | #endif /* _TRACE_KVM_H */ |
549 | 120 | ||
550 | /* This part must be outside protection */ | 121 | /* This part must be outside protection */ |
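The hunks above strip the shared trace.h down to the backend-neutral events: the old file carried both the BookE exit-symbol table and all of the Book3S PR tracepoints, and the shared kvm_exit event needed CONFIG_KVM_BOOK3S_PR conditionals in its TP_STRUCT__entry, TP_fast_assign and TP_printk blocks just to pick up srr1. The names in kvm_trace_symbol_exit are resolved at output time by __print_symbolic(), which scans a {value, "name"} table. A minimal user-space sketch of that lookup, mirroring the kernel's struct trace_print_flags but with a hypothetical symbolic() helper and a truncated copy of the table:

    #include <stdio.h>

    /* Same shape as the kernel's struct trace_print_flags. */
    struct trace_print_flags {
            unsigned long mask;
            const char *name;
    };

    static const struct trace_print_flags exit_syms[] = {
            {0, "CRITICAL"}, {1, "MACHINE_CHECK"}, {2, "DATA_STORAGE"},
            {8, "SYSCALL"}, {13, "DTLB_MISS"}, {15, "DEBUG"}, /* truncated */
    };

    static const char *symbolic(unsigned long val)
    {
            size_t i;

            for (i = 0; i < sizeof(exit_syms) / sizeof(exit_syms[0]); i++)
                    if (exit_syms[i].mask == val)
                            return exit_syms[i].name;
            return "(unknown)"; /* ftrace falls back to printing the raw value */
    }

    int main(void)
    {
            /* What TP_printk("exit=%s ...") produces for exit_nr == 8. */
            printf("exit=%s\n", symbolic(8));
            return 0;
    }

The split below gives each backend its own table under its own TRACE_SYSTEM, so the conditionals disappear.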
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h new file mode 100644 index 000000000000..f7537cf26ce7 --- /dev/null +++ b/arch/powerpc/kvm/trace_booke.h | |||
@@ -0,0 +1,177 @@ | |||
1 | #if !defined(_TRACE_KVM_BOOKE_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_KVM_BOOKE_H | ||
3 | |||
4 | #include <linux/tracepoint.h> | ||
5 | |||
6 | #undef TRACE_SYSTEM | ||
7 | #define TRACE_SYSTEM kvm_booke | ||
8 | #define TRACE_INCLUDE_PATH . | ||
9 | #define TRACE_INCLUDE_FILE trace_booke | ||
10 | |||
11 | #define kvm_trace_symbol_exit \ | ||
12 | {0, "CRITICAL"}, \ | ||
13 | {1, "MACHINE_CHECK"}, \ | ||
14 | {2, "DATA_STORAGE"}, \ | ||
15 | {3, "INST_STORAGE"}, \ | ||
16 | {4, "EXTERNAL"}, \ | ||
17 | {5, "ALIGNMENT"}, \ | ||
18 | {6, "PROGRAM"}, \ | ||
19 | {7, "FP_UNAVAIL"}, \ | ||
20 | {8, "SYSCALL"}, \ | ||
21 | {9, "AP_UNAVAIL"}, \ | ||
22 | {10, "DECREMENTER"}, \ | ||
23 | {11, "FIT"}, \ | ||
24 | {12, "WATCHDOG"}, \ | ||
25 | {13, "DTLB_MISS"}, \ | ||
26 | {14, "ITLB_MISS"}, \ | ||
27 | {15, "DEBUG"}, \ | ||
28 | {32, "SPE_UNAVAIL"}, \ | ||
29 | {33, "SPE_FP_DATA"}, \ | ||
30 | {34, "SPE_FP_ROUND"}, \ | ||
31 | {35, "PERFORMANCE_MONITOR"}, \ | ||
32 | {36, "DOORBELL"}, \ | ||
33 | {37, "DOORBELL_CRITICAL"}, \ | ||
34 | {38, "GUEST_DBELL"}, \ | ||
35 | {39, "GUEST_DBELL_CRIT"}, \ | ||
36 | {40, "HV_SYSCALL"}, \ | ||
37 | {41, "HV_PRIV"} | ||
38 | |||
39 | TRACE_EVENT(kvm_exit, | ||
40 | TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu), | ||
41 | TP_ARGS(exit_nr, vcpu), | ||
42 | |||
43 | TP_STRUCT__entry( | ||
44 | __field( unsigned int, exit_nr ) | ||
45 | __field( unsigned long, pc ) | ||
46 | __field( unsigned long, msr ) | ||
47 | __field( unsigned long, dar ) | ||
48 | __field( unsigned long, last_inst ) | ||
49 | ), | ||
50 | |||
51 | TP_fast_assign( | ||
52 | __entry->exit_nr = exit_nr; | ||
53 | __entry->pc = kvmppc_get_pc(vcpu); | ||
54 | __entry->dar = kvmppc_get_fault_dar(vcpu); | ||
55 | __entry->msr = vcpu->arch.shared->msr; | ||
56 | __entry->last_inst = vcpu->arch.last_inst; | ||
57 | ), | ||
58 | |||
59 | TP_printk("exit=%s" | ||
60 | " | pc=0x%lx" | ||
61 | " | msr=0x%lx" | ||
62 | " | dar=0x%lx" | ||
63 | " | last_inst=0x%lx" | ||
64 | , | ||
65 | __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit), | ||
66 | __entry->pc, | ||
67 | __entry->msr, | ||
68 | __entry->dar, | ||
69 | __entry->last_inst | ||
70 | ) | ||
71 | ); | ||
72 | |||
73 | TRACE_EVENT(kvm_unmap_hva, | ||
74 | TP_PROTO(unsigned long hva), | ||
75 | TP_ARGS(hva), | ||
76 | |||
77 | TP_STRUCT__entry( | ||
78 | __field( unsigned long, hva ) | ||
79 | ), | ||
80 | |||
81 | TP_fast_assign( | ||
82 | __entry->hva = hva; | ||
83 | ), | ||
84 | |||
85 | TP_printk("unmap hva 0x%lx", __entry->hva) | ||
86 | ); | ||
87 | |||
88 | TRACE_EVENT(kvm_booke206_stlb_write, | ||
89 | TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3), | ||
90 | TP_ARGS(mas0, mas8, mas1, mas2, mas7_3), | ||
91 | |||
92 | TP_STRUCT__entry( | ||
93 | __field( __u32, mas0 ) | ||
94 | __field( __u32, mas8 ) | ||
95 | __field( __u32, mas1 ) | ||
96 | __field( __u64, mas2 ) | ||
97 | __field( __u64, mas7_3 ) | ||
98 | ), | ||
99 | |||
100 | TP_fast_assign( | ||
101 | __entry->mas0 = mas0; | ||
102 | __entry->mas8 = mas8; | ||
103 | __entry->mas1 = mas1; | ||
104 | __entry->mas2 = mas2; | ||
105 | __entry->mas7_3 = mas7_3; | ||
106 | ), | ||
107 | |||
108 | TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx", | ||
109 | __entry->mas0, __entry->mas8, __entry->mas1, | ||
110 | __entry->mas2, __entry->mas7_3) | ||
111 | ); | ||
112 | |||
113 | TRACE_EVENT(kvm_booke206_gtlb_write, | ||
114 | TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3), | ||
115 | TP_ARGS(mas0, mas1, mas2, mas7_3), | ||
116 | |||
117 | TP_STRUCT__entry( | ||
118 | __field( __u32, mas0 ) | ||
119 | __field( __u32, mas1 ) | ||
120 | __field( __u64, mas2 ) | ||
121 | __field( __u64, mas7_3 ) | ||
122 | ), | ||
123 | |||
124 | TP_fast_assign( | ||
125 | __entry->mas0 = mas0; | ||
126 | __entry->mas1 = mas1; | ||
127 | __entry->mas2 = mas2; | ||
128 | __entry->mas7_3 = mas7_3; | ||
129 | ), | ||
130 | |||
131 | TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx", | ||
132 | __entry->mas0, __entry->mas1, | ||
133 | __entry->mas2, __entry->mas7_3) | ||
134 | ); | ||
135 | |||
136 | TRACE_EVENT(kvm_booke206_ref_release, | ||
137 | TP_PROTO(__u64 pfn, __u32 flags), | ||
138 | TP_ARGS(pfn, flags), | ||
139 | |||
140 | TP_STRUCT__entry( | ||
141 | __field( __u64, pfn ) | ||
142 | __field( __u32, flags ) | ||
143 | ), | ||
144 | |||
145 | TP_fast_assign( | ||
146 | __entry->pfn = pfn; | ||
147 | __entry->flags = flags; | ||
148 | ), | ||
149 | |||
150 | TP_printk("pfn=%llx flags=%x", | ||
151 | __entry->pfn, __entry->flags) | ||
152 | ); | ||
153 | |||
154 | TRACE_EVENT(kvm_booke_queue_irqprio, | ||
155 | TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority), | ||
156 | TP_ARGS(vcpu, priority), | ||
157 | |||
158 | TP_STRUCT__entry( | ||
159 | __field( __u32, cpu_nr ) | ||
160 | __field( __u32, priority ) | ||
161 | __field( unsigned long, pending ) | ||
162 | ), | ||
163 | |||
164 | TP_fast_assign( | ||
165 | __entry->cpu_nr = vcpu->vcpu_id; | ||
166 | __entry->priority = priority; | ||
167 | __entry->pending = vcpu->arch.pending_exceptions; | ||
168 | ), | ||
169 | |||
170 | TP_printk("vcpu=%x prio=%x pending=%lx", | ||
171 | __entry->cpu_nr, __entry->priority, __entry->pending) | ||
172 | ); | ||
173 | |||
174 | #endif /* _TRACE_KVM_BOOKE_H */ | ||
175 | |||
176 | /* This part must be outside protection */ | ||
177 | #include <trace/define_trace.h> | ||
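With the BookE events isolated in trace_booke.h, a backend emits them through the trace_kvm_exit() and friends that TRACE_EVENT() generates. A hedged sketch of a call site, assuming the usual tracepoint convention (CREATE_TRACE_POINTS defined in exactly one .c file before the header include; handle_exit_sketch() is hypothetical):

    #include <linux/kvm_host.h>

    #define CREATE_TRACE_POINTS
    #include "trace_booke.h"

    /* Hypothetical exit path: records the exit before handling it. */
    static void handle_exit_sketch(struct kvm_vcpu *vcpu, unsigned int exit_nr)
    {
            /* Compiles to a patched-out NOP unless kvm_booke/kvm_exit is on. */
            trace_kvm_exit(exit_nr, vcpu);
    }

Note that the BookE kvm_exit above has no srr1 field at all, so none of the CONFIG_KVM_BOOK3S_PR guards from the old shared event survive here.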
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h new file mode 100644 index 000000000000..8b22e4748344 --- /dev/null +++ b/arch/powerpc/kvm/trace_pr.h | |||
@@ -0,0 +1,297 @@ | |||
1 | |||
2 | #if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ) | ||
3 | #define _TRACE_KVM_PR_H | ||
4 | |||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | #undef TRACE_SYSTEM | ||
8 | #define TRACE_SYSTEM kvm_pr | ||
9 | #define TRACE_INCLUDE_PATH . | ||
10 | #define TRACE_INCLUDE_FILE trace_pr | ||
11 | |||
12 | #define kvm_trace_symbol_exit \ | ||
13 | {0x100, "SYSTEM_RESET"}, \ | ||
14 | {0x200, "MACHINE_CHECK"}, \ | ||
15 | {0x300, "DATA_STORAGE"}, \ | ||
16 | {0x380, "DATA_SEGMENT"}, \ | ||
17 | {0x400, "INST_STORAGE"}, \ | ||
18 | {0x480, "INST_SEGMENT"}, \ | ||
19 | {0x500, "EXTERNAL"}, \ | ||
20 | {0x501, "EXTERNAL_LEVEL"}, \ | ||
21 | {0x502, "EXTERNAL_HV"}, \ | ||
22 | {0x600, "ALIGNMENT"}, \ | ||
23 | {0x700, "PROGRAM"}, \ | ||
24 | {0x800, "FP_UNAVAIL"}, \ | ||
25 | {0x900, "DECREMENTER"}, \ | ||
26 | {0x980, "HV_DECREMENTER"}, \ | ||
27 | {0xc00, "SYSCALL"}, \ | ||
28 | {0xd00, "TRACE"}, \ | ||
29 | {0xe00, "H_DATA_STORAGE"}, \ | ||
30 | {0xe20, "H_INST_STORAGE"}, \ | ||
31 | {0xe40, "H_EMUL_ASSIST"}, \ | ||
32 | {0xf00, "PERFMON"}, \ | ||
33 | {0xf20, "ALTIVEC"}, \ | ||
34 | {0xf40, "VSX"} | ||
35 | |||
36 | TRACE_EVENT(kvm_book3s_reenter, | ||
37 | TP_PROTO(int r, struct kvm_vcpu *vcpu), | ||
38 | TP_ARGS(r, vcpu), | ||
39 | |||
40 | TP_STRUCT__entry( | ||
41 | __field( unsigned int, r ) | ||
42 | __field( unsigned long, pc ) | ||
43 | ), | ||
44 | |||
45 | TP_fast_assign( | ||
46 | __entry->r = r; | ||
47 | __entry->pc = kvmppc_get_pc(vcpu); | ||
48 | ), | ||
49 | |||
50 | TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc) | ||
51 | ); | ||
52 | |||
53 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
54 | |||
55 | TRACE_EVENT(kvm_book3s_64_mmu_map, | ||
56 | TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr, | ||
57 | struct kvmppc_pte *orig_pte), | ||
58 | TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte), | ||
59 | |||
60 | TP_STRUCT__entry( | ||
61 | __field( unsigned char, flag_w ) | ||
62 | __field( unsigned char, flag_x ) | ||
63 | __field( unsigned long, eaddr ) | ||
64 | __field( unsigned long, hpteg ) | ||
65 | __field( unsigned long, va ) | ||
66 | __field( unsigned long long, vpage ) | ||
67 | __field( unsigned long, hpaddr ) | ||
68 | ), | ||
69 | |||
70 | TP_fast_assign( | ||
71 | __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w'; | ||
72 | __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x'; | ||
73 | __entry->eaddr = orig_pte->eaddr; | ||
74 | __entry->hpteg = hpteg; | ||
75 | __entry->va = va; | ||
76 | __entry->vpage = orig_pte->vpage; | ||
77 | __entry->hpaddr = hpaddr; | ||
78 | ), | ||
79 | |||
80 | TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx", | ||
81 | __entry->flag_w, __entry->flag_x, __entry->eaddr, | ||
82 | __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr) | ||
83 | ); | ||
84 | |||
85 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
86 | |||
87 | TRACE_EVENT(kvm_book3s_mmu_map, | ||
88 | TP_PROTO(struct hpte_cache *pte), | ||
89 | TP_ARGS(pte), | ||
90 | |||
91 | TP_STRUCT__entry( | ||
92 | __field( u64, host_vpn ) | ||
93 | __field( u64, pfn ) | ||
94 | __field( ulong, eaddr ) | ||
95 | __field( u64, vpage ) | ||
96 | __field( ulong, raddr ) | ||
97 | __field( int, flags ) | ||
98 | ), | ||
99 | |||
100 | TP_fast_assign( | ||
101 | __entry->host_vpn = pte->host_vpn; | ||
102 | __entry->pfn = pte->pfn; | ||
103 | __entry->eaddr = pte->pte.eaddr; | ||
104 | __entry->vpage = pte->pte.vpage; | ||
105 | __entry->raddr = pte->pte.raddr; | ||
106 | __entry->flags = (pte->pte.may_read ? 0x4 : 0) | | ||
107 | (pte->pte.may_write ? 0x2 : 0) | | ||
108 | (pte->pte.may_execute ? 0x1 : 0); | ||
109 | ), | ||
110 | |||
111 | TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", | ||
112 | __entry->host_vpn, __entry->pfn, __entry->eaddr, | ||
113 | __entry->vpage, __entry->raddr, __entry->flags) | ||
114 | ); | ||
115 | |||
116 | TRACE_EVENT(kvm_book3s_mmu_invalidate, | ||
117 | TP_PROTO(struct hpte_cache *pte), | ||
118 | TP_ARGS(pte), | ||
119 | |||
120 | TP_STRUCT__entry( | ||
121 | __field( u64, host_vpn ) | ||
122 | __field( u64, pfn ) | ||
123 | __field( ulong, eaddr ) | ||
124 | __field( u64, vpage ) | ||
125 | __field( ulong, raddr ) | ||
126 | __field( int, flags ) | ||
127 | ), | ||
128 | |||
129 | TP_fast_assign( | ||
130 | __entry->host_vpn = pte->host_vpn; | ||
131 | __entry->pfn = pte->pfn; | ||
132 | __entry->eaddr = pte->pte.eaddr; | ||
133 | __entry->vpage = pte->pte.vpage; | ||
134 | __entry->raddr = pte->pte.raddr; | ||
135 | __entry->flags = (pte->pte.may_read ? 0x4 : 0) | | ||
136 | (pte->pte.may_write ? 0x2 : 0) | | ||
137 | (pte->pte.may_execute ? 0x1 : 0); | ||
138 | ), | ||
139 | |||
140 | TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", | ||
141 | __entry->host_vpn, __entry->pfn, __entry->eaddr, | ||
142 | __entry->vpage, __entry->raddr, __entry->flags) | ||
143 | ); | ||
144 | |||
145 | TRACE_EVENT(kvm_book3s_mmu_flush, | ||
146 | TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1, | ||
147 | unsigned long long p2), | ||
148 | TP_ARGS(type, vcpu, p1, p2), | ||
149 | |||
150 | TP_STRUCT__entry( | ||
151 | __field( int, count ) | ||
152 | __field( unsigned long long, p1 ) | ||
153 | __field( unsigned long long, p2 ) | ||
154 | __field( const char *, type ) | ||
155 | ), | ||
156 | |||
157 | TP_fast_assign( | ||
158 | __entry->count = to_book3s(vcpu)->hpte_cache_count; | ||
159 | __entry->p1 = p1; | ||
160 | __entry->p2 = p2; | ||
161 | __entry->type = type; | ||
162 | ), | ||
163 | |||
164 | TP_printk("Flush %d %sPTEs: %llx - %llx", | ||
165 | __entry->count, __entry->type, __entry->p1, __entry->p2) | ||
166 | ); | ||
167 | |||
168 | TRACE_EVENT(kvm_book3s_slb_found, | ||
169 | TP_PROTO(unsigned long long gvsid, unsigned long long hvsid), | ||
170 | TP_ARGS(gvsid, hvsid), | ||
171 | |||
172 | TP_STRUCT__entry( | ||
173 | __field( unsigned long long, gvsid ) | ||
174 | __field( unsigned long long, hvsid ) | ||
175 | ), | ||
176 | |||
177 | TP_fast_assign( | ||
178 | __entry->gvsid = gvsid; | ||
179 | __entry->hvsid = hvsid; | ||
180 | ), | ||
181 | |||
182 | TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid) | ||
183 | ); | ||
184 | |||
185 | TRACE_EVENT(kvm_book3s_slb_fail, | ||
186 | TP_PROTO(u16 sid_map_mask, unsigned long long gvsid), | ||
187 | TP_ARGS(sid_map_mask, gvsid), | ||
188 | |||
189 | TP_STRUCT__entry( | ||
190 | __field( unsigned short, sid_map_mask ) | ||
191 | __field( unsigned long long, gvsid ) | ||
192 | ), | ||
193 | |||
194 | TP_fast_assign( | ||
195 | __entry->sid_map_mask = sid_map_mask; | ||
196 | __entry->gvsid = gvsid; | ||
197 | ), | ||
198 | |||
199 | TP_printk("%x/%x: %llx", __entry->sid_map_mask, | ||
200 | SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid) | ||
201 | ); | ||
202 | |||
203 | TRACE_EVENT(kvm_book3s_slb_map, | ||
204 | TP_PROTO(u16 sid_map_mask, unsigned long long gvsid, | ||
205 | unsigned long long hvsid), | ||
206 | TP_ARGS(sid_map_mask, gvsid, hvsid), | ||
207 | |||
208 | TP_STRUCT__entry( | ||
209 | __field( unsigned short, sid_map_mask ) | ||
210 | __field( unsigned long long, guest_vsid ) | ||
211 | __field( unsigned long long, host_vsid ) | ||
212 | ), | ||
213 | |||
214 | TP_fast_assign( | ||
215 | __entry->sid_map_mask = sid_map_mask; | ||
216 | __entry->guest_vsid = gvsid; | ||
217 | __entry->host_vsid = hvsid; | ||
218 | ), | ||
219 | |||
220 | TP_printk("%x: %llx -> %llx", __entry->sid_map_mask, | ||
221 | __entry->guest_vsid, __entry->host_vsid) | ||
222 | ); | ||
223 | |||
224 | TRACE_EVENT(kvm_book3s_slbmte, | ||
225 | TP_PROTO(u64 slb_vsid, u64 slb_esid), | ||
226 | TP_ARGS(slb_vsid, slb_esid), | ||
227 | |||
228 | TP_STRUCT__entry( | ||
229 | __field( u64, slb_vsid ) | ||
230 | __field( u64, slb_esid ) | ||
231 | ), | ||
232 | |||
233 | TP_fast_assign( | ||
234 | __entry->slb_vsid = slb_vsid; | ||
235 | __entry->slb_esid = slb_esid; | ||
236 | ), | ||
237 | |||
238 | TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid) | ||
239 | ); | ||
240 | |||
241 | TRACE_EVENT(kvm_exit, | ||
242 | TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu), | ||
243 | TP_ARGS(exit_nr, vcpu), | ||
244 | |||
245 | TP_STRUCT__entry( | ||
246 | __field( unsigned int, exit_nr ) | ||
247 | __field( unsigned long, pc ) | ||
248 | __field( unsigned long, msr ) | ||
249 | __field( unsigned long, dar ) | ||
250 | __field( unsigned long, srr1 ) | ||
251 | __field( unsigned long, last_inst ) | ||
252 | ), | ||
253 | |||
254 | TP_fast_assign( | ||
255 | __entry->exit_nr = exit_nr; | ||
256 | __entry->pc = kvmppc_get_pc(vcpu); | ||
257 | __entry->dar = kvmppc_get_fault_dar(vcpu); | ||
258 | __entry->msr = vcpu->arch.shared->msr; | ||
259 | __entry->srr1 = vcpu->arch.shadow_srr1; | ||
260 | __entry->last_inst = vcpu->arch.last_inst; | ||
261 | ), | ||
262 | |||
263 | TP_printk("exit=%s" | ||
264 | " | pc=0x%lx" | ||
265 | " | msr=0x%lx" | ||
266 | " | dar=0x%lx" | ||
267 | " | srr1=0x%lx" | ||
268 | " | last_inst=0x%lx" | ||
269 | , | ||
270 | __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit), | ||
271 | __entry->pc, | ||
272 | __entry->msr, | ||
273 | __entry->dar, | ||
274 | __entry->srr1, | ||
275 | __entry->last_inst | ||
276 | ) | ||
277 | ); | ||
278 | |||
279 | TRACE_EVENT(kvm_unmap_hva, | ||
280 | TP_PROTO(unsigned long hva), | ||
281 | TP_ARGS(hva), | ||
282 | |||
283 | TP_STRUCT__entry( | ||
284 | __field( unsigned long, hva ) | ||
285 | ), | ||
286 | |||
287 | TP_fast_assign( | ||
288 | __entry->hva = hva; | ||
289 | ), | ||
290 | |||
291 | TP_printk("unmap hva 0x%lx", __entry->hva) | ||
292 | ); | ||
293 | |||
294 | #endif /* _TRACE_KVM_PR_H */ | ||
295 | |||
296 | /* This part must be outside protection */ | ||
297 | #include <trace/define_trace.h> | ||
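Both new headers define a kvm_exit event, but each under its own TRACE_SYSTEM (kvm_booke vs. kvm_pr), so the names no longer collide and the events land in separate tracefs directories. A small user-space sketch that enables both (the /sys/kernel/tracing mount point is an assumption; older systems expose the same tree via /sys/kernel/debug/tracing):

    #include <stdio.h>

    static int enable_event(const char *path)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1; /* tracefs not mounted, or insufficient rights */
            fputs("1\n", f);
            fclose(f);
            return 0;
    }

    int main(void)
    {
            /* Same event name, distinct TRACE_SYSTEM: no clash any more. */
            enable_event("/sys/kernel/tracing/events/kvm_booke/kvm_exit/enable");
            enable_event("/sys/kernel/tracing/events/kvm_pr/kvm_exit/enable");
            return 0;
    }

The PR variant keeps srr1 unconditionally, now read straight from vcpu->arch.shadow_srr1 rather than through the svcpu_get()/svcpu_put() pair the shared event needed.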