-rw-r--r--	arch/powerpc/include/asm/hvcall.h	|   5
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h	|  11
-rw-r--r--	arch/powerpc/include/asm/kvm_ppc.h	|   1
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	|   2
-rw-r--r--	arch/powerpc/kvm/Makefile	|   8
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	| 168
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rm_mmu.c	| 368
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S	| 158
-rw-r--r--	arch/powerpc/kvm/powerpc.c	|   2
9 files changed, 716 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index fd8201dddd4b..1c324ff55ea8 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -29,6 +29,10 @@
 #define H_LONG_BUSY_ORDER_100_SEC	9905  /* Long busy, hint that 100sec \
						 is a good time to retry */
 #define H_LONG_BUSY_END_RANGE		9905  /* End of long busy range */
+
+/* Internal value used in book3s_hv kvm support; not returned to guests */
+#define H_TOO_HARD	9999
+
 #define H_HARDWARE	-1	/* Hardware error */
 #define H_FUNCTION	-2	/* Function not supported */
 #define H_PRIVILEGE	-3	/* Caller not privileged */
@@ -100,6 +104,7 @@
 #define H_PAGE_SET_ACTIVE	H_PAGE_STATE_CHANGE
 #define H_AVPN			(1UL<<(63-32))	/* An avpn is provided as a sanity test */
 #define H_ANDCOND		(1UL<<(63-33))
+#define H_LOCAL			(1UL<<(63-35))
 #define H_ICACHE_INVALIDATE	(1UL<<(63-40))	/* icbi, etc.  (ignored for IO pages) */
 #define H_ICACHE_SYNCHRONIZE	(1UL<<(63-41))	/* dcbst, icbi, etc (ignored for IO pages */
 #define H_COALESCE_CAND		(1UL<<(63-42))	/* page is a good candidate for coalescing */
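H_TOO_HARD is never seen by a guest: a real-mode hcall handler returns it when the request needs facilities only available once the full kernel is mapped, and the dispatcher in book3s_hv_rmhandlers.S (below) then falls back to the normal virtual-mode exit path. A minimal standalone sketch of the convention; the handler and its condition are hypothetical, not part of this patch:

#include <stdio.h>

#define H_SUCCESS	0
#define H_TOO_HARD	9999	/* internal: retry the hcall in virtual mode */

/* hypothetical real-mode handler: bail out when the fast path can't cope */
static long rm_handler(int needs_virt_mode)
{
	return needs_virt_mode ? H_TOO_HARD : H_SUCCESS;
}

int main(void)
{
	if (rm_handler(1) == H_TOO_HARD)
		printf("falling back to the virtual-mode hcall path\n");
	return 0;
}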
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 4a3f790d5fc4..6ebf1721680a 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -59,6 +59,10 @@ struct kvm;
 struct kvm_run;
 struct kvm_vcpu;
 
+struct lppaca;
+struct slb_shadow;
+struct dtl;
+
 struct kvm_vm_stat {
 	u32 remote_tlb_flush;
 };
@@ -344,7 +348,14 @@ struct kvm_vcpu_arch {
 	u64 dec_expires;
 	unsigned long pending_exceptions;
 	u16 last_cpu;
+	u8 ceded;
+	u8 prodded;
 	u32 last_inst;
+
+	struct lppaca *vpa;
+	struct slb_shadow *slb_shadow;
+	struct dtl *dtl;
+	struct dtl *dtl_end;
 	int trap;
 	struct kvm_vcpu_arch_shared *shared;
 	unsigned long magic_page_pa; /* phys addr to map the magic page to */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 0dafd53c30ed..2afe92e6f625 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -118,6 +118,7 @@ extern long kvmppc_prepare_vrma(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem);
 extern void kvmppc_map_vrma(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem);
+extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 936267462cae..c70d106bf1a4 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -189,6 +189,7 @@ int main(void)
 	DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
 	DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
 	DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
+	DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count));
 	DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
 #endif /* CONFIG_PPC_STD_MMU_64 */
 	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
@@ -459,6 +460,7 @@ int main(void)
 	DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
 	DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
 	DEFINE(VCPU_LPCR, offsetof(struct kvm_vcpu, arch.lpcr));
+	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
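These DEFINE() entries are how field offsets such as LPPACA_YIELDCOUNT and VCPU_VPA become constants usable from the assembler in book3s_hv_rmhandlers.S: asm-offsets.c is compiled to assembly, and the build scrapes marker lines out of the result to generate a header of #defines. A simplified reconstruction of the mechanism, using a hypothetical stand-in struct rather than the real struct lppaca:

/* a stand-in for struct lppaca, for illustration only */
struct lppaca_stub {
	char		pad[0x100];
	unsigned int	yield_count;
};

/* emit "->SYM <value>" into the generated .s file; the build greps
 * these markers and turns each one into "#define SYM <value>" */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(LPPACA_YIELDCOUNT_DEMO,
	       __builtin_offsetof(struct lppaca_stub, yield_count));
	return 0;
}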
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 8a435a6da665..2ecffc0dc1bb 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -54,14 +54,17 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
 	book3s_hv.o \
 	book3s_hv_interrupts.o \
 	book3s_64_mmu_hv.o
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+	book3s_hv_rm_mmu.o
 
-kvm-book3s_64-objs := \
+kvm-book3s_64-module-objs := \
 	../../../virt/kvm/kvm_main.o \
 	powerpc.o \
 	emulate.o \
 	book3s.o \
 	$(kvm-book3s_64-objs-y)
-kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs)
+
+kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
 
 kvm-book3s_32-objs := \
 	$(common-objs-y) \
@@ -83,3 +86,4 @@ obj-$(CONFIG_KVM_E500) += kvm.o
 obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
 obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
 
+obj-y += $(kvm-book3s_64-builtin-objs-y)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 60b7300568c8..af862c30b70e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -124,6 +124,156 @@ void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 	       vcpu->arch.last_inst);
 }
 
+struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
+{
+	int r;
+	struct kvm_vcpu *v, *ret = NULL;
+
+	mutex_lock(&kvm->lock);
+	kvm_for_each_vcpu(r, v, kvm) {
+		if (v->vcpu_id == id) {
+			ret = v;
+			break;
+		}
+	}
+	mutex_unlock(&kvm->lock);
+	return ret;
+}
+
+static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
+{
+	vpa->shared_proc = 1;
+	vpa->yield_count = 1;
+}
+
+static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
+				       unsigned long flags,
+				       unsigned long vcpuid, unsigned long vpa)
+{
+	struct kvm *kvm = vcpu->kvm;
+	unsigned long pg_index, ra, len;
+	unsigned long pg_offset;
+	void *va;
+	struct kvm_vcpu *tvcpu;
+
+	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
+	if (!tvcpu)
+		return H_PARAMETER;
+
+	flags >>= 63 - 18;
+	flags &= 7;
+	if (flags == 0 || flags == 4)
+		return H_PARAMETER;
+	if (flags < 4) {
+		if (vpa & 0x7f)
+			return H_PARAMETER;
+		/* registering new area; convert logical addr to real */
+		pg_index = vpa >> kvm->arch.ram_porder;
+		pg_offset = vpa & (kvm->arch.ram_psize - 1);
+		if (pg_index >= kvm->arch.ram_npages)
+			return H_PARAMETER;
+		if (kvm->arch.ram_pginfo[pg_index].pfn == 0)
+			return H_PARAMETER;
+		ra = kvm->arch.ram_pginfo[pg_index].pfn << PAGE_SHIFT;
+		ra |= pg_offset;
+		va = __va(ra);
+		if (flags <= 1)
+			len = *(unsigned short *)(va + 4);
+		else
+			len = *(unsigned int *)(va + 4);
+		if (pg_offset + len > kvm->arch.ram_psize)
+			return H_PARAMETER;
+		switch (flags) {
+		case 1:		/* register VPA */
+			if (len < 640)
+				return H_PARAMETER;
+			tvcpu->arch.vpa = va;
+			init_vpa(vcpu, va);
+			break;
+		case 2:		/* register DTL */
+			if (len < 48)
+				return H_PARAMETER;
+			if (!tvcpu->arch.vpa)
+				return H_RESOURCE;
+			len -= len % 48;
+			tvcpu->arch.dtl = va;
+			tvcpu->arch.dtl_end = va + len;
+			break;
+		case 3:		/* register SLB shadow buffer */
+			if (len < 8)
+				return H_PARAMETER;
+			if (!tvcpu->arch.vpa)
+				return H_RESOURCE;
+			tvcpu->arch.slb_shadow = va;
+			break;
+		}
+	} else {
+		switch (flags) {
+		case 5:		/* unregister VPA */
+			if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
+				return H_RESOURCE;
+			tvcpu->arch.vpa = NULL;
+			break;
+		case 6:		/* unregister DTL */
+			tvcpu->arch.dtl = NULL;
+			break;
+		case 7:		/* unregister SLB shadow buffer */
+			tvcpu->arch.slb_shadow = NULL;
+			break;
+		}
+	}
+	return H_SUCCESS;
+}
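The subfunction of H_REGISTER_VPA lives in bits 16:18 of the flags argument (IBM bit numbering), hence the shift by 63 - 18 and the mask of 7 above: values 1-3 register the VPA, DTL and SLB shadow buffer, 5-7 unregister them, and 0 and 4 are invalid. A userspace-compilable sketch of just that decoding; the function name is illustrative, not a kernel identifier:

#include <stdio.h>

static const char *vpa_subfunc(unsigned long flags)
{
	switch ((flags >> (63 - 18)) & 7) {
	case 1: return "register VPA";
	case 2: return "register DTL";
	case 3: return "register SLB shadow buffer";
	case 5: return "unregister VPA";
	case 6: return "unregister DTL";
	case 7: return "unregister SLB shadow buffer";
	default: return "invalid -> H_PARAMETER";	/* 0 and 4 */
	}
}

int main(void)
{
	unsigned long flags = 1UL << (63 - 18);	/* subfunction 1 */

	printf("%s\n", vpa_subfunc(flags));	/* register VPA */
	return 0;
}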
+
+int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
+{
+	unsigned long req = kvmppc_get_gpr(vcpu, 3);
+	unsigned long target, ret = H_SUCCESS;
+	struct kvm_vcpu *tvcpu;
+
+	switch (req) {
+	case H_CEDE:
+		vcpu->arch.shregs.msr |= MSR_EE;
+		vcpu->arch.ceded = 1;
+		smp_mb();
+		if (!vcpu->arch.prodded)
+			kvmppc_vcpu_block(vcpu);
+		else
+			vcpu->arch.prodded = 0;
+		smp_mb();
+		vcpu->arch.ceded = 0;
+		break;
+	case H_PROD:
+		target = kvmppc_get_gpr(vcpu, 4);
+		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+		if (!tvcpu) {
+			ret = H_PARAMETER;
+			break;
+		}
+		tvcpu->arch.prodded = 1;
+		smp_mb();
+		if (tvcpu->arch.ceded) {
+			if (waitqueue_active(&tvcpu->wq)) {
+				wake_up_interruptible(&tvcpu->wq);
+				tvcpu->stat.halt_wakeup++;
+			}
+		}
+		break;
+	case H_CONFER:
+		break;
+	case H_REGISTER_VPA:
+		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
+					kvmppc_get_gpr(vcpu, 5),
+					kvmppc_get_gpr(vcpu, 6));
+		break;
+	default:
+		return RESUME_HOST;
+	}
+	kvmppc_set_gpr(vcpu, 3, ret);
+	vcpu->arch.hcall_needed = 0;
+	return RESUME_GUEST;
+}
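H_CEDE and H_PROD form a two-flag sleep/wakeup protocol: each side sets its own flag, executes a full barrier (smp_mb()), then reads the other side's flag, so at least one side is guaranteed to observe the other and a prod can never be lost against a concurrent cede. A rough standalone model of that ordering, with C11 atomics standing in for the kernel primitives and all names illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool ceded, prodded;

/* H_CEDE side: advertise the cede, then look for a pending prod */
static bool cede_should_block(void)
{
	atomic_store(&ceded, true);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (atomic_exchange(&prodded, false))
		return false;		/* prod already arrived: don't block */
	return true;			/* safe to block until woken */
}

/* H_PROD side: post the prod, then check whether the target ceded */
static bool prod_needs_wakeup(void)
{
	atomic_store(&prodded, true);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	return atomic_load(&ceded);	/* if so, wake its waitqueue */
}

int main(void)
{
	printf("%d\n", prod_needs_wakeup());	/* 0: nobody has ceded yet */
	printf("%d\n", cede_should_block());	/* 0: the prod above is pending */
	return 0;
}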
+
 static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			      struct task_struct *tsk)
 {
@@ -318,7 +468,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 
 extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 
-int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_run_vcpu(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	u64 now;
 
@@ -370,6 +520,22 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	return -EBUSY;
 }
 
+int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	int r;
+
+	do {
+		r = kvmppc_run_vcpu(run, vcpu);
+
+		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
+		    !(vcpu->arch.shregs.msr & MSR_PR)) {
+			r = kvmppc_pseries_do_hcall(vcpu);
+			kvmppc_core_deliver_interrupts(vcpu);
+		}
+	} while (r == RESUME_GUEST);
+	return r;
+}
+
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem)
 {
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
new file mode 100644
index 000000000000..edb0aae901a3
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -0,0 +1,368 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/hugetlb.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+
+/* For now use fixed-size 16MB page table */
+#define HPT_ORDER	24
+#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
+#define HPT_HASH_MASK	(HPT_NPTEG - 1)
+
+#define HPTE_V_HVLOCK	0x40UL
+
+static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
+{
+	unsigned long tmp, old;
+
+	asm volatile("	ldarx	%0,0,%2\n"
+		     "	and.	%1,%0,%3\n"
+		     "	bne	2f\n"
+		     "	ori	%0,%0,%4\n"
+		     "	stdcx.	%0,0,%2\n"
+		     "	beq+	2f\n"
+		     "	li	%1,%3\n"
+		     "2:	isync"
+		     : "=&r" (tmp), "=&r" (old)
+		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
+		     : "cc", "memory");
+	return old == 0;
+}
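lock_hpte() is a try-lock on the first doubleword of an HPTE: it fails if any bit in bits (always including HPTE_V_HVLOCK, sometimes HPTE_V_VALID too) is already set, and otherwise sets the lock bit under a ldarx/stdcx. reservation. Roughly the same semantics in portable C11, with a compare-and-swap replacing the reservation (an assumption for illustration; the real code keeps the test and the store inside one reservation):

#include <stdatomic.h>
#include <stdbool.h>

#define HPTE_V_HVLOCK	0x40UL

/* fail if any bit in 'bits' is set; otherwise set HVLOCK atomically */
static bool try_lock_hpte(_Atomic unsigned long *hpte, unsigned long bits)
{
	unsigned long old = atomic_load(hpte);

	if (old & bits)
		return false;
	return atomic_compare_exchange_strong(hpte, &old,
					      old | HPTE_V_HVLOCK);
}

int main(void)
{
	_Atomic unsigned long hpte = 0;

	return try_lock_hpte(&hpte, HPTE_V_HVLOCK) ? 0 : 1;	/* locks: 0 */
}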
47 | |||
48 | long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, | ||
49 | long pte_index, unsigned long pteh, unsigned long ptel) | ||
50 | { | ||
51 | unsigned long porder; | ||
52 | struct kvm *kvm = vcpu->kvm; | ||
53 | unsigned long i, lpn, pa; | ||
54 | unsigned long *hpte; | ||
55 | |||
56 | /* only handle 4k, 64k and 16M pages for now */ | ||
57 | porder = 12; | ||
58 | if (pteh & HPTE_V_LARGE) { | ||
59 | if ((ptel & 0xf000) == 0x1000) { | ||
60 | /* 64k page */ | ||
61 | porder = 16; | ||
62 | } else if ((ptel & 0xff000) == 0) { | ||
63 | /* 16M page */ | ||
64 | porder = 24; | ||
65 | /* lowest AVA bit must be 0 for 16M pages */ | ||
66 | if (pteh & 0x80) | ||
67 | return H_PARAMETER; | ||
68 | } else | ||
69 | return H_PARAMETER; | ||
70 | } | ||
71 | lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder; | ||
72 | if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder) | ||
73 | return H_PARAMETER; | ||
74 | pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT; | ||
75 | if (!pa) | ||
76 | return H_PARAMETER; | ||
77 | /* Check WIMG */ | ||
78 | if ((ptel & HPTE_R_WIMG) != HPTE_R_M && | ||
79 | (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M)) | ||
80 | return H_PARAMETER; | ||
81 | pteh &= ~0x60UL; | ||
82 | ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize); | ||
83 | ptel |= pa; | ||
84 | if (pte_index >= (HPT_NPTEG << 3)) | ||
85 | return H_PARAMETER; | ||
86 | if (likely((flags & H_EXACT) == 0)) { | ||
87 | pte_index &= ~7UL; | ||
88 | hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); | ||
89 | for (i = 0; ; ++i) { | ||
90 | if (i == 8) | ||
91 | return H_PTEG_FULL; | ||
92 | if ((*hpte & HPTE_V_VALID) == 0 && | ||
93 | lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) | ||
94 | break; | ||
95 | hpte += 2; | ||
96 | } | ||
97 | } else { | ||
98 | i = 0; | ||
99 | hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); | ||
100 | if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) | ||
101 | return H_PTEG_FULL; | ||
102 | } | ||
103 | hpte[1] = ptel; | ||
104 | eieio(); | ||
105 | hpte[0] = pteh; | ||
106 | asm volatile("ptesync" : : : "memory"); | ||
107 | atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt); | ||
108 | vcpu->arch.gpr[4] = pte_index + i; | ||
109 | return H_SUCCESS; | ||
110 | } | ||
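The index arithmetic above follows from the HPT layout: each HPTE is two 64-bit doublewords (16 bytes), so pte_index << 4 is a byte offset; a PTEG holds 8 HPTEs, so a non-H_EXACT insert rounds pte_index down with ~7UL and probes all 8 slots; and HPT_NPTEG << 3 is the total slot count used for bounds checks. A small standalone illustration of the same arithmetic, with an arbitrary sample index:

#include <stdio.h>

#define HPT_ORDER	24				/* fixed 16 MB HPT, as above */
#define HPT_NPTEG	(1UL << (HPT_ORDER - 7))	/* 128 bytes per PTEG */

int main(void)
{
	unsigned long pte_index = 12345;

	/* each HPTE is 16 bytes, hence the << 4 when forming the address */
	printf("byte offset into HPT : 0x%lx\n", pte_index << 4);
	/* without H_EXACT the index is rounded down to its 8-slot PTEG */
	printf("first slot of PTEG   : %lu\n", pte_index & ~7UL);
	/* HPT_NPTEG << 3 slots in total, as bounds-checked above */
	printf("index in range       : %s\n",
	       pte_index < (HPT_NPTEG << 3) ? "yes" : "no");
	return 0;
}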
+
+static unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+				      unsigned long pte_index)
+{
+	unsigned long rb, va_low;
+
+	rb = (v & ~0x7fUL) << 16;		/* AVA field */
+	va_low = pte_index >> 3;
+	if (v & HPTE_V_SECONDARY)
+		va_low = ~va_low;
+	/* xor vsid from AVA */
+	if (!(v & HPTE_V_1TB_SEG))
+		va_low ^= v >> 12;
+	else
+		va_low ^= v >> 24;
+	va_low &= 0x7ff;
+	if (v & HPTE_V_LARGE) {
+		rb |= 1;			/* L field */
+		if (r & 0xff000) {
+			/* non-16MB large page, must be 64k */
+			/* (masks depend on page size) */
+			rb |= 0x1000;		/* page encoding in LP field */
+			rb |= (va_low & 0x7f) << 16;	/* 7b of VA in AVA/LP field */
+			rb |= (va_low & 0xfe);	/* AVAL field (P7 doesn't seem to care) */
+		}
+	} else {
+		/* 4kB page */
+		rb |= (va_low & 0x7ff) << 12;	/* remaining 11b of VA */
+	}
+	rb |= (v >> 54) & 0x300;		/* B field */
+	return rb;
+}
+
+#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
+
+static inline int try_lock_tlbie(unsigned int *lock)
+{
+	unsigned int tmp, old;
+	unsigned int token = LOCK_TOKEN;
+
+	asm volatile("1:lwarx	%1,0,%2\n"
+		     "	cmpwi	cr0,%1,0\n"
+		     "	bne	2f\n"
+		     "	stwcx.	%3,0,%2\n"
+		     "	bne-	1b\n"
+		     "	isync\n"
+		     "2:"
+		     : "=&r" (tmp), "=&r" (old)
+		     : "r" (lock), "r" (token)
+		     : "cc", "memory");
+	return old == 0;
+}
+
+long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
+		     unsigned long pte_index, unsigned long avpn,
+		     unsigned long va)
+{
+	struct kvm *kvm = vcpu->kvm;
+	unsigned long *hpte;
+	unsigned long v, r, rb;
+
+	if (pte_index >= (HPT_NPTEG << 3))
+		return H_PARAMETER;
+	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
+		cpu_relax();
+	if ((hpte[0] & HPTE_V_VALID) == 0 ||
+	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
+	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
+		hpte[0] &= ~HPTE_V_HVLOCK;
+		return H_NOT_FOUND;
+	}
+	if (atomic_read(&kvm->online_vcpus) == 1)
+		flags |= H_LOCAL;
+	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
+	vcpu->arch.gpr[5] = r = hpte[1];
+	rb = compute_tlbie_rb(v, r, pte_index);
+	hpte[0] = 0;
+	if (!(flags & H_LOCAL)) {
+		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+			cpu_relax();
+		asm volatile("ptesync" : : : "memory");
+		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
+			     : : "r" (rb), "r" (kvm->arch.lpid));
+		asm volatile("ptesync" : : : "memory");
+		kvm->arch.tlbie_lock = 0;
+	} else {
+		asm volatile("ptesync" : : : "memory");
+		asm volatile("tlbiel %0" : : "r" (rb));
+		asm volatile("ptesync" : : : "memory");
+	}
+	return H_SUCCESS;
+}
+
+long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	unsigned long *args = &vcpu->arch.gpr[4];
+	unsigned long *hp, tlbrb[4];
+	long int i, found;
+	long int n_inval = 0;
+	unsigned long flags, req, pte_index;
+	long int local = 0;
+	long int ret = H_SUCCESS;
+
+	if (atomic_read(&kvm->online_vcpus) == 1)
+		local = 1;
+	for (i = 0; i < 4; ++i) {
+		pte_index = args[i * 2];
+		flags = pte_index >> 56;
+		pte_index &= ((1ul << 56) - 1);
+		req = flags >> 6;
+		flags &= 3;
+		if (req == 3)
+			break;
+		if (req != 1 || flags == 3 ||
+		    pte_index >= (HPT_NPTEG << 3)) {
+			/* parameter error */
+			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
+			ret = H_PARAMETER;
+			break;
+		}
+		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+		while (!lock_hpte(hp, HPTE_V_HVLOCK))
+			cpu_relax();
+		found = 0;
+		if (hp[0] & HPTE_V_VALID) {
+			switch (flags & 3) {
+			case 0:		/* absolute */
+				found = 1;
+				break;
+			case 1:		/* andcond */
+				if (!(hp[0] & args[i * 2 + 1]))
+					found = 1;
+				break;
+			case 2:		/* AVPN */
+				if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
+					found = 1;
+				break;
+			}
+		}
+		if (!found) {
+			hp[0] &= ~HPTE_V_HVLOCK;
+			args[i * 2] = ((0x90 | flags) << 56) + pte_index;
+			continue;
+		}
+		/* insert R and C bits from PTE */
+		flags |= (hp[1] >> 5) & 0x0c;
+		args[i * 2] = ((0x80 | flags) << 56) + pte_index;
+		tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
+		hp[0] = 0;
+	}
+	if (n_inval == 0)
+		return ret;
+
+	if (!local) {
+		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+			cpu_relax();
+		asm volatile("ptesync" : : : "memory");
+		for (i = 0; i < n_inval; ++i)
+			asm volatile(PPC_TLBIE(%1,%0)
+				     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
+		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+		kvm->arch.tlbie_lock = 0;
+	} else {
+		asm volatile("ptesync" : : : "memory");
+		for (i = 0; i < n_inval; ++i)
+			asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
+		asm volatile("ptesync" : : : "memory");
+	}
+	return ret;
+}
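Each H_BULK_REMOVE parameter doubleword packs a 2-bit request type and 2-bit flags into the top byte and the PTE index into the low 56 bits, and the handler writes a completion code back into that top byte (0x80 | flags plus the R/C bits on success, 0x90 not found, 0xa0 parameter error). A standalone sketch of the encoding, using made-up sample values:

#include <stdio.h>

int main(void)
{
	/* request type 1 (bits 63:62), flags 2 = AVPN (bits 57:56),
	 * PTE index in the low 56 bits -- hypothetical sample values */
	unsigned long arg = (1UL << 62) | (2UL << 56) | 0x123;

	unsigned long pte_index = arg & ((1UL << 56) - 1);
	unsigned long ctrl = arg >> 56;
	unsigned long req = ctrl >> 6;		/* 3 terminates the list */
	unsigned long flags = ctrl & 3;		/* 0 abs, 1 andcond, 2 AVPN */

	printf("req=%lu flags=%lu pte_index=0x%lx\n", req, flags, pte_index);
	return 0;
}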
+
+long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
+		      unsigned long pte_index, unsigned long avpn,
+		      unsigned long va)
+{
+	struct kvm *kvm = vcpu->kvm;
+	unsigned long *hpte;
+	unsigned long v, r, rb;
+
+	if (pte_index >= (HPT_NPTEG << 3))
+		return H_PARAMETER;
+	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
+		cpu_relax();
+	if ((hpte[0] & HPTE_V_VALID) == 0 ||
+	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
+		hpte[0] &= ~HPTE_V_HVLOCK;
+		return H_NOT_FOUND;
+	}
+	if (atomic_read(&kvm->online_vcpus) == 1)
+		flags |= H_LOCAL;
+	v = hpte[0];
+	r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
+			HPTE_R_KEY_HI | HPTE_R_KEY_LO);
+	r |= (flags << 55) & HPTE_R_PP0;
+	r |= (flags << 48) & HPTE_R_KEY_HI;
+	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
+	rb = compute_tlbie_rb(v, r, pte_index);
+	hpte[0] = v & ~HPTE_V_VALID;
+	if (!(flags & H_LOCAL)) {
+		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+			cpu_relax();
+		asm volatile("ptesync" : : : "memory");
+		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
+			     : : "r" (rb), "r" (kvm->arch.lpid));
+		asm volatile("ptesync" : : : "memory");
+		kvm->arch.tlbie_lock = 0;
+	} else {
+		asm volatile("ptesync" : : : "memory");
+		asm volatile("tlbiel %0" : : "r" (rb));
+		asm volatile("ptesync" : : : "memory");
+	}
+	hpte[1] = r;
+	eieio();
+	hpte[0] = v & ~HPTE_V_HVLOCK;
+	asm volatile("ptesync" : : : "memory");
+	return H_SUCCESS;
+}
+
+static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
+{
+	long int i;
+	unsigned long offset, rpn;
+
+	offset = realaddr & (kvm->arch.ram_psize - 1);
+	rpn = (realaddr - offset) >> PAGE_SHIFT;
+	for (i = 0; i < kvm->arch.ram_npages; ++i)
+		if (rpn == kvm->arch.ram_pginfo[i].pfn)
+			return (i << PAGE_SHIFT) + offset;
+	return HPTE_R_RPN;	/* all 1s in the RPN field */
+}
+
+long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
+		   unsigned long pte_index)
+{
+	struct kvm *kvm = vcpu->kvm;
+	unsigned long *hpte, r;
+	int i, n = 1;
+
+	if (pte_index >= (HPT_NPTEG << 3))
+		return H_PARAMETER;
+	if (flags & H_READ_4) {
+		pte_index &= ~3;
+		n = 4;
+	}
+	for (i = 0; i < n; ++i, ++pte_index) {
+		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+		r = hpte[1];
+		if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID))
+			r = reverse_xlate(kvm, r & HPTE_R_RPN) |
+				(r & ~HPTE_R_RPN);
+		vcpu->arch.gpr[4 + i * 2] = hpte[0];
+		vcpu->arch.gpr[5 + i * 2] = r;
+	}
+	return H_SUCCESS;
+}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 9af264840b98..319ff63b1f31 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -166,6 +166,14 @@ kvmppc_hv_entry:
 	/* Save R1 in the PACA */
 	std	r1, HSTATE_HOST_R1(r13)
 
+	/* Increment yield count if they have a VPA */
+	ld	r3, VCPU_VPA(r4)
+	cmpdi	r3, 0
+	beq	25f
+	lwz	r5, LPPACA_YIELDCOUNT(r3)
+	addi	r5, r5, 1
+	stw	r5, LPPACA_YIELDCOUNT(r3)
+25:
 	/* Load up DAR and DSISR */
 	ld	r5, VCPU_DAR(r4)
 	lwz	r6, VCPU_DSISR(r4)
@@ -401,6 +409,10 @@ kvmppc_interrupt:
 	cmpwi	r3,0
 	bge	ignore_hdec
 2:
+	/* See if this is something we can handle in real mode */
+	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
+	beq	hcall_try_real_mode
+hcall_real_cont:
 
 	/* Check for mediated interrupts (could be done earlier really ...) */
 	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
@@ -579,13 +591,28 @@ hdec_soon:
 	std	r5, VCPU_SPRG2(r9)
 	std	r6, VCPU_SPRG3(r9)
 
-	/* Save PMU registers */
+	/* Increment yield count if they have a VPA */
+	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
+	cmpdi	r8, 0
+	beq	25f
+	lwz	r3, LPPACA_YIELDCOUNT(r8)
+	addi	r3, r3, 1
+	stw	r3, LPPACA_YIELDCOUNT(r8)
+25:
+	/* Save PMU registers if requested */
+	/* r8 and cr0.eq are live here */
 	li	r3, 1
 	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
 	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
 	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
 	isync
-	mfspr	r5, SPRN_MMCR1
+	beq	21f			/* if no VPA, save PMU stuff anyway */
+	lbz	r7, LPPACA_PMCINUSE(r8)
+	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
+	bne	21f
+	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
+	b	22f
+21:	mfspr	r5, SPRN_MMCR1
 	mfspr	r6, SPRN_MMCRA
 	std	r4, VCPU_MMCR(r9)
 	std	r5, VCPU_MMCR + 8(r9)
@@ -676,6 +703,125 @@ hdec_soon:
 	mfspr	r7,SPRN_HDSISR
 	b	7b
 
+/*
+ * Try to handle an hcall in real mode.
+ * Returns to the guest if we handle it, or continues on up to
+ * the kernel if we can't (i.e. if we don't have a handler for
+ * it, or if the handler returns H_TOO_HARD).
+ */
+	.globl	hcall_try_real_mode
+hcall_try_real_mode:
+	ld	r3,VCPU_GPR(r3)(r9)
+	andi.	r0,r11,MSR_PR
+	bne	hcall_real_cont
+	clrrdi	r3,r3,2
+	cmpldi	r3,hcall_real_table_end - hcall_real_table
+	bge	hcall_real_cont
+	LOAD_REG_ADDR(r4, hcall_real_table)
+	lwzx	r3,r3,r4
+	cmpwi	r3,0
+	beq	hcall_real_cont
+	add	r3,r3,r4
+	mtctr	r3
+	mr	r3,r9		/* get vcpu pointer */
+	ld	r4,VCPU_GPR(r4)(r9)
+	bctrl
+	cmpdi	r3,H_TOO_HARD
+	beq	hcall_real_fallback
+	ld	r4,HSTATE_KVM_VCPU(r13)
+	std	r3,VCPU_GPR(r3)(r4)
+	ld	r10,VCPU_PC(r4)
+	ld	r11,VCPU_MSR(r4)
+	b	fast_guest_return
+
+/* We've attempted a real mode hcall, but it's punted it back
+ * to userspace.  We need to restore some clobbered volatiles
+ * before resuming the pass-it-to-qemu path */
+hcall_real_fallback:
+	li	r12,BOOK3S_INTERRUPT_SYSCALL
+	ld	r9, HSTATE_KVM_VCPU(r13)
+	ld	r11, VCPU_MSR(r9)
+
+	b	hcall_real_cont
+
+	.globl	hcall_real_table
+hcall_real_table:
+	.long	0		/* 0 - unused */
+	.long	.kvmppc_h_remove - hcall_real_table
+	.long	.kvmppc_h_enter - hcall_real_table
+	.long	.kvmppc_h_read - hcall_real_table
+	.long	0		/* 0x10 - H_CLEAR_MOD */
+	.long	0		/* 0x14 - H_CLEAR_REF */
+	.long	.kvmppc_h_protect - hcall_real_table
+	.long	0		/* 0x1c - H_GET_TCE */
+	.long	0		/* 0x20 - H_SET_TCE */
+	.long	0		/* 0x24 - H_SET_SPRG0 */
+	.long	.kvmppc_h_set_dabr - hcall_real_table
+	.long	0		/* 0x2c */
+	.long	0		/* 0x30 */
+	.long	0		/* 0x34 */
+	.long	0		/* 0x38 */
+	.long	0		/* 0x3c */
+	.long	0		/* 0x40 */
+	.long	0		/* 0x44 */
+	.long	0		/* 0x48 */
+	.long	0		/* 0x4c */
+	.long	0		/* 0x50 */
+	.long	0		/* 0x54 */
+	.long	0		/* 0x58 */
+	.long	0		/* 0x5c */
+	.long	0		/* 0x60 */
+	.long	0		/* 0x64 */
+	.long	0		/* 0x68 */
+	.long	0		/* 0x6c */
+	.long	0		/* 0x70 */
+	.long	0		/* 0x74 */
+	.long	0		/* 0x78 */
+	.long	0		/* 0x7c */
+	.long	0		/* 0x80 */
+	.long	0		/* 0x84 */
+	.long	0		/* 0x88 */
+	.long	0		/* 0x8c */
+	.long	0		/* 0x90 */
+	.long	0		/* 0x94 */
+	.long	0		/* 0x98 */
+	.long	0		/* 0x9c */
+	.long	0		/* 0xa0 */
+	.long	0		/* 0xa4 */
+	.long	0		/* 0xa8 */
+	.long	0		/* 0xac */
+	.long	0		/* 0xb0 */
+	.long	0		/* 0xb4 */
+	.long	0		/* 0xb8 */
+	.long	0		/* 0xbc */
+	.long	0		/* 0xc0 */
+	.long	0		/* 0xc4 */
+	.long	0		/* 0xc8 */
+	.long	0		/* 0xcc */
+	.long	0		/* 0xd0 */
+	.long	0		/* 0xd4 */
+	.long	0		/* 0xd8 */
+	.long	0		/* 0xdc */
+	.long	0		/* 0xe0 */
+	.long	0		/* 0xe4 */
+	.long	0		/* 0xe8 */
+	.long	0		/* 0xec */
+	.long	0		/* 0xf0 */
+	.long	0		/* 0xf4 */
+	.long	0		/* 0xf8 */
+	.long	0		/* 0xfc */
+	.long	0		/* 0x100 */
+	.long	0		/* 0x104 */
+	.long	0		/* 0x108 */
+	.long	0		/* 0x10c */
+	.long	0		/* 0x110 */
+	.long	0		/* 0x114 */
+	.long	0		/* 0x118 */
+	.long	0		/* 0x11c */
+	.long	0		/* 0x120 */
+	.long	.kvmppc_h_bulk_remove - hcall_real_table
+hcall_real_table_end:
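hcall_real_table is indexable directly by hcall number: PAPR hcall numbers are multiples of 4 and each entry is a 4-byte self-relative offset, so after clrrdi clears the low two bits the number itself serves as the byte offset, and a zero entry or an out-of-range number falls through to the normal exit path. A C model of the same dispatch, with function pointers standing in for the offset table and placeholder handlers:

#include <stddef.h>
#include <stdio.h>

typedef long (*hcall_fn)(void);

/* placeholders standing in for the real-mode routines */
static long h_remove(void) { return 0; }	/* hcall 0x04 */
static long h_enter(void)  { return 0; }	/* hcall 0x08 */

/* the kernel table stores 32-bit "handler - table" offsets so it is
 * position-independent; plain function pointers stand in for them here */
static const hcall_fn table[] = {
	NULL,		/* 0x00 - unused */
	h_remove,	/* 0x04 */
	h_enter,	/* 0x08 */
};

static long dispatch(unsigned long req)
{
	/* clrrdi r3,r3,2; in C the index is the hcall number / 4 */
	unsigned long idx = (req & ~3UL) >> 2;

	if (idx >= sizeof(table) / sizeof(table[0]) || !table[idx])
		return -1;	/* no real-mode handler: exit to the kernel */
	return table[idx]();
}

int main(void)
{
	printf("%ld\n", dispatch(0x04));	/* 0: handled by h_remove */
	printf("%ld\n", dispatch(0x10));	/* -1: falls back */
	return 0;
}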
+
 ignore_hdec:
 	mr	r4,r9
 	b	fast_guest_return
@@ -688,10 +834,16 @@ bounce_ext_interrupt:
 	LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME);
 	b	fast_guest_return
 
+_GLOBAL(kvmppc_h_set_dabr)
+	std	r4,VCPU_DABR(r3)
+	mtspr	SPRN_DABR,r4
+	li	r3,0
+	blr
+
 /*
  * Save away FP, VMX and VSX registers.
  * r3 = vcpu pointer
  */
 _GLOBAL(kvmppc_save_fp)
 	mfmsr	r9
 	ori	r8,r9,MSR_FP
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 3a4f379ee70f..6fc9ee499b61 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -42,7 +42,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 	return !(v->arch.shared->msr & MSR_WE) ||
 	       !!(v->arch.pending_exceptions);
 #else
-	return 1;
+	return !(v->arch.ceded) || !!(v->arch.pending_exceptions);
 #endif
 }
 