Diffstat (limited to 'arch/powerpc/kvm/book3s.c')
-rw-r--r--  arch/powerpc/kvm/book3s.c | 272
1 file changed, 181 insertions(+), 91 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index a3cef30d1d42..e316847c08c0 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -17,6 +17,7 @@
 #include <linux/kvm_host.h>
 #include <linux/err.h>
 #include <linux/slab.h>
+#include "trace.h"
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
@@ -35,7 +36,6 @@
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
 
 /* #define EXIT_DEBUG */
-/* #define EXIT_DEBUG_SIMPLE */
 /* #define DEBUG_EXT */
 
 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
@@ -105,65 +105,71 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 	kvmppc_giveup_ext(vcpu, MSR_VSX);
 }
 
-#if defined(EXIT_DEBUG)
-static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
-{
-	u64 jd = mftb() - vcpu->arch.dec_jiffies;
-	return vcpu->arch.dec - jd;
-}
-#endif
-
 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.shadow_msr = vcpu->arch.msr;
+	ulong smsr = vcpu->arch.shared->msr;
+
 	/* Guest MSR values */
-	vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
-		MSR_BE | MSR_DE;
+	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
 	/* Process MSR values */
-	vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
-		MSR_EE;
+	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
 	/* External providers the guest reserved */
-	vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
+	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
 	/* 64-bit Process MSR values */
 #ifdef CONFIG_PPC_BOOK3S_64
-	vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
+	smsr |= MSR_ISF | MSR_HV;
 #endif
+	vcpu->arch.shadow_msr = smsr;
 }
 
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
-	ulong old_msr = vcpu->arch.msr;
+	ulong old_msr = vcpu->arch.shared->msr;
 
 #ifdef EXIT_DEBUG
 	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
 
 	msr &= to_book3s(vcpu)->msr_mask;
-	vcpu->arch.msr = msr;
+	vcpu->arch.shared->msr = msr;
 	kvmppc_recalc_shadow_msr(vcpu);
 
-	if (msr & (MSR_WE|MSR_POW)) {
+	if (msr & MSR_POW) {
 		if (!vcpu->arch.pending_exceptions) {
 			kvm_vcpu_block(vcpu);
 			vcpu->stat.halt_wakeup++;
+
+			/* Unset POW bit after we woke up */
+			msr &= ~MSR_POW;
+			vcpu->arch.shared->msr = msr;
 		}
 	}
 
-	if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
 	    (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
 		kvmppc_mmu_flush_segments(vcpu);
 		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+
+		/* Preload magic page segment when in kernel mode */
+		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
+			struct kvm_vcpu_arch *a = &vcpu->arch;
+
+			if (msr & MSR_DR)
+				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
+			else
+				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
+		}
 	}
 
 	/* Preload FPU if it's enabled */
-	if (vcpu->arch.msr & MSR_FP)
+	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
-	vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
-	vcpu->arch.srr1 = vcpu->arch.msr | flags;
+	vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
+	vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
 	kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
 	vcpu->arch.mmu.reset_msr(vcpu);
 }
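
The hunk above routes every guest-visible MSR/SRR access through vcpu->arch.shared, a page mapped into both host and guest. For orientation, here is a minimal sketch of the shared structure implied by the fields this diff touches (msr, srr0/srr1, sprg0-3, dar, dsisr, sr[], int_pending, critical); the authoritative definition lives in the kvm_para headers, and the exact field order and types here are assumptions:

	/* Sketch only: fields reconstructed from their uses in this diff. */
	struct kvm_vcpu_arch_shared {
		__u64 critical;		/* guest r1 while in a critical section */
		__u64 sprg0, sprg1, sprg2, sprg3;
		__u64 srr0, srr1;
		__u64 dar;		/* fault address */
		__u64 msr;
		__u32 dsisr;		/* fault reason */
		__u32 int_pending;	/* host->guest: an interrupt is waiting */
		__u32 sr[16];		/* 32-bit segment registers */
	};

Since both sides dereference the structure directly, guest reads and writes of these registers no longer have to trap into the hypervisor.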
@@ -180,6 +186,7 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)
 	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
 	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
 	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
+	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
 	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
 	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
 	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
@@ -199,6 +206,9 @@ static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
 {
 	clear_bit(kvmppc_book3s_vec2irqprio(vec),
 		  &vcpu->arch.pending_exceptions);
+
+	if (!vcpu->arch.pending_exceptions)
+		vcpu->arch.shared->int_pending = 0;
 }
 
 void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
@@ -237,13 +247,19 @@ void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 				struct kvm_interrupt *irq)
 {
-	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
+	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;
+
+	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
+		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;
+
+	kvmppc_book3s_queue_irqprio(vcpu, vec);
 }
 
 void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
 				  struct kvm_interrupt *irq)
 {
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
+	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
 }
 
 int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
@@ -251,14 +267,29 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 	int deliver = 1;
 	int vec = 0;
 	ulong flags = 0ULL;
+	ulong crit_raw = vcpu->arch.shared->critical;
+	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
+	bool crit;
+
+	/* Truncate crit indicators in 32 bit mode */
+	if (!(vcpu->arch.shared->msr & MSR_SF)) {
+		crit_raw &= 0xffffffff;
+		crit_r1 &= 0xffffffff;
+	}
+
+	/* Critical section when crit == r1 */
+	crit = (crit_raw == crit_r1);
+	/* ... and we're in supervisor mode */
+	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
 
 	switch (priority) {
 	case BOOK3S_IRQPRIO_DECREMENTER:
-		deliver = vcpu->arch.msr & MSR_EE;
+		deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
 		vec = BOOK3S_INTERRUPT_DECREMENTER;
 		break;
 	case BOOK3S_IRQPRIO_EXTERNAL:
-		deliver = vcpu->arch.msr & MSR_EE;
+	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
+		deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
 		vec = BOOK3S_INTERRUPT_EXTERNAL;
 		break;
 	case BOOK3S_IRQPRIO_SYSTEM_RESET:
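
The crit logic above is the host half of a simple handshake: the guest announces a critical section by storing its stack pointer into shared->critical, and the host then withholds EE-gated interrupts while the stored value still equals r1 in supervisor mode. A hypothetical guest-side counterpart, purely for illustration (the helper names are invented):

	/* Hypothetical guest-side sketch; 'shared' is the mapped magic page. */
	static inline void kvm_enter_critical(struct kvm_vcpu_arch_shared *shared)
	{
		unsigned long r1;

		asm volatile("mr %0,1" : "=r"(r1));	/* current stack pointer */
		shared->critical = r1;			/* host holds back interrupts */
	}

	static inline void kvm_exit_critical(struct kvm_vcpu_arch_shared *shared)
	{
		shared->critical = ~0UL;		/* no longer matches r1 */
	}

Using r1 as the token makes the section self-clearing: once the guest has left that stack frame, the comparison naturally fails and delivery resumes.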
@@ -320,9 +351,27 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 	return deliver;
 }
 
+/*
+ * This function determines if an irqprio should be cleared once issued.
+ */
+static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
+{
+	switch (priority) {
+	case BOOK3S_IRQPRIO_DECREMENTER:
+		/* DEC interrupts get cleared by mtdec */
+		return false;
+	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
+		/* External interrupts get cleared by userspace */
+		return false;
+	}
+
+	return true;
+}
+
 void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
 {
 	unsigned long *pending = &vcpu->arch.pending_exceptions;
+	unsigned long old_pending = vcpu->arch.pending_exceptions;
 	unsigned int priority;
 
 #ifdef EXIT_DEBUG
@@ -332,8 +381,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
 	priority = __ffs(*pending);
 	while (priority < BOOK3S_IRQPRIO_MAX) {
 		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
-		    (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
-			/* DEC interrupts get cleared by mtdec */
+		    clear_irqprio(vcpu, priority)) {
 			clear_bit(priority, &vcpu->arch.pending_exceptions);
 			break;
 		}
@@ -342,6 +390,12 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
 					 BITS_PER_BYTE * sizeof(*pending),
 					 priority + 1);
 	}
+
+	/* Tell the guest about our interrupt status */
+	if (*pending)
+		vcpu->arch.shared->int_pending = 1;
+	else if (old_pending)
+		vcpu->arch.shared->int_pending = 0;
 }
 
 void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
@@ -398,6 +452,25 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 	}
 }
 
+pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	ulong mp_pa = vcpu->arch.magic_page_pa;
+
+	/* Magic page override */
+	if (unlikely(mp_pa) &&
+	    unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
+		     ((mp_pa & PAGE_MASK) & KVM_PAM))) {
+		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+		pfn_t pfn;
+
+		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
+		get_page(pfn_to_page(pfn));
+		return pfn;
+	}
+
+	return gfn_to_pfn(vcpu->kvm, gfn);
+}
+
 /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
  * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
  * emulate 32 bytes dcbz length.
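
The new kvmppc_gfn_to_pfn() is plain address arithmetic: if the requested frame, shifted back into a physical address and masked with KVM_PAM, matches the page-aligned magic page address, the host hands out the pfn of its own shared page instead of consulting the memslots. A worked example with an assumed magic page location (the address is illustrative):

	ulong mp_pa = 0x03fff000;		/* assumed vcpu->arch.magic_page_pa */
	gfn_t gfn = 0x03fff000 >> PAGE_SHIFT;	/* guest touches that frame */

	/* true: the access is satisfied from the host's shared page */
	bool magic = ((gfn << PAGE_SHIFT) & KVM_PAM) ==
		     ((mp_pa & PAGE_MASK) & KVM_PAM);

The get_page() call keeps the reference count symmetric with the gfn_to_pfn() path, so callers can release the page the same way in both cases.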
@@ -415,8 +488,10 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 	int i;
 
 	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
-	if (is_error_page(hpage))
+	if (is_error_page(hpage)) {
+		kvm_release_page_clean(hpage);
 		return;
+	}
 
 	hpage_offset = pte->raddr & ~PAGE_MASK;
 	hpage_offset &= ~0xFFFULL;
@@ -437,14 +512,14 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
 			struct kvmppc_pte *pte)
 {
-	int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
+	int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
 	int r;
 
 	if (relocated) {
 		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
 	} else {
 		pte->eaddr = eaddr;
-		pte->raddr = eaddr & 0xffffffff;
+		pte->raddr = eaddr & KVM_PAM;
 		pte->vpage = VSID_REAL | eaddr >> 12;
 		pte->may_read = true;
 		pte->may_write = true;
@@ -533,6 +608,13 @@ mmio:
 
 static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
+	ulong mp_pa = vcpu->arch.magic_page_pa;
+
+	if (unlikely(mp_pa) &&
+	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
+		return 1;
+	}
+
 	return kvm_is_visible_gfn(vcpu->kvm, gfn);
 }
 
@@ -545,8 +627,8 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	int page_found = 0;
 	struct kvmppc_pte pte;
 	bool is_mmio = false;
-	bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
-	bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
+	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
 	u64 vsid;
 
 	relocated = data ? dr : ir;
@@ -558,12 +640,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte.may_execute = true;
 		pte.may_read = true;
 		pte.may_write = true;
-		pte.raddr = eaddr & 0xffffffff;
+		pte.raddr = eaddr & KVM_PAM;
 		pte.eaddr = eaddr;
 		pte.vpage = eaddr >> 12;
 	}
 
-	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 	case 0:
 		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
 		break;
@@ -571,7 +653,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case MSR_IR:
 		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
 
-		if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
 			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
 		else
 			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
@@ -594,20 +676,23 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	if (page_found == -ENOENT) {
 		/* Page not found in guest PTE entries */
-		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
-		to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
-		vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+		vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+		vcpu->arch.shared->msr |=
+			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
-		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
-		to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
-		to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-		vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+		vcpu->arch.shared->dsisr =
+			to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
+		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
+		vcpu->arch.shared->msr |=
+			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
-		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
 		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
 	} else if (!is_mmio &&
 		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
@@ -695,9 +780,11 @@ static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
 
 	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
 	if (ret == -ENOENT) {
-		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
-		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
-		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+		ulong msr = vcpu->arch.shared->msr;
+
+		msr = kvmppc_set_field(msr, 33, 33, 1);
+		msr = kvmppc_set_field(msr, 34, 36, 0);
+		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
 		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
 		return EMULATE_AGAIN;
 	}
@@ -736,7 +823,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
 		return RESUME_GUEST;
 
-	if (!(vcpu->arch.msr & msr)) {
+	if (!(vcpu->arch.shared->msr & msr)) {
 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		return RESUME_GUEST;
 	}
@@ -796,16 +883,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	run->ready_for_interrupt_injection = 1;
-#ifdef EXIT_DEBUG
-	printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
-		exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
-		kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1);
-#elif defined (EXIT_DEBUG_SIMPLE)
-	if ((exit_nr != 0x900) && (exit_nr != 0x500))
-		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
-			exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
-			vcpu->arch.msr);
-#endif
+
+	trace_kvm_book3s_exit(exit_nr, vcpu);
 	kvm_resched(vcpu);
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
@@ -836,9 +915,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
 			r = RESUME_GUEST;
 		} else {
-			vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+			vcpu->arch.shared->msr |=
+				to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
 			r = RESUME_GUEST;
 		}
 		break;
@@ -861,17 +940,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
 			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
 		} else {
-			vcpu->arch.dear = dar;
-			to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
+			vcpu->arch.shared->dar = dar;
+			vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL);
 			r = RESUME_GUEST;
 		}
 		break;
 	}
 	case BOOK3S_INTERRUPT_DATA_SEGMENT:
 		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
-			vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
 			kvmppc_book3s_queue_irqprio(vcpu,
 				BOOK3S_INTERRUPT_DATA_SEGMENT);
 		}
@@ -904,7 +982,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 program_interrupt:
 		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
 
-		if (vcpu->arch.msr & MSR_PR) {
+		if (vcpu->arch.shared->msr & MSR_PR) {
 #ifdef EXIT_DEBUG
 			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 #endif
@@ -941,10 +1019,10 @@ program_interrupt:
 		break;
 	}
 	case BOOK3S_INTERRUPT_SYSCALL:
-		// XXX make user settable
 		if (vcpu->arch.osi_enabled &&
 		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
 		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
+			/* MOL hypercalls */
 			u64 *gprs = run->osi.gprs;
 			int i;
 
@@ -953,8 +1031,13 @@ program_interrupt:
 				gprs[i] = kvmppc_get_gpr(vcpu, i);
 			vcpu->arch.osi_needed = 1;
 			r = RESUME_HOST_NV;
-
+		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
+			   (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
+			/* KVM PV hypercalls */
+			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
+			r = RESUME_GUEST;
 		} else {
+			/* Guest syscalls */
 			vcpu->stat.syscall_exits++;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			r = RESUME_GUEST;
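
The new branch opens a hypercall path for privileged guest code: r0 carries KVM_SC_MAGIC_R0, sc traps to the host, kvmppc_kvm_pv() dispatches, and the result comes back in r3. A hypothetical guest-side wrapper; placing the hypercall number in r11 follows the ePAPR convention this interface is modeled on and is an assumption here:

	/* Hypothetical guest-side sketch; must run with MSR_PR clear. */
	static unsigned long kvm_hypercall0(unsigned long nr)
	{
		register unsigned long r0 asm("r0") = KVM_SC_MAGIC_R0;
		register unsigned long r11 asm("r11") = nr;
		register unsigned long r3 asm("r3");

		asm volatile("sc" : "=r"(r3) : "r"(r0), "r"(r11) : "memory");
		return r3;	/* value set via kvmppc_set_gpr(vcpu, 3, ...) */
	}

Unprivileged code issuing the same sc falls through to the regular guest-syscall path, so the magic value cannot be abused from guest userspace.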
@@ -989,9 +1072,9 @@ program_interrupt:
 	}
 	case BOOK3S_INTERRUPT_ALIGNMENT:
 		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
-			to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu,
+			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
 				kvmppc_get_last_inst(vcpu));
-			vcpu->arch.dear = kvmppc_alignment_dar(vcpu,
+			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
 				kvmppc_get_last_inst(vcpu));
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		}
@@ -1031,9 +1114,7 @@ program_interrupt:
 		}
 	}
 
-#ifdef EXIT_DEBUG
-	printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, kvmppc_get_pc(vcpu), r);
-#endif
+	trace_kvm_book3s_reenter(r, vcpu);
 
 	return r;
 }
@@ -1052,14 +1133,14 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->ctr = kvmppc_get_ctr(vcpu);
 	regs->lr = kvmppc_get_lr(vcpu);
 	regs->xer = kvmppc_get_xer(vcpu);
-	regs->msr = vcpu->arch.msr;
-	regs->srr0 = vcpu->arch.srr0;
-	regs->srr1 = vcpu->arch.srr1;
+	regs->msr = vcpu->arch.shared->msr;
+	regs->srr0 = vcpu->arch.shared->srr0;
+	regs->srr1 = vcpu->arch.shared->srr1;
 	regs->pid = vcpu->arch.pid;
-	regs->sprg0 = vcpu->arch.sprg0;
-	regs->sprg1 = vcpu->arch.sprg1;
-	regs->sprg2 = vcpu->arch.sprg2;
-	regs->sprg3 = vcpu->arch.sprg3;
+	regs->sprg0 = vcpu->arch.shared->sprg0;
+	regs->sprg1 = vcpu->arch.shared->sprg1;
+	regs->sprg2 = vcpu->arch.shared->sprg2;
+	regs->sprg3 = vcpu->arch.shared->sprg3;
 	regs->sprg5 = vcpu->arch.sprg4;
 	regs->sprg6 = vcpu->arch.sprg5;
 	regs->sprg7 = vcpu->arch.sprg6;
@@ -1080,12 +1161,12 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	kvmppc_set_lr(vcpu, regs->lr);
 	kvmppc_set_xer(vcpu, regs->xer);
 	kvmppc_set_msr(vcpu, regs->msr);
-	vcpu->arch.srr0 = regs->srr0;
-	vcpu->arch.srr1 = regs->srr1;
-	vcpu->arch.sprg0 = regs->sprg0;
-	vcpu->arch.sprg1 = regs->sprg1;
-	vcpu->arch.sprg2 = regs->sprg2;
-	vcpu->arch.sprg3 = regs->sprg3;
+	vcpu->arch.shared->srr0 = regs->srr0;
+	vcpu->arch.shared->srr1 = regs->srr1;
+	vcpu->arch.shared->sprg0 = regs->sprg0;
+	vcpu->arch.shared->sprg1 = regs->sprg1;
+	vcpu->arch.shared->sprg2 = regs->sprg2;
+	vcpu->arch.shared->sprg3 = regs->sprg3;
 	vcpu->arch.sprg5 = regs->sprg4;
 	vcpu->arch.sprg6 = regs->sprg5;
 	vcpu->arch.sprg7 = regs->sprg6;
@@ -1111,10 +1192,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 			sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
 		}
 	} else {
-		for (i = 0; i < 16; i++) {
-			sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
-			sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
-		}
+		for (i = 0; i < 16; i++)
+			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
+
 		for (i = 0; i < 8; i++) {
 			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
 			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
@@ -1225,6 +1305,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	struct kvmppc_vcpu_book3s *vcpu_book3s;
 	struct kvm_vcpu *vcpu;
 	int err = -ENOMEM;
+	unsigned long p;
 
 	vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
 	if (!vcpu_book3s)
@@ -1242,6 +1323,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	if (err)
 		goto free_shadow_vcpu;
 
+	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+	/* the real shared page fills the last 4k of our page */
+	vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
+	if (!p)
+		goto uninit_vcpu;
+
 	vcpu->arch.host_retip = kvm_return_point;
 	vcpu->arch.host_msr = mfmsr();
 #ifdef CONFIG_PPC_BOOK3S_64
@@ -1268,10 +1355,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 
 	err = kvmppc_mmu_init(vcpu);
 	if (err < 0)
-		goto free_shadow_vcpu;
+		goto uninit_vcpu;
 
 	return vcpu;
 
+uninit_vcpu:
+	kvm_vcpu_uninit(vcpu);
 free_shadow_vcpu:
 	kfree(vcpu_book3s->shadow_vcpu);
 free_vcpu:
@@ -1284,6 +1373,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
+	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 	kvm_vcpu_uninit(vcpu);
 	kfree(vcpu_book3s->shadow_vcpu);
 	vfree(vcpu_book3s);
@@ -1346,7 +1436,7 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	local_irq_enable();
 
 	/* Preload FPU if it's enabled */
-	if (vcpu->arch.msr & MSR_FP)
+	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
 	ret = __kvmppc_vcpu_entry(kvm_run, vcpu);