Diffstat (limited to 'arch/powerpc/kvm/book3s_pr.c')
-rw-r--r--	arch/powerpc/kvm/book3s_pr.c	534
1 file changed, 376 insertions(+), 158 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 27db1e665959..fe14ca3dd171 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -40,8 +40,12 @@
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
+#include <linux/module.h>
 
-#include "trace.h"
+#include "book3s.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace_pr.h"
 
 /* #define EXIT_DEBUG */
 /* #define DEBUG_EXT */
@@ -56,29 +60,25 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #define HW_PAGE_SIZE PAGE_SIZE
 #endif
 
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
-	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
-	       sizeof(get_paca()->shadow_vcpu));
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
 	svcpu_put(svcpu);
 #endif
 	vcpu->cpu = smp_processor_id();
 #ifdef CONFIG_PPC_BOOK3S_32
-	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
+	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
 #endif
 }
 
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
-	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
-	       sizeof(get_paca()->shadow_vcpu));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 	svcpu_put(svcpu);
 #endif
@@ -87,7 +87,61 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 	vcpu->cpu = -1;
 }
 
-int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
+/* Copy data needed by real-mode code from vcpu to shadow vcpu */
+void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+			  struct kvm_vcpu *vcpu)
+{
+	svcpu->gpr[0] = vcpu->arch.gpr[0];
+	svcpu->gpr[1] = vcpu->arch.gpr[1];
+	svcpu->gpr[2] = vcpu->arch.gpr[2];
+	svcpu->gpr[3] = vcpu->arch.gpr[3];
+	svcpu->gpr[4] = vcpu->arch.gpr[4];
+	svcpu->gpr[5] = vcpu->arch.gpr[5];
+	svcpu->gpr[6] = vcpu->arch.gpr[6];
+	svcpu->gpr[7] = vcpu->arch.gpr[7];
+	svcpu->gpr[8] = vcpu->arch.gpr[8];
+	svcpu->gpr[9] = vcpu->arch.gpr[9];
+	svcpu->gpr[10] = vcpu->arch.gpr[10];
+	svcpu->gpr[11] = vcpu->arch.gpr[11];
+	svcpu->gpr[12] = vcpu->arch.gpr[12];
+	svcpu->gpr[13] = vcpu->arch.gpr[13];
+	svcpu->cr = vcpu->arch.cr;
+	svcpu->xer = vcpu->arch.xer;
+	svcpu->ctr = vcpu->arch.ctr;
+	svcpu->lr = vcpu->arch.lr;
+	svcpu->pc = vcpu->arch.pc;
+}
+
+/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
+void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+			    struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+	vcpu->arch.gpr[0] = svcpu->gpr[0];
+	vcpu->arch.gpr[1] = svcpu->gpr[1];
+	vcpu->arch.gpr[2] = svcpu->gpr[2];
+	vcpu->arch.gpr[3] = svcpu->gpr[3];
+	vcpu->arch.gpr[4] = svcpu->gpr[4];
+	vcpu->arch.gpr[5] = svcpu->gpr[5];
+	vcpu->arch.gpr[6] = svcpu->gpr[6];
+	vcpu->arch.gpr[7] = svcpu->gpr[7];
+	vcpu->arch.gpr[8] = svcpu->gpr[8];
+	vcpu->arch.gpr[9] = svcpu->gpr[9];
+	vcpu->arch.gpr[10] = svcpu->gpr[10];
+	vcpu->arch.gpr[11] = svcpu->gpr[11];
+	vcpu->arch.gpr[12] = svcpu->gpr[12];
+	vcpu->arch.gpr[13] = svcpu->gpr[13];
+	vcpu->arch.cr = svcpu->cr;
+	vcpu->arch.xer = svcpu->xer;
+	vcpu->arch.ctr = svcpu->ctr;
+	vcpu->arch.lr = svcpu->lr;
+	vcpu->arch.pc = svcpu->pc;
+	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
+	vcpu->arch.fault_dar = svcpu->fault_dar;
+	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
+	vcpu->arch.last_inst = svcpu->last_inst;
+}
+
+static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
 {
 	int r = 1; /* Indicate we want to get back into the guest */
 
@@ -100,44 +154,69 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 }
 
 /************* MMU Notifiers *************/
+static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
+			     unsigned long end)
+{
+	long i;
+	struct kvm_vcpu *vcpu;
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+
+	slots = kvm_memslots(kvm);
+	kvm_for_each_memslot(memslot, slots) {
+		unsigned long hva_start, hva_end;
+		gfn_t gfn, gfn_end;
+
+		hva_start = max(start, memslot->userspace_addr);
+		hva_end = min(end, memslot->userspace_addr +
+					(memslot->npages << PAGE_SHIFT));
+		if (hva_start >= hva_end)
+			continue;
+		/*
+		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+		 * {gfn, gfn+1, ..., gfn_end-1}.
+		 */
+		gfn = hva_to_gfn_memslot(hva_start, memslot);
+		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
+					      gfn_end << PAGE_SHIFT);
+	}
+}
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
 {
 	trace_kvm_unmap_hva(hva);
 
-	/*
-	 * Flush all shadow tlb entries everywhere. This is slow, but
-	 * we are 100% sure that we catch the to be unmapped page
-	 */
-	kvm_flush_remote_tlbs(kvm);
+	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
 
 	return 0;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
+				  unsigned long end)
 {
-	/* kvm_unmap_hva flushes everything anyways */
-	kvm_unmap_hva(kvm, start);
+	do_kvm_unmap_hva(kvm, start, end);
 
 	return 0;
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
 {
 	/* XXX could be more clever ;) */
 	return 0;
 }
 
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
 {
 	/* XXX could be more clever ;) */
 	return 0;
 }
 
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
 	/* The page will get remapped properly on its next fault */
-	kvm_unmap_hva(kvm, hva);
+	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
 }
 
 /*****************************************/
@@ -159,7 +238,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 	vcpu->arch.shadow_msr = smsr;
 }
 
-void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 {
 	ulong old_msr = vcpu->arch.shared->msr;
 
@@ -219,7 +298,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
-void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
 {
 	u32 host_pvr;
 
@@ -256,6 +335,23 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
 		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
 
+	/*
+	 * If they're asking for POWER6 or later, set the flag
+	 * indicating that we can do multiple large page sizes
+	 * and 1TB segments.
+	 * Also set the flag that indicates that tlbie has the large
+	 * page bit in the RB operand instead of the instruction.
+	 */
+	switch (PVR_VER(pvr)) {
+	case PVR_POWER6:
+	case PVR_POWER7:
+	case PVR_POWER7p:
+	case PVR_POWER8:
+		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
+			BOOK3S_HFLAG_NEW_TLBIE;
+		break;
+	}
+
 #ifdef CONFIG_PPC_BOOK3S_32
 	/* 32 bit Book3S always has 32 byte dcbz */
 	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
@@ -334,6 +430,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			    ulong eaddr, int vec)
 {
 	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
+	bool iswrite = false;
 	int r = RESUME_GUEST;
 	int relocated;
 	int page_found = 0;
@@ -344,10 +441,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	u64 vsid;
 
 	relocated = data ? dr : ir;
+	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
+		iswrite = true;
 
 	/* Resolve real address if translation turned on */
 	if (relocated) {
-		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
+		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
 	} else {
 		pte.may_execute = true;
 		pte.may_read = true;
@@ -355,6 +454,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte.raddr = eaddr & KVM_PAM;
 		pte.eaddr = eaddr;
 		pte.vpage = eaddr >> 12;
+		pte.page_size = MMU_PAGE_64K;
 	}
 
 	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
@@ -388,22 +488,18 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	if (page_found == -ENOENT) {
 		/* Page not found in guest PTE entries */
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
+		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
 		vcpu->arch.shared->msr |=
-			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
-		svcpu_put(svcpu);
+			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
+		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
 		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
 		vcpu->arch.shared->msr |=
-			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
-		svcpu_put(svcpu);
+			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
@@ -411,12 +507,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
 	} else if (!is_mmio &&
 		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
+		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
+			/*
+			 * There is already a host HPTE there, presumably
+			 * a read-only one for a page the guest thinks
+			 * is writable, so get rid of it first.
+			 */
+			kvmppc_mmu_unmap_page(vcpu, &pte);
+		}
 		/* The guest's PTE is not mapped yet. Map on the host */
-		kvmppc_mmu_map_page(vcpu, &pte);
+		kvmppc_mmu_map_page(vcpu, &pte, iswrite);
 		if (data)
 			vcpu->stat.sp_storage++;
 		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
 			kvmppc_patch_dcbz(vcpu, &pte);
 	} else {
 		/* MMIO */
@@ -444,7 +548,7 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 #ifdef CONFIG_VSX
 	u64 *vcpu_vsx = vcpu->arch.vsr;
 #endif
-	u64 *thread_fpr = (u64*)t->fpr;
+	u64 *thread_fpr = &t->fp_state.fpr[0][0];
 	int i;
 
 	/*
@@ -466,14 +570,14 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 	/*
 	 * Note that on CPUs with VSX, giveup_fpu stores
 	 * both the traditional FP registers and the added VSX
-	 * registers into thread.fpr[].
+	 * registers into thread.fp_state.fpr[].
 	 */
 	if (current->thread.regs->msr & MSR_FP)
 		giveup_fpu(current);
 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
 		vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
 
-	vcpu->arch.fpscr = t->fpscr.val;
+	vcpu->arch.fpscr = t->fp_state.fpscr;
 
 #ifdef CONFIG_VSX
 	if (cpu_has_feature(CPU_FTR_VSX))
@@ -486,8 +590,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 	if (msr & MSR_VEC) {
 		if (current->thread.regs->msr & MSR_VEC)
 			giveup_altivec(current);
-		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
-		vcpu->arch.vscr = t->vscr;
+		memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
+		vcpu->arch.vscr = t->vr_state.vscr;
 	}
 #endif
 
@@ -539,7 +643,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #ifdef CONFIG_VSX
 	u64 *vcpu_vsx = vcpu->arch.vsr;
 #endif
-	u64 *thread_fpr = (u64*)t->fpr;
+	u64 *thread_fpr = &t->fp_state.fpr[0][0];
 	int i;
 
 	/* When we have paired singles, we emulate in software */
@@ -584,15 +688,15 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
 			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
 #endif
-		t->fpscr.val = vcpu->arch.fpscr;
+		t->fp_state.fpscr = vcpu->arch.fpscr;
 		t->fpexc_mode = 0;
 		kvmppc_load_up_fpu();
 	}
 
 	if (msr & MSR_VEC) {
 #ifdef CONFIG_ALTIVEC
-		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
-		t->vscr = vcpu->arch.vscr;
+		memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+		t->vr_state.vscr = vcpu->arch.vscr;
 		t->vrsave = -1;
 		kvmppc_load_up_altivec();
 #endif
@@ -619,13 +723,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
 
 	if (lost_ext & MSR_FP)
 		kvmppc_load_up_fpu();
+#ifdef CONFIG_ALTIVEC
 	if (lost_ext & MSR_VEC)
 		kvmppc_load_up_altivec();
+#endif
 	current->thread.regs->msr |= lost_ext;
 }
 
-int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
-		       unsigned int exit_nr)
+int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			  unsigned int exit_nr)
 {
 	int r = RESUME_HOST;
 	int s;
@@ -643,25 +749,32 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
 	{
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		ulong shadow_srr1 = svcpu->shadow_srr1;
+		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
 		vcpu->stat.pf_instruc++;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 		/* We set segments as unused segments when invalidating them. So
 		 * treat the respective fault as segment fault. */
-		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
-			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
-			r = RESUME_GUEST;
+		{
+			struct kvmppc_book3s_shadow_vcpu *svcpu;
+			u32 sr;
+
+			svcpu = svcpu_get(vcpu);
+			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
 			svcpu_put(svcpu);
-			break;
+			if (sr == SR_INVALID) {
+				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+				r = RESUME_GUEST;
+				break;
+			}
 		}
 #endif
-		svcpu_put(svcpu);
 
 		/* only care about PTEG not found errors, but leave NX alone */
 		if (shadow_srr1 & 0x40000000) {
+			int idx = srcu_read_lock(&vcpu->kvm->srcu);
 			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
+			srcu_read_unlock(&vcpu->kvm->srcu, idx);
 			vcpu->stat.sp_instruc++;
 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
 			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
@@ -682,25 +795,36 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_DATA_STORAGE:
 	{
 		ulong dar = kvmppc_get_fault_dar(vcpu);
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		u32 fault_dsisr = svcpu->fault_dsisr;
+		u32 fault_dsisr = vcpu->arch.fault_dsisr;
 		vcpu->stat.pf_storage++;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 		/* We set segments as unused segments when invalidating them. So
 		 * treat the respective fault as segment fault. */
-		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
-			kvmppc_mmu_map_segment(vcpu, dar);
-			r = RESUME_GUEST;
+		{
+			struct kvmppc_book3s_shadow_vcpu *svcpu;
+			u32 sr;
+
+			svcpu = svcpu_get(vcpu);
+			sr = svcpu->sr[dar >> SID_SHIFT];
 			svcpu_put(svcpu);
-			break;
+			if (sr == SR_INVALID) {
+				kvmppc_mmu_map_segment(vcpu, dar);
+				r = RESUME_GUEST;
+				break;
+			}
 		}
 #endif
-		svcpu_put(svcpu);
 
-		/* The only case we need to handle is missing shadow PTEs */
-		if (fault_dsisr & DSISR_NOHPTE) {
+		/*
+		 * We need to handle missing shadow PTEs, and
+		 * protection faults due to us mapping a page read-only
+		 * when the guest thinks it is writable.
+		 */
+		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
+			int idx = srcu_read_lock(&vcpu->kvm->srcu);
 			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
+			srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		} else {
 			vcpu->arch.shared->dar = dar;
 			vcpu->arch.shared->dsisr = fault_dsisr;
@@ -743,13 +867,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
 	{
 		enum emulation_result er;
-		struct kvmppc_book3s_shadow_vcpu *svcpu;
 		ulong flags;
 
 program_interrupt:
-		svcpu = svcpu_get(vcpu);
-		flags = svcpu->shadow_srr1 & 0x1f0000ull;
-		svcpu_put(svcpu);
+		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
 
 		if (vcpu->arch.shared->msr & MSR_PR) {
 #ifdef EXIT_DEBUG
@@ -798,7 +919,7 @@ program_interrupt:
 		ulong cmd = kvmppc_get_gpr(vcpu, 3);
 		int i;
 
-#ifdef CONFIG_KVM_BOOK3S_64_PR
+#ifdef CONFIG_PPC_BOOK3S_64
 		if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
 			r = RESUME_GUEST;
 			break;
@@ -881,9 +1002,7 @@ program_interrupt:
 		break;
 	default:
 	{
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		ulong shadow_srr1 = svcpu->shadow_srr1;
-		svcpu_put(svcpu);
+		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
 		/* Ugh - bork here! What did we get? */
 		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
 			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
@@ -920,8 +1039,8 @@ program_interrupt:
 	return r;
 }
 
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
-				  struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
+					    struct kvm_sregs *sregs)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;
@@ -947,13 +1066,13 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
-				  struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;
 
-	kvmppc_set_pvr(vcpu, sregs->pvr);
+	kvmppc_set_pvr_pr(vcpu, sregs->pvr);
 
 	vcpu3s->sdr1 = sregs->u.s.sdr1;
 	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
@@ -983,7 +1102,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
+				 union kvmppc_one_reg *val)
 {
 	int r = 0;
 
@@ -1012,7 +1132,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 	return r;
 }
 
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
+				 union kvmppc_one_reg *val)
 {
 	int r = 0;
 
@@ -1042,28 +1163,30 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 	return r;
 }
 
-int kvmppc_core_check_processor_compat(void)
-{
-	return 0;
-}
-
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
+						   unsigned int id)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s;
 	struct kvm_vcpu *vcpu;
 	int err = -ENOMEM;
 	unsigned long p;
 
-	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
-	if (!vcpu_book3s)
+	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+	if (!vcpu)
 		goto out;
 
-	vcpu_book3s->shadow_vcpu =
-		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
-	if (!vcpu_book3s->shadow_vcpu)
+	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
+	if (!vcpu_book3s)
 		goto free_vcpu;
+	vcpu->arch.book3s = vcpu_book3s;
+
+#ifdef CONFIG_KVM_BOOK3S_32
+	vcpu->arch.shadow_vcpu =
+		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
+	if (!vcpu->arch.shadow_vcpu)
+		goto free_vcpu3s;
+#endif
 
-	vcpu = &vcpu_book3s->vcpu;
 	err = kvm_vcpu_init(vcpu, kvm, id);
 	if (err)
 		goto free_shadow_vcpu;
@@ -1076,13 +1199,19 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	/* default to book3s_64 (970fx) */
+	/*
+	 * Default to the same as the host if we're on sufficiently
+	 * recent machine that we have 1TB segments;
+	 * otherwise default to PPC970FX.
+	 */
 	vcpu->arch.pvr = 0x3C0301;
+	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
+		vcpu->arch.pvr = mfspr(SPRN_PVR);
 #else
 	/* default to book3s_32 (750) */
 	vcpu->arch.pvr = 0x84202;
 #endif
-	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
 	vcpu->arch.slb_nr = 64;
 
 	vcpu->arch.shadow_msr = MSR_USER64;
@@ -1096,32 +1225,37 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 uninit_vcpu:
 	kvm_vcpu_uninit(vcpu);
 free_shadow_vcpu:
-	kfree(vcpu_book3s->shadow_vcpu);
-free_vcpu:
+#ifdef CONFIG_KVM_BOOK3S_32
+	kfree(vcpu->arch.shadow_vcpu);
+free_vcpu3s:
+#endif
 	vfree(vcpu_book3s);
+free_vcpu:
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 out:
 	return ERR_PTR(err);
 }
 
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
 	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 	kvm_vcpu_uninit(vcpu);
-	kfree(vcpu_book3s->shadow_vcpu);
+#ifdef CONFIG_KVM_BOOK3S_32
+	kfree(vcpu->arch.shadow_vcpu);
+#endif
 	vfree(vcpu_book3s);
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
-int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
-	double fpr[32][TS_FPRWIDTH];
-	unsigned int fpscr;
+	struct thread_fp_state fp;
 	int fpexc_mode;
 #ifdef CONFIG_ALTIVEC
-	vector128 vr[32];
-	vector128 vscr;
+	struct thread_vr_state vr;
 	unsigned long uninitialized_var(vrsave);
 	int used_vr;
 #endif
@@ -1153,8 +1287,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	/* Save FPU state in stack */
 	if (current->thread.regs->msr & MSR_FP)
 		giveup_fpu(current);
-	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
-	fpscr = current->thread.fpscr.val;
+	fp = current->thread.fp_state;
 	fpexc_mode = current->thread.fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
@@ -1163,8 +1296,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	if (used_vr) {
 		if (current->thread.regs->msr & MSR_VEC)
 			giveup_altivec(current);
-		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
-		vscr = current->thread.vscr;
+		vr = current->thread.vr_state;
 		vrsave = current->thread.vrsave;
 	}
 #endif
@@ -1196,15 +1328,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	current->thread.regs->msr = ext_msr;
 
 	/* Restore FPU/VSX state from stack */
-	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
-	current->thread.fpscr.val = fpscr;
+	current->thread.fp_state = fp;
 	current->thread.fpexc_mode = fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
 	/* Restore Altivec state from stack */
 	if (used_vr && current->thread.used_vr) {
-		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
-		current->thread.vscr = vscr;
+		current->thread.vr_state = vr;
 		current->thread.vrsave = vrsave;
 	}
 	current->thread.used_vr = used_vr;
@@ -1222,8 +1352,8 @@ out:
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
-			       struct kvm_dirty_log *log)
+static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
+					 struct kvm_dirty_log *log)
 {
 	struct kvm_memory_slot *memslot;
 	struct kvm_vcpu *vcpu;
@@ -1258,67 +1388,100 @@ out:
 	return r;
 }
 
-#ifdef CONFIG_PPC64
-int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
+					 struct kvm_memory_slot *memslot)
 {
-	info->flags = KVM_PPC_1T_SEGMENTS;
-
-	/* SLB is always 64 entries */
-	info->slb_size = 64;
-
-	/* Standard 4k base page size segment */
-	info->sps[0].page_shift = 12;
-	info->sps[0].slb_enc = 0;
-	info->sps[0].enc[0].page_shift = 12;
-	info->sps[0].enc[0].pte_enc = 0;
-
-	/* Standard 16M large page size segment */
-	info->sps[1].page_shift = 24;
-	info->sps[1].slb_enc = SLB_VSID_L;
-	info->sps[1].enc[0].page_shift = 24;
-	info->sps[1].enc[0].pte_enc = 0;
+	return;
+}
 
+static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
+					struct kvm_memory_slot *memslot,
+					struct kvm_userspace_memory_region *mem)
+{
 	return 0;
 }
-#endif /* CONFIG_PPC64 */
 
-void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
-			      struct kvm_memory_slot *dont)
+static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				const struct kvm_memory_slot *old)
 {
+	return;
 }
 
-int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
-			       unsigned long npages)
+static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
+					struct kvm_memory_slot *dont)
 {
-	return 0;
+	return;
 }
 
-int kvmppc_core_prepare_memory_region(struct kvm *kvm,
-				      struct kvm_memory_slot *memslot,
-				      struct kvm_userspace_memory_region *mem)
+static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
+					 unsigned long npages)
 {
 	return 0;
 }
 
-void kvmppc_core_commit_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem,
-				const struct kvm_memory_slot *old)
+
+#ifdef CONFIG_PPC64
+static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
+					 struct kvm_ppc_smmu_info *info)
 {
-}
+	long int i;
+	struct kvm_vcpu *vcpu;
+
+	info->flags = 0;
+
+	/* SLB is always 64 entries */
+	info->slb_size = 64;
+
+	/* Standard 4k base page size segment */
+	info->sps[0].page_shift = 12;
+	info->sps[0].slb_enc = 0;
+	info->sps[0].enc[0].page_shift = 12;
+	info->sps[0].enc[0].pte_enc = 0;
+
+	/*
+	 * 64k large page size.
+	 * We only want to put this in if the CPUs we're emulating
+	 * support it, but unfortunately we don't have a vcpu easily
+	 * to hand here to test. Just pick the first vcpu, and if
+	 * that doesn't exist yet, report the minimum capability,
+	 * i.e., no 64k pages.
+	 * 1T segment support goes along with 64k pages.
+	 */
+	i = 1;
+	vcpu = kvm_get_vcpu(kvm, 0);
+	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
+		info->flags = KVM_PPC_1T_SEGMENTS;
+		info->sps[i].page_shift = 16;
+		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
+		info->sps[i].enc[0].page_shift = 16;
+		info->sps[i].enc[0].pte_enc = 1;
+		++i;
+	}
+
+	/* Standard 16M large page size segment */
+	info->sps[i].page_shift = 24;
+	info->sps[i].slb_enc = SLB_VSID_L;
+	info->sps[i].enc[0].page_shift = 24;
+	info->sps[i].enc[0].pte_enc = 0;
 
-void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+	return 0;
+}
+#else
+static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
+					 struct kvm_ppc_smmu_info *info)
 {
+	/* We should not get called */
+	BUG();
 }
+#endif /* CONFIG_PPC64 */
 
 static unsigned int kvm_global_user_count = 0;
 static DEFINE_SPINLOCK(kvm_global_user_count_lock);
 
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_pr(struct kvm *kvm)
 {
-#ifdef CONFIG_PPC64
-	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
-	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
-#endif
+	mutex_init(&kvm->arch.hpt_mutex);
 
 	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
 		spin_lock(&kvm_global_user_count_lock);
@@ -1329,7 +1492,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 	return 0;
 }
 
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
 {
 #ifdef CONFIG_PPC64
 	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
@@ -1344,26 +1507,81 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
 	}
 }
 
-static int kvmppc_book3s_init(void)
+static int kvmppc_core_check_processor_compat_pr(void)
 {
-	int r;
+	/* we are always compatible */
+	return 0;
+}
 
-	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
-		     THIS_MODULE);
+static long kvm_arch_vm_ioctl_pr(struct file *filp,
+				 unsigned int ioctl, unsigned long arg)
+{
+	return -ENOTTY;
+}
 
-	if (r)
+static struct kvmppc_ops kvm_ops_pr = {
+	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
+	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
+	.get_one_reg = kvmppc_get_one_reg_pr,
+	.set_one_reg = kvmppc_set_one_reg_pr,
+	.vcpu_load = kvmppc_core_vcpu_load_pr,
+	.vcpu_put = kvmppc_core_vcpu_put_pr,
+	.set_msr = kvmppc_set_msr_pr,
+	.vcpu_run = kvmppc_vcpu_run_pr,
+	.vcpu_create = kvmppc_core_vcpu_create_pr,
+	.vcpu_free = kvmppc_core_vcpu_free_pr,
+	.check_requests = kvmppc_core_check_requests_pr,
+	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
+	.flush_memslot = kvmppc_core_flush_memslot_pr,
+	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
+	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
+	.unmap_hva = kvm_unmap_hva_pr,
+	.unmap_hva_range = kvm_unmap_hva_range_pr,
+	.age_hva = kvm_age_hva_pr,
+	.test_age_hva = kvm_test_age_hva_pr,
+	.set_spte_hva = kvm_set_spte_hva_pr,
+	.mmu_destroy = kvmppc_mmu_destroy_pr,
+	.free_memslot = kvmppc_core_free_memslot_pr,
+	.create_memslot = kvmppc_core_create_memslot_pr,
+	.init_vm = kvmppc_core_init_vm_pr,
+	.destroy_vm = kvmppc_core_destroy_vm_pr,
+	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
+	.emulate_op = kvmppc_core_emulate_op_pr,
+	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
+	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
+	.fast_vcpu_kick = kvm_vcpu_kick,
+	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
+};
+
+
+int kvmppc_book3s_init_pr(void)
+{
+	int r;
+
+	r = kvmppc_core_check_processor_compat_pr();
+	if (r < 0)
 		return r;
 
-	r = kvmppc_mmu_hpte_sysinit();
+	kvm_ops_pr.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_pr;
 
+	r = kvmppc_mmu_hpte_sysinit();
 	return r;
 }
 
-static void kvmppc_book3s_exit(void)
+void kvmppc_book3s_exit_pr(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_mmu_hpte_sysexit();
-	kvm_exit();
 }
 
-module_init(kvmppc_book3s_init);
-module_exit(kvmppc_book3s_exit);
+/*
+ * We only support separate modules for book3s 64
+ */
+#ifdef CONFIG_PPC_BOOK3S_64
+
+module_init(kvmppc_book3s_init_pr);
+module_exit(kvmppc_book3s_exit_pr);
+
+MODULE_LICENSE("GPL");
+#endif