Diffstat (limited to 'arch/powerpc/kvm/book3s_pr.c')
-rw-r--r-- | arch/powerpc/kvm/book3s_pr.c | 498
1 file changed, 361 insertions(+), 137 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index c0b48f96a91c..fe14ca3dd171 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -40,8 +40,12 @@
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
+#include <linux/module.h>
 
-#include "trace.h"
+#include "book3s.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace_pr.h"
 
 /* #define EXIT_DEBUG */
 /* #define DEBUG_EXT */
@@ -56,29 +60,25 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #define HW_PAGE_SIZE PAGE_SIZE
 #endif
 
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
-	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
-	       sizeof(get_paca()->shadow_vcpu));
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
 	svcpu_put(svcpu);
 #endif
 	vcpu->cpu = smp_processor_id();
 #ifdef CONFIG_PPC_BOOK3S_32
-	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
+	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
 #endif
 }
 
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
-	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
-	       sizeof(get_paca()->shadow_vcpu));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 	svcpu_put(svcpu);
 #endif
@@ -87,7 +87,61 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 	vcpu->cpu = -1;
 }
 
-int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
+/* Copy data needed by real-mode code from vcpu to shadow vcpu */
+void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+			  struct kvm_vcpu *vcpu)
+{
+	svcpu->gpr[0] = vcpu->arch.gpr[0];
+	svcpu->gpr[1] = vcpu->arch.gpr[1];
+	svcpu->gpr[2] = vcpu->arch.gpr[2];
+	svcpu->gpr[3] = vcpu->arch.gpr[3];
+	svcpu->gpr[4] = vcpu->arch.gpr[4];
+	svcpu->gpr[5] = vcpu->arch.gpr[5];
+	svcpu->gpr[6] = vcpu->arch.gpr[6];
+	svcpu->gpr[7] = vcpu->arch.gpr[7];
+	svcpu->gpr[8] = vcpu->arch.gpr[8];
+	svcpu->gpr[9] = vcpu->arch.gpr[9];
+	svcpu->gpr[10] = vcpu->arch.gpr[10];
+	svcpu->gpr[11] = vcpu->arch.gpr[11];
+	svcpu->gpr[12] = vcpu->arch.gpr[12];
+	svcpu->gpr[13] = vcpu->arch.gpr[13];
+	svcpu->cr  = vcpu->arch.cr;
+	svcpu->xer = vcpu->arch.xer;
+	svcpu->ctr = vcpu->arch.ctr;
+	svcpu->lr  = vcpu->arch.lr;
+	svcpu->pc  = vcpu->arch.pc;
+}
+
+/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
+void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+			    struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+	vcpu->arch.gpr[0] = svcpu->gpr[0];
+	vcpu->arch.gpr[1] = svcpu->gpr[1];
+	vcpu->arch.gpr[2] = svcpu->gpr[2];
+	vcpu->arch.gpr[3] = svcpu->gpr[3];
+	vcpu->arch.gpr[4] = svcpu->gpr[4];
+	vcpu->arch.gpr[5] = svcpu->gpr[5];
+	vcpu->arch.gpr[6] = svcpu->gpr[6];
+	vcpu->arch.gpr[7] = svcpu->gpr[7];
+	vcpu->arch.gpr[8] = svcpu->gpr[8];
+	vcpu->arch.gpr[9] = svcpu->gpr[9];
+	vcpu->arch.gpr[10] = svcpu->gpr[10];
+	vcpu->arch.gpr[11] = svcpu->gpr[11];
+	vcpu->arch.gpr[12] = svcpu->gpr[12];
+	vcpu->arch.gpr[13] = svcpu->gpr[13];
+	vcpu->arch.cr  = svcpu->cr;
+	vcpu->arch.xer = svcpu->xer;
+	vcpu->arch.ctr = svcpu->ctr;
+	vcpu->arch.lr  = svcpu->lr;
+	vcpu->arch.pc  = svcpu->pc;
+	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
+	vcpu->arch.fault_dar   = svcpu->fault_dar;
+	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
+	vcpu->arch.last_inst   = svcpu->last_inst;
+}
+
+static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
 {
 	int r = 1; /* Indicate we want to get back into the guest */
 
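The two copy helpers added above exist because, from this point on, volatile register state lives in vcpu->arch and is only mirrored into the shadow vcpu (the structure real-mode code can reach) around guest entry and exit. A rough sketch of the intended call pattern, assuming a low-level entry stub named __kvmppc_vcpu_run (that name and signature are an assumption for illustration, not part of this hunk):

	/* Sketch only: bracket one guest run with the copy helpers. */
	static int run_guest_once(struct kvm_run *run, struct kvm_vcpu *vcpu)
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		int exit_nr;

		/* volatile GPRs, CR, XER, CTR, LR, PC -> real-mode area */
		kvmppc_copy_to_svcpu(svcpu, vcpu);
		svcpu_put(svcpu);

		exit_nr = __kvmppc_vcpu_run(run, vcpu);	/* assumed entry stub */

		/* registers plus fault state (SRR1, DAR, DSISR) -> vcpu */
		svcpu = svcpu_get(vcpu);
		kvmppc_copy_from_svcpu(vcpu, svcpu);
		svcpu_put(svcpu);
		return exit_nr;
	}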
@@ -100,44 +154,69 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 }
 
 /************* MMU Notifiers *************/
+static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
+			     unsigned long end)
+{
+	long i;
+	struct kvm_vcpu *vcpu;
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+
+	slots = kvm_memslots(kvm);
+	kvm_for_each_memslot(memslot, slots) {
+		unsigned long hva_start, hva_end;
+		gfn_t gfn, gfn_end;
+
+		hva_start = max(start, memslot->userspace_addr);
+		hva_end = min(end, memslot->userspace_addr +
+					(memslot->npages << PAGE_SHIFT));
+		if (hva_start >= hva_end)
+			continue;
+		/*
+		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+		 * {gfn, gfn+1, ..., gfn_end-1}.
+		 */
+		gfn = hva_to_gfn_memslot(hva_start, memslot);
+		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
+					      gfn_end << PAGE_SHIFT);
+	}
+}
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
 {
 	trace_kvm_unmap_hva(hva);
 
-	/*
-	 * Flush all shadow tlb entries everywhere. This is slow, but
-	 * we are 100% sure that we catch the to be unmapped page
-	 */
-	kvm_flush_remote_tlbs(kvm);
+	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
 
 	return 0;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
+				  unsigned long end)
 {
-	/* kvm_unmap_hva flushes everything anyways */
-	kvm_unmap_hva(kvm, start);
+	do_kvm_unmap_hva(kvm, start, end);
 
 	return 0;
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
 {
 	/* XXX could be more clever ;) */
 	return 0;
 }
 
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
 {
 	/* XXX could be more clever ;) */
 	return 0;
 }
 
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
 	/* The page will get remapped properly on its next fault */
-	kvm_unmap_hva(kvm, hva);
+	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
 }
 
 /*****************************************/
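The new do_kvm_unmap_hva() clamps the [start, end) host-virtual range to each memslot and converts it to a guest-frame range before flushing shadow PTEs. The same interval arithmetic as a self-contained sketch, with plain integer types standing in for the kernel's memslot structures (hva_to_gfn() here is modeled on the kernel's hva_to_gfn_memslot()):

	#include <stdint.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	struct memslot {
		uint64_t base_gfn;        /* first guest frame of the slot */
		uint64_t userspace_addr;  /* HVA where the slot is mapped */
		uint64_t npages;
	};

	static uint64_t hva_to_gfn(uint64_t hva, const struct memslot *slot)
	{
		return slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
	}

	/* Compute the gfn range [*gfn, *gfn_end) covered by [start, end); 0 if empty. */
	static int hva_range_to_gfn_range(const struct memslot *slot,
					  uint64_t start, uint64_t end,
					  uint64_t *gfn, uint64_t *gfn_end)
	{
		uint64_t slot_end = slot->userspace_addr + (slot->npages << PAGE_SHIFT);
		uint64_t hva_start = start > slot->userspace_addr ? start : slot->userspace_addr;
		uint64_t hva_end = end < slot_end ? end : slot_end;

		if (hva_start >= hva_end)
			return 0;
		*gfn = hva_to_gfn(hva_start, slot);
		/* round the exclusive end up to a page boundary, as the patch does */
		*gfn_end = hva_to_gfn(hva_end + PAGE_SIZE - 1, slot);
		return 1;
	}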
@@ -159,7 +238,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 	vcpu->arch.shadow_msr = smsr;
 }
 
-void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 {
 	ulong old_msr = vcpu->arch.shared->msr;
 
@@ -219,7 +298,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
-void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
 {
 	u32 host_pvr;
 
@@ -256,6 +335,23 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
 		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
 
+	/*
+	 * If they're asking for POWER6 or later, set the flag
+	 * indicating that we can do multiple large page sizes
+	 * and 1TB segments.
+	 * Also set the flag that indicates that tlbie has the large
+	 * page bit in the RB operand instead of the instruction.
+	 */
+	switch (PVR_VER(pvr)) {
+	case PVR_POWER6:
+	case PVR_POWER7:
+	case PVR_POWER7p:
+	case PVR_POWER8:
+		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
+			BOOK3S_HFLAG_NEW_TLBIE;
+		break;
+	}
+
 #ifdef CONFIG_PPC_BOOK3S_32
 	/* 32 bit Book3S always has 32 byte dcbz */
 	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
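PVR_VER() extracts the processor version from the top half of the 32-bit PVR, so the switch above matches whole CPU families regardless of the revision in the low half. A standalone illustration (the macro mirrors the kernel's definition; the example value is a plausible POWER7 PVR):

	#include <assert.h>

	#define PVR_VER(pvr)	(((pvr) >> 16) & 0xFFFF)	/* top 16 bits: version */

	int main(void)
	{
		/* 0x003F0201: version 0x003F (POWER7), revision 2.1 */
		assert(PVR_VER(0x003F0201) == 0x003F);
		return 0;
	}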
@@ -334,6 +430,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			ulong eaddr, int vec)
 {
 	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
+	bool iswrite = false;
 	int r = RESUME_GUEST;
 	int relocated;
 	int page_found = 0;
@@ -344,10 +441,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	u64 vsid;
 
 	relocated = data ? dr : ir;
+	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
+		iswrite = true;
 
 	/* Resolve real address if translation turned on */
 	if (relocated) {
-		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
+		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
 	} else {
 		pte.may_execute = true;
 		pte.may_read = true;
@@ -355,6 +454,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte.raddr = eaddr & KVM_PAM;
 		pte.eaddr = eaddr;
 		pte.vpage = eaddr >> 12;
+		pte.page_size = MMU_PAGE_64K;
 	}
 
 	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
@@ -388,22 +488,18 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	if (page_found == -ENOENT) {
 		/* Page not found in guest PTE entries */
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
+		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
 		vcpu->arch.shared->msr |=
-			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
-		svcpu_put(svcpu);
+			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
+		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
 		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
 		vcpu->arch.shared->msr |=
-			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
-		svcpu_put(svcpu);
+			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
@@ -411,12 +507,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
 	} else if (!is_mmio &&
 		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
+		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
+			/*
+			 * There is already a host HPTE there, presumably
+			 * a read-only one for a page the guest thinks
+			 * is writable, so get rid of it first.
+			 */
+			kvmppc_mmu_unmap_page(vcpu, &pte);
+		}
 		/* The guest's PTE is not mapped yet. Map on the host */
-		kvmppc_mmu_map_page(vcpu, &pte);
+		kvmppc_mmu_map_page(vcpu, &pte, iswrite);
 		if (data)
 			vcpu->stat.sp_storage++;
 		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
 			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
 			kvmppc_patch_dcbz(vcpu, &pte);
 	} else {
 		/* MMIO */
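The DSISR test added above relies on DSISR_NOHPTE meaning "no host HPTE was found": when it is clear on a data fault, an HPTE existed but denied the access, so a stale (presumably read-only) mapping must be evicted before remapping with write permission. The same triage as a standalone predicate (the DSISR_NOHPTE value is taken from the Book3S architecture, hard-coded here for illustration):

	#include <stdbool.h>
	#include <stdint.h>

	#define DSISR_NOHPTE	0x40000000	/* no HPTE found for the access */

	/* Mirror of the patch's test: evict a stale host HPTE before remapping. */
	static bool should_unmap_stale_hpte(bool data, uint32_t fault_dsisr)
	{
		/*
		 * A data fault with DSISR_NOHPTE clear means an HPTE was present
		 * but refused the access -- presumably a read-only mapping for a
		 * page the guest believes is writable.
		 */
		return data && !(fault_dsisr & DSISR_NOHPTE);
	}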
@@ -619,13 +723,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
 
 	if (lost_ext & MSR_FP)
 		kvmppc_load_up_fpu();
+#ifdef CONFIG_ALTIVEC
 	if (lost_ext & MSR_VEC)
 		kvmppc_load_up_altivec();
+#endif
 	current->thread.regs->msr |= lost_ext;
 }
 
-int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
-		       unsigned int exit_nr)
+int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			  unsigned int exit_nr)
 {
 	int r = RESUME_HOST;
 	int s;
@@ -643,25 +749,32 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	switch (exit_nr) {
 	case BOOK3S_INTERRUPT_INST_STORAGE:
 	{
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		ulong shadow_srr1 = svcpu->shadow_srr1;
+		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
 		vcpu->stat.pf_instruc++;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 		/* We set segments as unused segments when invalidating them. So
 		 * treat the respective fault as segment fault. */
-		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
-			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
-			r = RESUME_GUEST;
+		{
+			struct kvmppc_book3s_shadow_vcpu *svcpu;
+			u32 sr;
+
+			svcpu = svcpu_get(vcpu);
+			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
 			svcpu_put(svcpu);
-			break;
+			if (sr == SR_INVALID) {
+				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+				r = RESUME_GUEST;
+				break;
+			}
 		}
 #endif
-		svcpu_put(svcpu);
 
 		/* only care about PTEG not found errors, but leave NX alone */
 		if (shadow_srr1 & 0x40000000) {
+			int idx = srcu_read_lock(&vcpu->kvm->srcu);
 			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
+			srcu_read_unlock(&vcpu->kvm->srcu, idx);
 			vcpu->stat.sp_instruc++;
 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
 			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
@@ -682,25 +795,36 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_DATA_STORAGE:
 	{
 		ulong dar = kvmppc_get_fault_dar(vcpu);
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		u32 fault_dsisr = svcpu->fault_dsisr;
+		u32 fault_dsisr = vcpu->arch.fault_dsisr;
 		vcpu->stat.pf_storage++;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 		/* We set segments as unused segments when invalidating them. So
 		 * treat the respective fault as segment fault. */
-		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
-			kvmppc_mmu_map_segment(vcpu, dar);
-			r = RESUME_GUEST;
+		{
+			struct kvmppc_book3s_shadow_vcpu *svcpu;
+			u32 sr;
+
+			svcpu = svcpu_get(vcpu);
+			sr = svcpu->sr[dar >> SID_SHIFT];
 			svcpu_put(svcpu);
-			break;
+			if (sr == SR_INVALID) {
+				kvmppc_mmu_map_segment(vcpu, dar);
+				r = RESUME_GUEST;
+				break;
+			}
 		}
 #endif
-		svcpu_put(svcpu);
 
-		/* The only case we need to handle is missing shadow PTEs */
-		if (fault_dsisr & DSISR_NOHPTE) {
+		/*
+		 * We need to handle missing shadow PTEs, and
+		 * protection faults due to us mapping a page read-only
+		 * when the guest thinks it is writable.
+		 */
+		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
+			int idx = srcu_read_lock(&vcpu->kvm->srcu);
 			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
+			srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		} else {
 			vcpu->arch.shared->dar = dar;
 			vcpu->arch.shared->dsisr = fault_dsisr;
@@ -743,13 +867,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
 	{
 		enum emulation_result er;
-		struct kvmppc_book3s_shadow_vcpu *svcpu;
 		ulong flags;
 
 program_interrupt:
-		svcpu = svcpu_get(vcpu);
-		flags = svcpu->shadow_srr1 & 0x1f0000ull;
-		svcpu_put(svcpu);
+		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
 
 		if (vcpu->arch.shared->msr & MSR_PR) {
 #ifdef EXIT_DEBUG
@@ -798,7 +919,7 @@ program_interrupt:
 		ulong cmd = kvmppc_get_gpr(vcpu, 3);
 		int i;
 
-#ifdef CONFIG_KVM_BOOK3S_64_PR
+#ifdef CONFIG_PPC_BOOK3S_64
 		if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
 			r = RESUME_GUEST;
 			break;
@@ -881,9 +1002,7 @@ program_interrupt:
 		break;
 	default:
 	{
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		ulong shadow_srr1 = svcpu->shadow_srr1;
-		svcpu_put(svcpu);
+		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
 		/* Ugh - bork here! What did we get? */
 		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
 			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
@@ -920,8 +1039,8 @@ program_interrupt:
 	return r;
 }
 
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
-				  struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
+					    struct kvm_sregs *sregs)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;
@@ -947,13 +1066,13 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
-				  struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
+					    struct kvm_sregs *sregs)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;
 
-	kvmppc_set_pvr(vcpu, sregs->pvr);
+	kvmppc_set_pvr_pr(vcpu, sregs->pvr);
 
 	vcpu3s->sdr1 = sregs->u.s.sdr1;
 	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
@@ -983,7 +1102,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
+				 union kvmppc_one_reg *val)
 {
 	int r = 0;
 
@@ -1012,7 +1132,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 	return r;
 }
 
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
+				 union kvmppc_one_reg *val)
 {
 	int r = 0;
 
@@ -1042,28 +1163,30 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 	return r;
 }
 
-int kvmppc_core_check_processor_compat(void)
-{
-	return 0;
-}
-
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
+						   unsigned int id)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s;
 	struct kvm_vcpu *vcpu;
 	int err = -ENOMEM;
 	unsigned long p;
 
-	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
-	if (!vcpu_book3s)
+	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+	if (!vcpu)
 		goto out;
 
-	vcpu_book3s->shadow_vcpu =
-		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
-	if (!vcpu_book3s->shadow_vcpu)
+	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
+	if (!vcpu_book3s)
 		goto free_vcpu;
+	vcpu->arch.book3s = vcpu_book3s;
+
+#ifdef CONFIG_KVM_BOOK3S_32
+	vcpu->arch.shadow_vcpu =
+		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
+	if (!vcpu->arch.shadow_vcpu)
+		goto free_vcpu3s;
+#endif
 
-	vcpu = &vcpu_book3s->vcpu;
 	err = kvm_vcpu_init(vcpu, kvm, id);
 	if (err)
 		goto free_shadow_vcpu;
@@ -1076,13 +1199,19 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	/* default to book3s_64 (970fx) */
+	/*
+	 * Default to the same as the host if we're on sufficiently
+	 * recent machine that we have 1TB segments;
+	 * otherwise default to PPC970FX.
+	 */
 	vcpu->arch.pvr = 0x3C0301;
+	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
+		vcpu->arch.pvr = mfspr(SPRN_PVR);
 #else
 	/* default to book3s_32 (750) */
 	vcpu->arch.pvr = 0x84202;
 #endif
-	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
 	vcpu->arch.slb_nr = 64;
 
 	vcpu->arch.shadow_msr = MSR_USER64;
@@ -1096,24 +1225,31 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 uninit_vcpu:
 	kvm_vcpu_uninit(vcpu);
 free_shadow_vcpu:
-	kfree(vcpu_book3s->shadow_vcpu);
-free_vcpu:
+#ifdef CONFIG_KVM_BOOK3S_32
+	kfree(vcpu->arch.shadow_vcpu);
+free_vcpu3s:
+#endif
 	vfree(vcpu_book3s);
+free_vcpu:
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 out:
 	return ERR_PTR(err);
 }
 
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
 	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 	kvm_vcpu_uninit(vcpu);
-	kfree(vcpu_book3s->shadow_vcpu);
+#ifdef CONFIG_KVM_BOOK3S_32
+	kfree(vcpu->arch.shadow_vcpu);
+#endif
 	vfree(vcpu_book3s);
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
-int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
 	struct thread_fp_state fp;
@@ -1216,8 +1352,8 @@ out:
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
-			       struct kvm_dirty_log *log)
+static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
+					 struct kvm_dirty_log *log)
 {
 	struct kvm_memory_slot *memslot;
 	struct kvm_vcpu *vcpu;
@@ -1252,67 +1388,100 @@ out:
 	return r;
 }
 
-#ifdef CONFIG_PPC64
-int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
+					 struct kvm_memory_slot *memslot)
 {
-	info->flags = KVM_PPC_1T_SEGMENTS;
-
-	/* SLB is always 64 entries */
-	info->slb_size = 64;
-
-	/* Standard 4k base page size segment */
-	info->sps[0].page_shift = 12;
-	info->sps[0].slb_enc = 0;
-	info->sps[0].enc[0].page_shift = 12;
-	info->sps[0].enc[0].pte_enc = 0;
-
-	/* Standard 16M large page size segment */
-	info->sps[1].page_shift = 24;
-	info->sps[1].slb_enc = SLB_VSID_L;
-	info->sps[1].enc[0].page_shift = 24;
-	info->sps[1].enc[0].pte_enc = 0;
+	return;
+}
 
+static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
+					struct kvm_memory_slot *memslot,
+					struct kvm_userspace_memory_region *mem)
+{
 	return 0;
 }
-#endif /* CONFIG_PPC64 */
 
-void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
-			      struct kvm_memory_slot *dont)
+static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				const struct kvm_memory_slot *old)
 {
+	return;
 }
 
-int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
-			       unsigned long npages)
+static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
+					struct kvm_memory_slot *dont)
 {
-	return 0;
+	return;
 }
 
-int kvmppc_core_prepare_memory_region(struct kvm *kvm,
-				      struct kvm_memory_slot *memslot,
-				      struct kvm_userspace_memory_region *mem)
+static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
+					 unsigned long npages)
 {
 	return 0;
 }
 
-void kvmppc_core_commit_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem,
-				const struct kvm_memory_slot *old)
+
+#ifdef CONFIG_PPC64
+static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
+					 struct kvm_ppc_smmu_info *info)
 {
-}
+	long int i;
+	struct kvm_vcpu *vcpu;
+
+	info->flags = 0;
+
+	/* SLB is always 64 entries */
+	info->slb_size = 64;
+
+	/* Standard 4k base page size segment */
+	info->sps[0].page_shift = 12;
+	info->sps[0].slb_enc = 0;
+	info->sps[0].enc[0].page_shift = 12;
+	info->sps[0].enc[0].pte_enc = 0;
+
+	/*
+	 * 64k large page size.
+	 * We only want to put this in if the CPUs we're emulating
+	 * support it, but unfortunately we don't have a vcpu easily
+	 * to hand here to test. Just pick the first vcpu, and if
+	 * that doesn't exist yet, report the minimum capability,
+	 * i.e., no 64k pages.
+	 * 1T segment support goes along with 64k pages.
+	 */
+	i = 1;
+	vcpu = kvm_get_vcpu(kvm, 0);
+	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
+		info->flags = KVM_PPC_1T_SEGMENTS;
+		info->sps[i].page_shift = 16;
+		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
+		info->sps[i].enc[0].page_shift = 16;
+		info->sps[i].enc[0].pte_enc = 1;
+		++i;
+	}
+
+	/* Standard 16M large page size segment */
+	info->sps[i].page_shift = 24;
+	info->sps[i].slb_enc = SLB_VSID_L;
+	info->sps[i].enc[0].page_shift = 24;
+	info->sps[i].enc[0].pte_enc = 0;
 
-void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+	return 0;
+}
+#else
+static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
+					 struct kvm_ppc_smmu_info *info)
 {
+	/* We should not get called */
+	BUG();
 }
+#endif /* CONFIG_PPC64 */
 
 static unsigned int kvm_global_user_count = 0;
 static DEFINE_SPINLOCK(kvm_global_user_count_lock);
 
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_pr(struct kvm *kvm)
 {
-#ifdef CONFIG_PPC64
-	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
-	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
-#endif
+	mutex_init(&kvm->arch.hpt_mutex);
 
 	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
 		spin_lock(&kvm_global_user_count_lock);
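The rewritten kvm_vm_ioctl_get_smmu_info_pr() now reports page sizes conditionally, so userspace should iterate the sps[] array rather than assume two fixed entries. A hypothetical probe through the KVM_PPC_GET_SMMU_INFO VM ioctl (vm_fd setup and error handling elided; this assumes unfilled entries read back with a zero page_shift):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* vm_fd is an open KVM VM file descriptor (setup not shown). */
	static void print_segment_page_sizes(int vm_fd)
	{
		struct kvm_ppc_smmu_info info;
		int i;

		if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0)
			return;
		for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
			if (!info.sps[i].page_shift)
				continue;	/* assumed-empty slot */
			printf("base page shift %u, slb_enc 0x%x\n",
			       info.sps[i].page_shift, info.sps[i].slb_enc);
		}
	}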
@@ -1323,7 +1492,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 	return 0;
 }
 
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
 {
 #ifdef CONFIG_PPC64
 	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
@@ -1338,26 +1507,81 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
 	}
 }
 
-static int kvmppc_book3s_init(void)
+static int kvmppc_core_check_processor_compat_pr(void)
 {
-	int r;
+	/* we are always compatible */
+	return 0;
+}
 
-	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
-		     THIS_MODULE);
+static long kvm_arch_vm_ioctl_pr(struct file *filp,
+				 unsigned int ioctl, unsigned long arg)
+{
+	return -ENOTTY;
+}
 
-	if (r)
+static struct kvmppc_ops kvm_ops_pr = {
+	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
+	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
+	.get_one_reg = kvmppc_get_one_reg_pr,
+	.set_one_reg = kvmppc_set_one_reg_pr,
+	.vcpu_load   = kvmppc_core_vcpu_load_pr,
+	.vcpu_put    = kvmppc_core_vcpu_put_pr,
+	.set_msr     = kvmppc_set_msr_pr,
+	.vcpu_run    = kvmppc_vcpu_run_pr,
+	.vcpu_create = kvmppc_core_vcpu_create_pr,
+	.vcpu_free   = kvmppc_core_vcpu_free_pr,
+	.check_requests = kvmppc_core_check_requests_pr,
+	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
+	.flush_memslot = kvmppc_core_flush_memslot_pr,
+	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
+	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
+	.unmap_hva = kvm_unmap_hva_pr,
+	.unmap_hva_range = kvm_unmap_hva_range_pr,
+	.age_hva  = kvm_age_hva_pr,
+	.test_age_hva = kvm_test_age_hva_pr,
+	.set_spte_hva = kvm_set_spte_hva_pr,
+	.mmu_destroy  = kvmppc_mmu_destroy_pr,
+	.free_memslot = kvmppc_core_free_memslot_pr,
+	.create_memslot = kvmppc_core_create_memslot_pr,
+	.init_vm = kvmppc_core_init_vm_pr,
+	.destroy_vm = kvmppc_core_destroy_vm_pr,
+	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
+	.emulate_op = kvmppc_core_emulate_op_pr,
+	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
+	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
+	.fast_vcpu_kick = kvm_vcpu_kick,
+	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
+};
+
+
+int kvmppc_book3s_init_pr(void)
+{
+	int r;
+
+	r = kvmppc_core_check_processor_compat_pr();
+	if (r < 0)
 		return r;
 
-	r = kvmppc_mmu_hpte_sysinit();
+	kvm_ops_pr.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_pr;
 
+	r = kvmppc_mmu_hpte_sysinit();
 	return r;
 }
 
-static void kvmppc_book3s_exit(void)
+void kvmppc_book3s_exit_pr(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_mmu_hpte_sysexit();
-	kvm_exit();
 }
 
-module_init(kvmppc_book3s_init);
-module_exit(kvmppc_book3s_exit);
+/*
+ * We only support separate modules for book3s 64
+ */
+#ifdef CONFIG_PPC_BOOK3S_64
+
+module_init(kvmppc_book3s_init_pr);
+module_exit(kvmppc_book3s_exit_pr);
+
+MODULE_LICENSE("GPL");
+#endif
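The kvm_ops_pr table above decouples the common Book3S code from the PR backend: instead of linking directly against kvmppc_core_* symbols, callers indirect through a registered ops pointer, which is what lets the PR flavour be built as its own module. A minimal sketch of that dispatch pattern (the kvmppc_pr_ops declaration matches the pointer this file assigns in kvmppc_book3s_init_pr(); the wrapper function is illustrative, not the kernel's actual call site):

	/* Declared by the common code; this file points it at kvm_ops_pr. */
	extern struct kvmppc_ops *kvmppc_pr_ops;

	/* Illustrative: how common code could route a vcpu run to the PR backend. */
	static int kvmppc_dispatch_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
	{
		if (!kvmppc_pr_ops)
			return -EINVAL;		/* PR backend not registered */
		return kvmppc_pr_ops->vcpu_run(run, vcpu);
	}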