author		Alexander Graf <agraf@suse.de>	2013-01-11 09:22:45 -0500
committer	Alexander Graf <agraf@suse.de>	2013-01-24 13:23:31 -0500
commit		b71c9e2fb72cf538aadbc59ea719639a1e2191fa
tree		5dc0827d2fe0ae15172f04f299d57fe564d5701e
parent		9d98b3ff949dab3bafa2c50856ce9e1f88497f9a
KVM: PPC: E500: Split host and guest MMU parts
This patch splits the file e500_tlb.c into e500_mmu.c (guest TLB handling)
and e500_mmu_host.c (host TLB handling).
The main benefit of this split is readability and maintainability. It's
just a lot harder to write dirty code :).
Signed-off-by: Alexander Graf <agraf@suse.de>
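[Editor's note: the practical effect of the split is a narrow interface between the two halves: e500_mmu.c keeps all guest-visible TLB state and reaches the host TLB only through the handful of functions declared in the new e500_mmu_host.h. The sketch below restates that contract and the resulting teardown pairing; it is a condensed excerpt of the diff that follows, not standalone code (struct kvmppc_vcpu_e500 comes from e500.h, and free_gtlb() is a static helper inside e500_mmu.c).]

    /* Host-MMU API that e500_mmu.c now consumes (from e500_mmu_host.h) */
    void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
			     int esel);
    void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500);
    void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500);
    int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500);
    void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);

    /* Guest-MMU side after the split: host state is created and torn down
     * opaquely, so e500_mmu.c no longer touches host_tlb_params at all. */
    void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
    {
	    free_gtlb(vcpu_e500);            /* guest TLB arrays, owned here */
	    e500_mmu_host_uninit(vcpu_e500); /* host refs/rmap, owned by host side */
    }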
 arch/powerpc/kvm/Makefile                                              |   9
 arch/powerpc/kvm/e500_mmu.c (renamed from arch/powerpc/kvm/e500_tlb.c) | 627
 arch/powerpc/kvm/e500_mmu_host.c                                       | 672
 arch/powerpc/kvm/e500_mmu_host.h                                       |  20
 4 files changed, 704 insertions(+), 624 deletions(-)
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 1e473d46322c..b772eded8c26 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -10,7 +10,8 @@ common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o \
 		eventfd.o)
 
 CFLAGS_44x_tlb.o := -I.
-CFLAGS_e500_tlb.o := -I.
+CFLAGS_e500_mmu.o := -I.
+CFLAGS_e500_mmu_host.o := -I.
 CFLAGS_emulate.o := -I.
 
 common-objs-y += powerpc.o emulate.o
@@ -35,7 +36,8 @@ kvm-e500-objs := \
 	booke_emulate.o \
 	booke_interrupts.o \
 	e500.o \
-	e500_tlb.o \
+	e500_mmu.o \
+	e500_mmu_host.o \
 	e500_emulate.o
 kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs)
 
@@ -45,7 +47,8 @@ kvm-e500mc-objs := \
 	booke_emulate.o \
 	bookehv_interrupts.o \
 	e500mc.o \
-	e500_tlb.o \
+	e500_mmu.o \
+	e500_mmu_host.o \
 	e500_emulate.o
 kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
 
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_mmu.c
index 48d1a4f1f5ff..c3d1721aa1b8 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -1,10 +1,11 @@
 /*
- * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Yu Liu, yu.liu@freescale.com
  *         Scott Wood, scottwood@freescale.com
  *         Ashish Kalra, ashish.kalra@freescale.com
  *         Varun Sethi, varun.sethi@freescale.com
+ *         Alexander Graf, agraf@suse.de
  *
  * Description:
  * This file is based on arch/powerpc/kvm/44x_tlb.c,
@@ -33,10 +34,7 @@
 #include "e500.h"
 #include "trace.h"
 #include "timing.h"
-
-#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
-
-static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
+#include "e500_mmu_host.h"
 
 static inline unsigned int gtlb0_get_next_victim(
 		struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -50,197 +48,6 @@ static inline unsigned int gtlb0_get_next_victim(
 	return victim;
 }
 
-static inline unsigned int tlb1_max_shadow_size(void)
-{
-	/* reserve one entry for magic page */
-	return host_tlb_params[1].entries - tlbcam_index - 1;
-}
-
-static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
-{
-	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
-}
-
-static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
-{
-	/* Mask off reserved bits. */
-	mas3 &= MAS3_ATTRIB_MASK;
-
-#ifndef CONFIG_KVM_BOOKE_HV
-	if (!usermode) {
-		/* Guest is in supervisor mode,
-		 * so we need to translate guest
-		 * supervisor permissions into user permissions. */
-		mas3 &= ~E500_TLB_USER_PERM_MASK;
-		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
-	}
-	mas3 |= E500_TLB_SUPER_PERM_MASK;
-#endif
-	return mas3;
-}
-
-static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
-{
-#ifdef CONFIG_SMP
-	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
-#else
-	return mas2 & MAS2_ATTRIB_MASK;
-#endif
-}
-
-/*
- * writing shadow tlb entry to host TLB
- */
-static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
-				     uint32_t mas0)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	mtspr(SPRN_MAS0, mas0);
-	mtspr(SPRN_MAS1, stlbe->mas1);
-	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
-	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
-	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
-#ifdef CONFIG_KVM_BOOKE_HV
-	mtspr(SPRN_MAS8, stlbe->mas8);
-#endif
-	asm volatile("isync; tlbwe" : : : "memory");
-
-#ifdef CONFIG_KVM_BOOKE_HV
-	/* Must clear mas8 for other host tlbwe's */
-	mtspr(SPRN_MAS8, 0);
-	isync();
-#endif
-	local_irq_restore(flags);
-
-	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
-				      stlbe->mas2, stlbe->mas7_3);
-}
-
-/*
- * Acquire a mas0 with victim hint, as if we just took a TLB miss.
- *
- * We don't care about the address we're searching for, other than that it's
- * in the right set and is not present in the TLB. Using a zero PID and a
- * userspace address means we don't have to set and then restore MAS5, or
- * calculate a proper MAS6 value.
- */
-static u32 get_host_mas0(unsigned long eaddr)
-{
-	unsigned long flags;
-	u32 mas0;
-
-	local_irq_save(flags);
-	mtspr(SPRN_MAS6, 0);
-	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
-	mas0 = mfspr(SPRN_MAS0);
-	local_irq_restore(flags);
-
-	return mas0;
-}
-
-/* sesel is for tlb1 only */
-static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
-		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
-{
-	u32 mas0;
-
-	if (tlbsel == 0) {
-		mas0 = get_host_mas0(stlbe->mas2);
-		__write_host_tlbe(stlbe, mas0);
-	} else {
-		__write_host_tlbe(stlbe,
-				  MAS0_TLBSEL(1) |
-				  MAS0_ESEL(to_htlb1_esel(sesel)));
-	}
-}
-
-/* sesel is for tlb1 only */
-static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
-			struct kvm_book3e_206_tlb_entry *gtlbe,
-			struct kvm_book3e_206_tlb_entry *stlbe,
-			int stlbsel, int sesel)
-{
-	int stid;
-
-	preempt_disable();
-	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);
-
-	stlbe->mas1 |= MAS1_TID(stid);
-	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
-	preempt_enable();
-}
-
-#ifdef CONFIG_KVM_E500V2
-void kvmppc_map_magic(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	struct kvm_book3e_206_tlb_entry magic;
-	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
-	unsigned int stid;
-	pfn_t pfn;
-
-	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
-	get_page(pfn_to_page(pfn));
-
-	preempt_disable();
-	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);
-
-	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
-		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
-	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
-	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
-		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
-	magic.mas8 = 0;
-
-	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
-	preempt_enable();
-}
-#endif
-
-static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
-				int tlbsel, int esel)
-{
-	struct kvm_book3e_206_tlb_entry *gtlbe =
-		get_entry(vcpu_e500, tlbsel, esel);
-	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
-
-	/* Don't bother with unmapped entries */
-	if (!(ref->flags & E500_TLB_VALID))
-		return;
-
-	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
-		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
-		int hw_tlb_indx;
-		unsigned long flags;
-
-		local_irq_save(flags);
-		while (tmp) {
-			hw_tlb_indx = __ilog2_u64(tmp & -tmp);
-			mtspr(SPRN_MAS0,
-			      MAS0_TLBSEL(1) |
-			      MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
-			mtspr(SPRN_MAS1, 0);
-			asm volatile("tlbwe");
-			vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
-			tmp &= tmp - 1;
-		}
-		mb();
-		vcpu_e500->g2h_tlb1_map[esel] = 0;
-		ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
-		local_irq_restore(flags);
-
-		return;
-	}
-
-	/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
-	kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
-
-	/* Mark the TLB as not backed by the host anymore */
-	ref->flags &= ~E500_TLB_VALID;
-}
-
 static int tlb0_set_base(gva_t addr, int sets, int ways)
 {
 	int set_base;
@@ -319,70 +126,6 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
 	return -1;
 }
 
-static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
-					 struct kvm_book3e_206_tlb_entry *gtlbe,
-					 pfn_t pfn)
-{
-	ref->pfn = pfn;
-	ref->flags = E500_TLB_VALID;
-
-	if (tlbe_is_writable(gtlbe))
-		kvm_set_pfn_dirty(pfn);
-}
-
-static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
-{
-	if (ref->flags & E500_TLB_VALID) {
-		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
-		ref->flags = 0;
-	}
-}
-
-static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-	if (vcpu_e500->g2h_tlb1_map)
-		memset(vcpu_e500->g2h_tlb1_map, 0,
-		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
-	if (vcpu_e500->h2g_tlb1_rmap)
-		memset(vcpu_e500->h2g_tlb1_rmap, 0,
-		       sizeof(unsigned int) * host_tlb_params[1].entries);
-}
-
-static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-	int tlbsel = 0;
-	int i;
-
-	for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
-		struct tlbe_ref *ref =
-			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
-		kvmppc_e500_ref_release(ref);
-	}
-}
-
-static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-	int stlbsel = 1;
-	int i;
-
-	kvmppc_e500_tlbil_all(vcpu_e500);
-
-	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
-		struct tlbe_ref *ref =
-			&vcpu_e500->tlb_refs[stlbsel][i];
-		kvmppc_e500_ref_release(ref);
-	}
-
-	clear_tlb_privs(vcpu_e500);
-}
-
-void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	clear_tlb_refs(vcpu_e500);
-	clear_tlb1_bitmap(vcpu_e500);
-}
-
 static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
 		unsigned int eaddr, int as)
 {
@@ -408,234 +151,6 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
 		| (as ? MAS6_SAS : 0);
 }
 
-/* TID must be supplied by the caller */
-static inline void kvmppc_e500_setup_stlbe(
-	struct kvm_vcpu *vcpu,
-	struct kvm_book3e_206_tlb_entry *gtlbe,
-	int tsize, struct tlbe_ref *ref, u64 gvaddr,
-	struct kvm_book3e_206_tlb_entry *stlbe)
-{
-	pfn_t pfn = ref->pfn;
-	u32 pr = vcpu->arch.shared->msr & MSR_PR;
-
-	BUG_ON(!(ref->flags & E500_TLB_VALID));
-
-	/* Force IPROT=0 for all guest mappings. */
-	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
-	stlbe->mas2 = (gvaddr & MAS2_EPN) |
-		      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
-	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
-			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
-
-#ifdef CONFIG_KVM_BOOKE_HV
-	stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
-#endif
-}
-
-static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
-	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
-	struct tlbe_ref *ref)
-{
-	struct kvm_memory_slot *slot;
-	unsigned long pfn = 0; /* silence GCC warning */
-	unsigned long hva;
-	int pfnmap = 0;
-	int tsize = BOOK3E_PAGESZ_4K;
-
-	/*
-	 * Translate guest physical to true physical, acquiring
-	 * a page reference if it is normal, non-reserved memory.
-	 *
-	 * gfn_to_memslot() must succeed because otherwise we wouldn't
-	 * have gotten this far. Eventually we should just pass the slot
-	 * pointer through from the first lookup.
-	 */
-	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
-	hva = gfn_to_hva_memslot(slot, gfn);
-
-	if (tlbsel == 1) {
-		struct vm_area_struct *vma;
-		down_read(&current->mm->mmap_sem);
-
-		vma = find_vma(current->mm, hva);
-		if (vma && hva >= vma->vm_start &&
-		    (vma->vm_flags & VM_PFNMAP)) {
-			/*
-			 * This VMA is a physically contiguous region (e.g.
-			 * /dev/mem) that bypasses normal Linux page
-			 * management. Find the overlap between the
-			 * vma and the memslot.
-			 */
-
-			unsigned long start, end;
-			unsigned long slot_start, slot_end;
-
-			pfnmap = 1;
-
-			start = vma->vm_pgoff;
-			end = start +
-			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
-
-			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
-
-			slot_start = pfn - (gfn - slot->base_gfn);
-			slot_end = slot_start + slot->npages;
-
-			if (start < slot_start)
-				start = slot_start;
-			if (end > slot_end)
-				end = slot_end;
-
-			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
-				MAS1_TSIZE_SHIFT;
-
-			/*
-			 * e500 doesn't implement the lowest tsize bit,
-			 * or 1K pages.
-			 */
-			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
-
-			/*
-			 * Now find the largest tsize (up to what the guest
-			 * requested) that will cover gfn, stay within the
-			 * range, and for which gfn and pfn are mutually
-			 * aligned.
-			 */
-
-			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
-				unsigned long gfn_start, gfn_end, tsize_pages;
-				tsize_pages = 1 << (tsize - 2);
-
-				gfn_start = gfn & ~(tsize_pages - 1);
-				gfn_end = gfn_start + tsize_pages;
-
-				if (gfn_start + pfn - gfn < start)
-					continue;
-				if (gfn_end + pfn - gfn > end)
-					continue;
-				if ((gfn & (tsize_pages - 1)) !=
-				    (pfn & (tsize_pages - 1)))
-					continue;
-
-				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
-				pfn &= ~(tsize_pages - 1);
-				break;
-			}
-		} else if (vma && hva >= vma->vm_start &&
-			   (vma->vm_flags & VM_HUGETLB)) {
-			unsigned long psize = vma_kernel_pagesize(vma);
-
-			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
-				MAS1_TSIZE_SHIFT;
-
-			/*
-			 * Take the largest page size that satisfies both host
-			 * and guest mapping
-			 */
-			tsize = min(__ilog2(psize) - 10, tsize);
-
-			/*
-			 * e500 doesn't implement the lowest tsize bit,
-			 * or 1K pages.
-			 */
-			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
-		}
-
-		up_read(&current->mm->mmap_sem);
-	}
-
-	if (likely(!pfnmap)) {
-		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
-		pfn = gfn_to_pfn_memslot(slot, gfn);
-		if (is_error_noslot_pfn(pfn)) {
-			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
-					(long)gfn);
-			return -EINVAL;
-		}
-
-		/* Align guest and physical address to page map boundaries */
-		pfn &= ~(tsize_pages - 1);
-		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
-	}
-
-	/* Drop old ref and setup new one. */
-	kvmppc_e500_ref_release(ref);
-	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
-
-	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
-				ref, gvaddr, stlbe);
-
-	/* Clear i-cache for new pages */
-	kvmppc_mmu_flush_icache(pfn);
-
-	/* Drop refcount on page, so that mmu notifiers can clear it */
-	kvm_release_pfn_clean(pfn);
-
-	return 0;
-}
-
-/* XXX only map the one-one case, for now use TLB0 */
-static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-				int esel,
-				struct kvm_book3e_206_tlb_entry *stlbe)
-{
-	struct kvm_book3e_206_tlb_entry *gtlbe;
-	struct tlbe_ref *ref;
-	int stlbsel = 0;
-	int sesel = 0;
-	int r;
-
-	gtlbe = get_entry(vcpu_e500, 0, esel);
-	ref = &vcpu_e500->gtlb_priv[0][esel].ref;
-
-	r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
-				   get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
-				   gtlbe, 0, stlbe, ref);
-	if (r)
-		return r;
-
-	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);
-
-	return 0;
-}
-
-/* Caller must ensure that the specified guest TLB entry is safe to insert into
- * the shadow TLB. */
-/* XXX for both one-one and one-to-many , for now use TLB1 */
-static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
-		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
-{
-	struct tlbe_ref *ref;
-	unsigned int sesel;
-	int r;
-	int stlbsel = 1;
-
-	sesel = vcpu_e500->host_tlb1_nv++;
-
-	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
-		vcpu_e500->host_tlb1_nv = 0;
-
-	ref = &vcpu_e500->tlb_refs[1][sesel];
-	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
-				   ref);
-	if (r)
-		return r;
-
-	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
-	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
-	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
-		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
-		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
-	}
-	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
-
-	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);
-
-	return 0;
-}
-
 static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
 	int size = vcpu_e500->gtlb_params[1].entries;
@@ -1020,85 +535,6 @@ void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 {
 }
 
-void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
-		    unsigned int index)
-{
-	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	struct tlbe_priv *priv;
-	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
-	int tlbsel = tlbsel_of(index);
-	int esel = esel_of(index);
-
-	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
-
-	switch (tlbsel) {
-	case 0:
-		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
-
-		/* Triggers after clear_tlb_refs or on initial mapping */
-		if (!(priv->ref.flags & E500_TLB_VALID)) {
-			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
-		} else {
-			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
-						&priv->ref, eaddr, &stlbe);
-			write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
-		}
-		break;
-
-	case 1: {
-		gfn_t gfn = gpaddr >> PAGE_SHIFT;
-		kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
-				     esel);
-		break;
-	}
-
-	default:
-		BUG();
-		break;
-	}
-}
-
-/************* MMU Notifiers *************/
-
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-	trace_kvm_unmap_hva(hva);
-
-	/*
-	 * Flush all shadow tlb entries everywhere. This is slow, but
-	 * we are 100% sure that we catch the to be unmapped page
-	 */
-	kvm_flush_remote_tlbs(kvm);
-
-	return 0;
-}
-
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
-{
-	/* kvm_unmap_hva flushes everything anyways */
-	kvm_unmap_hva(kvm, start);
-
-	return 0;
-}
-
-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
-{
-	/* XXX could be more clever ;) */
-	return 0;
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
-	/* XXX could be more clever ;) */
-	return 0;
-}
-
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
-{
-	/* The page will get remapped properly on its next fault */
-	kvm_unmap_hva(kvm, hva);
-}
-
 /*****************************************/
 
 static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -1309,37 +745,8 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 	int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
 	int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;
 
-	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
-	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
-
-	/*
-	 * This should never happen on real e500 hardware, but is
-	 * architecturally possible -- e.g. in some weird nested
-	 * virtualization case.
-	 */
-	if (host_tlb_params[0].entries == 0 ||
-	    host_tlb_params[1].entries == 0) {
-		pr_err("%s: need to know host tlb size\n", __func__);
-		return -ENODEV;
-	}
-
-	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
-				  TLBnCFG_ASSOC_SHIFT;
-	host_tlb_params[1].ways = host_tlb_params[1].entries;
-
-	if (!is_power_of_2(host_tlb_params[0].entries) ||
-	    !is_power_of_2(host_tlb_params[0].ways) ||
-	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
-	    host_tlb_params[0].ways == 0) {
-		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
-		       __func__, host_tlb_params[0].entries,
-		       host_tlb_params[0].ways);
-		return -ENODEV;
-	}
-
-	host_tlb_params[0].sets =
-		host_tlb_params[0].entries / host_tlb_params[0].ways;
-	host_tlb_params[1].sets = 1;
+	if (e500_mmu_host_init(vcpu_e500))
+		goto err;
 
 	vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
 	vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;
@@ -1358,18 +765,6 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 	vcpu_e500->gtlb_offset[0] = 0;
 	vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;
 
-	vcpu_e500->tlb_refs[0] =
-		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
-			GFP_KERNEL);
-	if (!vcpu_e500->tlb_refs[0])
-		goto err;
-
-	vcpu_e500->tlb_refs[1] =
-		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
-			GFP_KERNEL);
-	if (!vcpu_e500->tlb_refs[1])
-		goto err;
-
 	vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
 					  vcpu_e500->gtlb_params[0].entries,
 					  GFP_KERNEL);
@@ -1388,12 +783,6 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 	if (!vcpu_e500->g2h_tlb1_map)
 		goto err;
 
-	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
-					   host_tlb_params[1].entries,
-					   GFP_KERNEL);
-	if (!vcpu_e500->h2g_tlb1_rmap)
-		goto err;
-
 	/* Init TLB configuration register */
 	vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
 			       ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
@@ -1412,15 +801,11 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 
 err:
 	free_gtlb(vcpu_e500);
-	kfree(vcpu_e500->tlb_refs[0]);
-	kfree(vcpu_e500->tlb_refs[1]);
 	return -1;
 }
 
 void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
 	free_gtlb(vcpu_e500);
-	kfree(vcpu_e500->h2g_tlb1_rmap);
-	kfree(vcpu_e500->tlb_refs[0]);
-	kfree(vcpu_e500->tlb_refs[1]);
+	e500_mmu_host_uninit(vcpu_e500);
 }
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
new file mode 100644
index 000000000000..4c32d6510133
--- /dev/null
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -0,0 +1,672 @@
+/*
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Yu Liu, yu.liu@freescale.com
+ *         Scott Wood, scottwood@freescale.com
+ *         Ashish Kalra, ashish.kalra@freescale.com
+ *         Varun Sethi, varun.sethi@freescale.com
+ *         Alexander Graf, agraf@suse.de
+ *
+ * Description:
+ * This file is based on arch/powerpc/kvm/44x_tlb.c,
+ * by Hollis Blanchard <hollisb@us.ibm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <linux/log2.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/rwsem.h>
+#include <linux/vmalloc.h>
+#include <linux/hugetlb.h>
+#include <asm/kvm_ppc.h>
+
+#include "e500.h"
+#include "trace.h"
+#include "timing.h"
+#include "e500_mmu_host.h"
+
+#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
+
+static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
+
+static inline unsigned int tlb1_max_shadow_size(void)
+{
+	/* reserve one entry for magic page */
+	return host_tlb_params[1].entries - tlbcam_index - 1;
+}
+
+static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
+{
+	/* Mask off reserved bits. */
+	mas3 &= MAS3_ATTRIB_MASK;
+
+#ifndef CONFIG_KVM_BOOKE_HV
+	if (!usermode) {
+		/* Guest is in supervisor mode,
+		 * so we need to translate guest
+		 * supervisor permissions into user permissions. */
+		mas3 &= ~E500_TLB_USER_PERM_MASK;
+		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
+	}
+	mas3 |= E500_TLB_SUPER_PERM_MASK;
+#endif
+	return mas3;
+}
+
+static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
+{
+#ifdef CONFIG_SMP
+	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
+#else
+	return mas2 & MAS2_ATTRIB_MASK;
+#endif
+}
+
+/*
+ * writing shadow tlb entry to host TLB
+ */
+static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
+				     uint32_t mas0)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	mtspr(SPRN_MAS0, mas0);
+	mtspr(SPRN_MAS1, stlbe->mas1);
+	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
+	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
+	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
+#ifdef CONFIG_KVM_BOOKE_HV
+	mtspr(SPRN_MAS8, stlbe->mas8);
+#endif
+	asm volatile("isync; tlbwe" : : : "memory");
+
+#ifdef CONFIG_KVM_BOOKE_HV
+	/* Must clear mas8 for other host tlbwe's */
+	mtspr(SPRN_MAS8, 0);
+	isync();
+#endif
+	local_irq_restore(flags);
+
+	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
+				      stlbe->mas2, stlbe->mas7_3);
+}
+
+/*
+ * Acquire a mas0 with victim hint, as if we just took a TLB miss.
+ *
+ * We don't care about the address we're searching for, other than that it's
+ * in the right set and is not present in the TLB. Using a zero PID and a
+ * userspace address means we don't have to set and then restore MAS5, or
+ * calculate a proper MAS6 value.
+ */
+static u32 get_host_mas0(unsigned long eaddr)
+{
+	unsigned long flags;
+	u32 mas0;
+
+	local_irq_save(flags);
+	mtspr(SPRN_MAS6, 0);
+	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
+	mas0 = mfspr(SPRN_MAS0);
+	local_irq_restore(flags);
+
+	return mas0;
+}
+
+/* sesel is for tlb1 only */
+static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
+		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
+{
+	u32 mas0;
+
+	if (tlbsel == 0) {
+		mas0 = get_host_mas0(stlbe->mas2);
+		__write_host_tlbe(stlbe, mas0);
+	} else {
+		__write_host_tlbe(stlbe,
+				  MAS0_TLBSEL(1) |
+				  MAS0_ESEL(to_htlb1_esel(sesel)));
+	}
+}
+
+/* sesel is for tlb1 only */
+static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
+			struct kvm_book3e_206_tlb_entry *gtlbe,
+			struct kvm_book3e_206_tlb_entry *stlbe,
+			int stlbsel, int sesel)
+{
+	int stid;
+
+	preempt_disable();
+	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);
+
+	stlbe->mas1 |= MAS1_TID(stid);
+	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
+	preempt_enable();
+}
+
+#ifdef CONFIG_KVM_E500V2
+/* XXX should be a hook in the gva2hpa translation */
+void kvmppc_map_magic(struct kvm_vcpu *vcpu)
+{
+	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+	struct kvm_book3e_206_tlb_entry magic;
+	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+	unsigned int stid;
+	pfn_t pfn;
+
+	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
+	get_page(pfn_to_page(pfn));
+
+	preempt_disable();
+	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);
+
+	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
+		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
+	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
+	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
+		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
+	magic.mas8 = 0;
+
+	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
+	preempt_enable();
+}
+#endif
+
+void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
+			 int esel)
+{
+	struct kvm_book3e_206_tlb_entry *gtlbe =
+		get_entry(vcpu_e500, tlbsel, esel);
+	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
+
+	/* Don't bother with unmapped entries */
+	if (!(ref->flags & E500_TLB_VALID))
+		return;
+
+	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
+		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
+		int hw_tlb_indx;
+		unsigned long flags;
+
+		local_irq_save(flags);
+		while (tmp) {
+			hw_tlb_indx = __ilog2_u64(tmp & -tmp);
+			mtspr(SPRN_MAS0,
+			      MAS0_TLBSEL(1) |
+			      MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
+			mtspr(SPRN_MAS1, 0);
+			asm volatile("tlbwe");
+			vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
+			tmp &= tmp - 1;
+		}
+		mb();
+		vcpu_e500->g2h_tlb1_map[esel] = 0;
+		ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
+		local_irq_restore(flags);
+
+		return;
+	}
+
+	/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
+	kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
+
+	/* Mark the TLB as not backed by the host anymore */
+	ref->flags &= ~E500_TLB_VALID;
+}
+
+static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
+{
+	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
+}
+
+static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
+					 struct kvm_book3e_206_tlb_entry *gtlbe,
+					 pfn_t pfn)
+{
+	ref->pfn = pfn;
+	ref->flags = E500_TLB_VALID;
+
+	if (tlbe_is_writable(gtlbe))
+		kvm_set_pfn_dirty(pfn);
+}
+
+static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
+{
+	if (ref->flags & E500_TLB_VALID) {
+		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
+		ref->flags = 0;
+	}
+}
+
+void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+	if (vcpu_e500->g2h_tlb1_map)
+		memset(vcpu_e500->g2h_tlb1_map, 0,
+		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
+	if (vcpu_e500->h2g_tlb1_rmap)
+		memset(vcpu_e500->h2g_tlb1_rmap, 0,
+		       sizeof(unsigned int) * host_tlb_params[1].entries);
+}
+
+static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+	int tlbsel = 0;
+	int i;
+
+	for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+		struct tlbe_ref *ref =
+			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
+		kvmppc_e500_ref_release(ref);
+	}
+}
+
+void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+	int stlbsel = 1;
+	int i;
+
+	kvmppc_e500_tlbil_all(vcpu_e500);
+
+	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
+		struct tlbe_ref *ref =
+			&vcpu_e500->tlb_refs[stlbsel][i];
+		kvmppc_e500_ref_release(ref);
+	}
+
+	clear_tlb_privs(vcpu_e500);
+}
+
+void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
+{
+	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+	clear_tlb_refs(vcpu_e500);
+	clear_tlb1_bitmap(vcpu_e500);
+}
+
+/* TID must be supplied by the caller */
+static void kvmppc_e500_setup_stlbe(
+	struct kvm_vcpu *vcpu,
+	struct kvm_book3e_206_tlb_entry *gtlbe,
+	int tsize, struct tlbe_ref *ref, u64 gvaddr,
+	struct kvm_book3e_206_tlb_entry *stlbe)
+{
+	pfn_t pfn = ref->pfn;
+	u32 pr = vcpu->arch.shared->msr & MSR_PR;
+
+	BUG_ON(!(ref->flags & E500_TLB_VALID));
+
+	/* Force IPROT=0 for all guest mappings. */
+	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
+	stlbe->mas2 = (gvaddr & MAS2_EPN) |
+		      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
+	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
+			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
+
+#ifdef CONFIG_KVM_BOOKE_HV
+	stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
+#endif
+}
+
+static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
+	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
+	struct tlbe_ref *ref)
+{
+	struct kvm_memory_slot *slot;
+	unsigned long pfn = 0; /* silence GCC warning */
+	unsigned long hva;
+	int pfnmap = 0;
+	int tsize = BOOK3E_PAGESZ_4K;
+
+	/*
+	 * Translate guest physical to true physical, acquiring
+	 * a page reference if it is normal, non-reserved memory.
+	 *
+	 * gfn_to_memslot() must succeed because otherwise we wouldn't
+	 * have gotten this far. Eventually we should just pass the slot
+	 * pointer through from the first lookup.
+	 */
+	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
+	hva = gfn_to_hva_memslot(slot, gfn);
+
+	if (tlbsel == 1) {
+		struct vm_area_struct *vma;
+		down_read(&current->mm->mmap_sem);
+
+		vma = find_vma(current->mm, hva);
+		if (vma && hva >= vma->vm_start &&
+		    (vma->vm_flags & VM_PFNMAP)) {
+			/*
+			 * This VMA is a physically contiguous region (e.g.
+			 * /dev/mem) that bypasses normal Linux page
+			 * management. Find the overlap between the
+			 * vma and the memslot.
+			 */
+
+			unsigned long start, end;
+			unsigned long slot_start, slot_end;
+
+			pfnmap = 1;
+
+			start = vma->vm_pgoff;
+			end = start +
+			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+
+			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
+
+			slot_start = pfn - (gfn - slot->base_gfn);
+			slot_end = slot_start + slot->npages;
+
+			if (start < slot_start)
+				start = slot_start;
+			if (end > slot_end)
+				end = slot_end;
+
+			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
+				MAS1_TSIZE_SHIFT;
+
+			/*
+			 * e500 doesn't implement the lowest tsize bit,
+			 * or 1K pages.
+			 */
+			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
+
+			/*
+			 * Now find the largest tsize (up to what the guest
+			 * requested) that will cover gfn, stay within the
+			 * range, and for which gfn and pfn are mutually
+			 * aligned.
+			 */
+
+			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
+				unsigned long gfn_start, gfn_end, tsize_pages;
+				tsize_pages = 1 << (tsize - 2);
+
+				gfn_start = gfn & ~(tsize_pages - 1);
+				gfn_end = gfn_start + tsize_pages;
+
+				if (gfn_start + pfn - gfn < start)
+					continue;
+				if (gfn_end + pfn - gfn > end)
+					continue;
+				if ((gfn & (tsize_pages - 1)) !=
+				    (pfn & (tsize_pages - 1)))
+					continue;
+
+				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
+				pfn &= ~(tsize_pages - 1);
+				break;
+			}
+		} else if (vma && hva >= vma->vm_start &&
+			   (vma->vm_flags & VM_HUGETLB)) {
+			unsigned long psize = vma_kernel_pagesize(vma);
+
+			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
+				MAS1_TSIZE_SHIFT;
+
+			/*
+			 * Take the largest page size that satisfies both host
+			 * and guest mapping
+			 */
+			tsize = min(__ilog2(psize) - 10, tsize);
+
+			/*
+			 * e500 doesn't implement the lowest tsize bit,
+			 * or 1K pages.
+			 */
+			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
+		}
+
+		up_read(&current->mm->mmap_sem);
+	}
+
+	if (likely(!pfnmap)) {
+		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
+		pfn = gfn_to_pfn_memslot(slot, gfn);
+		if (is_error_noslot_pfn(pfn)) {
+			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
+					(long)gfn);
+			return -EINVAL;
+		}
+
+		/* Align guest and physical address to page map boundaries */
+		pfn &= ~(tsize_pages - 1);
+		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
+	}
+
+	/* Drop old ref and setup new one. */
+	kvmppc_e500_ref_release(ref);
+	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
+
+	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
+				ref, gvaddr, stlbe);
+
+	/* Clear i-cache for new pages */
+	kvmppc_mmu_flush_icache(pfn);
+
+	/* Drop refcount on page, so that mmu notifiers can clear it */
+	kvm_release_pfn_clean(pfn);
+
+	return 0;
+}
+
+/* XXX only map the one-one case, for now use TLB0 */
+static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
+				struct kvm_book3e_206_tlb_entry *stlbe)
+{
+	struct kvm_book3e_206_tlb_entry *gtlbe;
+	struct tlbe_ref *ref;
+	int stlbsel = 0;
+	int sesel = 0;
+	int r;
+
+	gtlbe = get_entry(vcpu_e500, 0, esel);
+	ref = &vcpu_e500->gtlb_priv[0][esel].ref;
+
+	r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
+				   get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
+				   gtlbe, 0, stlbe, ref);
+	if (r)
+		return r;
+
+	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);
+
+	return 0;
+}
+
+/* Caller must ensure that the specified guest TLB entry is safe to insert into
+ * the shadow TLB. */
+/* XXX for both one-one and one-to-many , for now use TLB1 */
+static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
+		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
+{
+	struct tlbe_ref *ref;
+	unsigned int sesel;
+	int r;
+	int stlbsel = 1;
+
+	sesel = vcpu_e500->host_tlb1_nv++;
+
+	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
+		vcpu_e500->host_tlb1_nv = 0;
+
+	ref = &vcpu_e500->tlb_refs[1][sesel];
+	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
+				   ref);
+	if (r)
+		return r;
+
+	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
+		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
+	}
+	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+
+	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);
+
+	return 0;
+}
+
+void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
+		    unsigned int index)
+{
+	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+	struct tlbe_priv *priv;
+	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
+	int tlbsel = tlbsel_of(index);
+	int esel = esel_of(index);
+
+	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
+
+	switch (tlbsel) {
+	case 0:
+		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
+
+		/* Triggers after clear_tlb_refs or on initial mapping */
+		if (!(priv->ref.flags & E500_TLB_VALID)) {
+			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+		} else {
+			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
+						&priv->ref, eaddr, &stlbe);
+			write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
+		}
+		break;
+
+	case 1: {
+		gfn_t gfn = gpaddr >> PAGE_SHIFT;
+		kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
+				     esel);
+		break;
+	}
+
+	default:
+		BUG();
+		break;
+	}
+}
+
+/************* MMU Notifiers *************/
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+	trace_kvm_unmap_hva(hva);
+
+	/*
+	 * Flush all shadow tlb entries everywhere. This is slow, but
+	 * we are 100% sure that we catch the to be unmapped page
+	 */
+	kvm_flush_remote_tlbs(kvm);
+
+	return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+	/* kvm_unmap_hva flushes everything anyways */
+	kvm_unmap_hva(kvm, start);
+
+	return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	/* XXX could be more clever ;) */
+	return 0;
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	/* XXX could be more clever ;) */
+	return 0;
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+	/* The page will get remapped properly on its next fault */
+	kvm_unmap_hva(kvm, hva);
+}
+
+/*****************************************/
+
+int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
+	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
+
+	/*
+	 * This should never happen on real e500 hardware, but is
+	 * architecturally possible -- e.g. in some weird nested
+	 * virtualization case.
+	 */
+	if (host_tlb_params[0].entries == 0 ||
+	    host_tlb_params[1].entries == 0) {
+		pr_err("%s: need to know host tlb size\n", __func__);
+		return -ENODEV;
+	}
+
+	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
+				  TLBnCFG_ASSOC_SHIFT;
+	host_tlb_params[1].ways = host_tlb_params[1].entries;
+
+	if (!is_power_of_2(host_tlb_params[0].entries) ||
+	    !is_power_of_2(host_tlb_params[0].ways) ||
+	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
+	    host_tlb_params[0].ways == 0) {
+		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
+		       __func__, host_tlb_params[0].entries,
+		       host_tlb_params[0].ways);
+		return -ENODEV;
+	}
+
+	host_tlb_params[0].sets =
+		host_tlb_params[0].entries / host_tlb_params[0].ways;
+	host_tlb_params[1].sets = 1;
+
+	vcpu_e500->tlb_refs[0] =
+		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
+			GFP_KERNEL);
+	if (!vcpu_e500->tlb_refs[0])
+		goto err;
+
+	vcpu_e500->tlb_refs[1] =
+		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
+			GFP_KERNEL);
+	if (!vcpu_e500->tlb_refs[1])
+		goto err;
+
+	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
+					   host_tlb_params[1].entries,
+					   GFP_KERNEL);
+	if (!vcpu_e500->h2g_tlb1_rmap)
+		goto err;
+
+	return 0;
+
+err:
+	kfree(vcpu_e500->tlb_refs[0]);
+	kfree(vcpu_e500->tlb_refs[1]);
+	return -EINVAL;
+}
+
+void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+	kfree(vcpu_e500->h2g_tlb1_rmap);
+	kfree(vcpu_e500->tlb_refs[0]);
+	kfree(vcpu_e500->tlb_refs[1]);
+}
diff --git a/arch/powerpc/kvm/e500_mmu_host.h b/arch/powerpc/kvm/e500_mmu_host.h
new file mode 100644
index 000000000000..9e4d4a20e694
--- /dev/null
+++ b/arch/powerpc/kvm/e500_mmu_host.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef KVM_E500_MMU_HOST_H
+#define KVM_E500_MMU_HOST_H
+
+void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
+			 int esel);
+
+void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500);
+void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500);
+int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500);
+void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);
+
+#endif /* KVM_E500_MMU_HOST_H */