author	Sanjay Lal <sanjayl@kymasys.com>	2012-11-21 21:34:05 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2013-05-07 21:55:35 -0400
commit	858dd5d4573353e2593f7ec6080bf09feeabcfc8 (patch)
tree	56b69ca06adf9706659b2463cf63c6ec6d46ee78 /arch
parent	e685c689f3a84e5e24a5867afc5e7b5857efa3e4 (diff)
KVM/MIPS32: MMU/TLB operations for the Guest.
- Note that this file is statically linked with the rest of the host
  kernel (KSEG0). This is because kernel modules are loaded into mapped
  space on MIPS, and we want to make sure that we don't take any host
  kernel TLB faults while manipulating TLBs.
- Virtual guest TLBs are implemented as a 64-entry array, regardless of
  the number of host TLB entries.
- Shadow TLBs map guest virtual addresses to host physical addresses.
- TLB miss handling details (see the sketch below):
    Guest KSEG0 TLBMISS (0x40000000 - 0x60000000): transparent to the guest.
    Guest KSEG2/3 (0x60000000 - 0x80000000) & guest UM TLBMISS (0x00000000 - 0x40000000):
        Look the address up in the guest/virtual TLB.
            If no entry matches, deliver the appropriate TLBMISS LD/ST
            exception to the guest.
            If an entry exists in the guest TLB but is NOT valid, deliver
            a TLB invalid exception to the guest.
            If an entry exists in the guest TLB and is valid, inject the
            TLB entry into the shadow TLB.

Signed-off-by: Sanjay Lal <sanjayl@kymasys.com>
Cc: kvm@vger.kernel.org
Cc: linux-mips@linux-mips.org
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
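[Editorial sketch] The dispatch described above, condensed into pseudo-C. The kvm_mips_* calls are real functions from this patch; guest_entryhi(), deliver_guest_exception(), the EXC_* codes and TLB_IS_VALID() are hypothetical placeholders used only for illustration:

/*
 * Rough sketch of the miss dispatch described above; helper names
 * marked hypothetical are not part of the patch.
 */
static int sketch_handle_guest_tlb_miss(struct kvm_vcpu *vcpu,
					unsigned long badvaddr)
{
	int idx;

	/* Guest KSEG0 (0x40000000 - 0x60000000): transparent to the guest */
	if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0)
		return kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu);

	/* Guest UM (0x0 - 0x40000000) and KSEG2/3 (0x60000000 - 0x80000000):
	 * consult the 64-entry virtual guest TLB first */
	idx = kvm_mips_guest_tlb_lookup(vcpu, (badvaddr & VPN2_MASK) |
					(guest_entryhi(vcpu) & ASID_MASK));
	if (idx < 0)	/* no guest entry: guest sees a TLBMISS LD/ST */
		return deliver_guest_exception(vcpu, EXC_TLBMISS);

	if (!TLB_IS_VALID(vcpu->arch.guest_tlb[idx], badvaddr))
		return deliver_guest_exception(vcpu, EXC_TLBINV);

	/* Valid guest entry: inject it into the shadow/host TLB */
	return kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
					&vcpu->arch.guest_tlb[idx],
					NULL, NULL);
}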
Diffstat (limited to 'arch')
-rw-r--r--	arch/mips/kvm/kvm_tlb.c	932
1 file changed, 932 insertions, 0 deletions
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
new file mode 100644
index 000000000000..e3f0d9b8b6c5
--- /dev/null
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -0,0 +1,932 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB 0
#define KVM_GUEST_SP_TLB 1

#define PRIx64 "llx"

/* Use VZ EntryHi.EHINV to invalidate TLB entries */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);

uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Dump the current contents of the host TLB (for debugging). */
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	printk("HOST TLBs:\n");
	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	printk("Guest TLBs:\n");
	printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}

void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
{
	int i;
	volatile struct kvm_mips_tlb tlb;

	printk("Shadow TLBs:\n");
	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}

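/*
 * Lazily populate the guest physical page map: resolve the host pfn
 * backing @gfn on first touch and cache it in kvm->arch.guest_pmap.
 */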
static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return;

	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn))
		panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);

	kvm->arch.guest_pmap[gfn] = pfn;
}

/* Translate guest KSEG0 addresses to host physical addresses */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}
	kvm_mips_map_page(vcpu->kvm, gfn);
	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}

/* XXXKYMA: Must be called with interrupts disabled */
/* Set flush_dcache_mask == 0 if no dcache flush is required */
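/*
 * Write (entryhi, entrylo0, entrylo1) into the host TLB: probe for an
 * existing entry matching the VPN2/ASID in entryhi and overwrite it if
 * found, otherwise write to a random victim slot.
 */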
int
kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			unsigned long entrylo0, unsigned long entrylo1,
			int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	volatile int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	if (idx >= current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		/* Restore the saved ASID and re-enable interrupts on the
		 * error path too */
		write_c0_entryhi(old_entryhi);
		mtc0_tlbw_hazard();
		local_irq_restore(flags);
		return -1;
	}

	if (idx < 0) {
		idx = read_c0_random() % current_cpu_data.tlbsize;
		write_c0_index(idx);
		mtc0_tlbw_hazard();
	}
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	tlbw_use_hazard();

#ifdef DEBUG
	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
		  "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());
#endif

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}

/* XXXKYMA: Must be called with interrupts disabled */
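/*
 * Handle a miss on a guest KSEG0 address: map the even/odd page pair
 * containing badvaddr and write one host TLB entry covering both.
 * In the EntryLo words, (0x3 << 3) is the cache attribute (cacheable,
 * noncoherent), (1 << 2) the dirty bit and (0x1 << 1) the valid bit.
 */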
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	kvm_mips_map_page(vcpu->kvm, gfn);
	kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       flush_dcache_mask);
}

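/*
 * Wire the vcpu's commpage into the host TLB. Despite its name,
 * kvm_mips_get_commpage_asid() returns the reserved TLB index
 * (arch.commpage_tlb), which is what write_c0_index() expects here.
 */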
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

#ifdef DEBUG
	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());
#endif

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}

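/*
 * Inject a guest TLB entry into the host TLB: resolve both guest
 * physical pages to host pfns, then build host EntryLo values that
 * preserve the guest entry's D and V bits. The host physical addresses
 * are optionally returned through @hpa0/@hpa1.
 */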
int
kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
				     struct kvm_mips_tlb *tlb,
				     unsigned long *hpa0, unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	pfn_t pfn0, pfn1;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
				       >> PAGE_SHIFT);
		kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
				       >> PAGE_SHIFT);

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
		   kvm_mips_get_kernel_asid(vcpu) :
		   kvm_mips_get_user_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

#ifdef DEBUG
	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);
#endif

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       tlb->tlb_mask);
}

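/*
 * Search the guest TLB for an entry matching @entryhi's VPN2 and ASID
 * (global entries match any ASID). Returns the index or -1 on miss.
 */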
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
		     ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
		    (TLB_IS_GLOBAL(tlb[i]) ||
		     (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
			index = i;
			break;
		}
	}

#ifdef DEBUG
	/* tlb[i] would be out of bounds on a miss; only log on a hit */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);
#endif

	return index;
}

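/*
 * Probe the host TLB for @vaddr under the vcpu's current kernel or user
 * ASID. Returns the host TLB index, negative if not present.
 */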
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	volatile int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

#ifdef DEBUG
	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
#endif

	return idx;
}

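/*
 * Invalidate any host TLB entry mapping @va under the guest user ASID
 * by rewriting it with a unique, unmatchable EntryHi value.
 */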
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* Index 0 is a valid entry too, so invalidate on any probe hit */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

#ifdef DEBUG
	if (idx >= 0) {
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) |
			  (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK),
			  idx);
	}
#endif

	return 0;
}

/* XXXKYMA: Fix: Guest USER/KERNEL no longer share the same ASID */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
	unsigned long flags, old_entryhi;

	if (index >= current_cpu_data.tlbsize)
		BUG();

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi(UNIQUE_ENTRYHI(index));
	mtc0_tlbw_hazard();

	write_c0_index(index);
	mtc0_tlbw_hazard();

	write_c0_entrylo0(0);
	mtc0_tlbw_hazard();

	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	return 0;
}

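/*
 * Invalidate the entire host TLB; if @skip_kseg0 is set, entries that
 * map guest KSEG0 are left in place.
 */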
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}

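/*
 * Allocate a fresh ASID for @mm on @cpu, mirroring the host's
 * get_new_mmu_context(): on ASID rollover, flush the TLB (and a VTAG
 * icache, if present) and bump the ASID version.
 */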
void
kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	if (!((asid += ASID_INC) & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

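/* Save the current host TLB contents into this vcpu's per-CPU shadow TLB. */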
void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int cpu = smp_processor_id();

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_read();
		tlbw_use_hazard();

		vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
		vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
		vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
		vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}

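/* Reload this vcpu's per-CPU shadow TLB contents into the host TLB. */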
void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;
	int cpu = smp_processor_id();

	local_irq_save(flags);

	old_ctx = read_c0_entryhi();

	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
		mtc0_tlbw_hazard();
		write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
		write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);

		write_c0_index(entry);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}

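/* Invalidate every entry in the local host TLB. */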
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}

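/* Initialize each per-CPU shadow TLB entry to a unique invalid value. */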
void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
{
	int cpu, entry;

	for_each_possible_cpu(cpu) {
		for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
			vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
			    UNIQUE_ENTRYHI(entry);
			vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
			vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
			vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
			    read_c0_pagemask();
#ifdef DEBUG
			kvm_debug("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
				  cpu, entry,
				  vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
				  vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
				  vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
#endif
		}
	}
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

#ifdef DEBUG
	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
#endif

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if (((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_info("[%d]: cpu_context: %#lx\n", cpu,
			 cpu_context(cpu, current->mm));
		kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			 cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			 vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
			 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
	}

	/* Only reload shadow host TLB if new ASIDs haven't been allocated */
#if 0
	if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
		kvm_mips_flush_host_tlb(0);
		kvm_shadow_tlb_load(vcpu);
	}
#endif

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 ASID_MASK);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so, then the pre-empted ASID is
		 * no longer valid; we need to set it to what it should be
		 * based on the mode of the guest (kernel/user)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

#if 0
	if ((atomic_read(&kvm_mips_instance) > 1)) {
		kvm_shadow_tlb_put(vcpu);
	}
#endif

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}

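/*
 * Fetch the guest instruction at @opc: KSEG0 fetches go through the
 * guest physical map, while mapped-segment fetches first make sure a
 * host TLB mapping exists, faulting one in from the guest TLB if needed.
 */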
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			index = kvm_mips_guest_tlb_lookup(vcpu,
					((unsigned long) opc & VPN2_MASK) |
					(kvm_read_c0_guest_entryhi(cop0) &
					 ASID_MASK));
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
						&vcpu->arch.guest_tlb[index],
						NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							(unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}


EXPORT_SYMBOL(kvm_local_flush_tlb_all);
EXPORT_SYMBOL(kvm_shadow_tlb_put);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
EXPORT_SYMBOL(kvm_shadow_tlb_load);
EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);
EXPORT_SYMBOL(kvm_arch_vcpu_put);