author		James Hogan <james.hogan@imgtec.com>	2016-10-07 20:15:19 -0400
committer	James Hogan <james.hogan@imgtec.com>	2017-02-03 10:20:58 -0500
commit		7e3d2a750bcb0a7fed84e14e562d752c39fdf542
tree		615c2276a5a7a60fedc31bc5d80a59fb0244ef78
parent		fb99589391a9ed2e505dc7c3d02651a1a7b9f72b
KVM: MIPS/MMU: Convert TLB mapped faults to page tables

Now that we have GVA page tables and an optimised TLB refill handler in
place, convert the handling of page faults in TLB mapped segments from
the guest so that they fill a single GVA page table entry and invalidate
the TLB entry, rather than filling a TLB entry pair directly.

Also remove the now unused kvm_mips_get_{kernel,user}_asid() functions
in mmu.c and kvm_mips_host_tlb_write() in tlb.c.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
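The heart of the conversion is how the new handler builds the page table
entry: rather than composing an EntryLo pair and writing it straight into
the host TLB, it derives one protection value from the guest's EntryLo
bits, stores it in the GVA page table, and invalidates any stale host TLB
entry so the optimised refill handler repopulates it from the page table
on the next access. Below is a minimal sketch of that derivation, assuming
the usual MIPS kernel headers; the helper name kvm_mips_gva_pte() is
hypothetical (the patch open-codes this logic inside
kvm_mips_handle_mapped_seg_tlb_fault()):

#include <linux/kvm_types.h>	/* kvm_pfn_t */
#include <asm/mipsregs.h>	/* ENTRYLO_V, ENTRYLO_D */
#include <asm/pgtable.h>	/* pfn_pte(), PAGE_SHARED, PAGE_READONLY */

/*
 * Hypothetical helper, for illustration only: derive a GVA page table
 * entry from a guest TLB EntryLo value and the host page frame backing it.
 */
static pte_t kvm_mips_gva_pte(kvm_pfn_t pfn, long tlb_lo)
{
	/*
	 * Invalid guest entries get an empty protection so the next access
	 * faults again; writable (dirty) entries become shared writable
	 * pages; clean entries become read-only.
	 */
	pte_t pte = pfn_pte(pfn, (!(tlb_lo & ENTRYLO_V)) ? __pgprot(0) :
				 (tlb_lo & ENTRYLO_D) ? PAGE_SHARED :
				 PAGE_READONLY);

	/*
	 * Mark present mappings young and dirty up front: on MIPS these are
	 * software bits gating the hardware valid and write bits, so the
	 * TLB refill handler can install the entry without another fault.
	 */
	if (pte_present(pte))
		pte = pte_mkyoung(pte_mkdirty(pte));
	return pte;
}

Using PAGE_READONLY for clean entries means the first guest write takes a
TLB modified exception, giving the hypervisor a chance to emulate the
guest TLB's dirty bit before upgrading the mapping.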
-rw-r--r--	arch/mips/include/asm/kvm_host.h	7
-rw-r--r--	arch/mips/kvm/emulate.c			6
-rw-r--r--	arch/mips/kvm/mmu.c			93
-rw-r--r--	arch/mips/kvm/tlb.c			64
4 files changed, 40 insertions(+), 130 deletions(-)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 40aab4f5007c..f7680999e28a 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -599,7 +599,8 @@ extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 					      struct kvm_vcpu *vcpu);
 
 extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
-						struct kvm_mips_tlb *tlb);
+						struct kvm_mips_tlb *tlb,
+						unsigned long gva);
 
 extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
 						     u32 *opc,
@@ -613,10 +614,6 @@ extern enum emulation_result kvm_mips_handle_tlbmod(u32 cause,
 
 extern void kvm_mips_dump_host_tlbs(void);
 extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
-extern int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
-				   unsigned long entrylo0,
-				   unsigned long entrylo1,
-				   int flush_dcache_mask);
 extern void kvm_mips_flush_host_tlb(int skip_kseg0);
 extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
 				 bool user, bool kernel);
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 19eaeda6975c..3ced662e012e 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1770,7 +1770,8 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
 			 * We fault an entry from the guest tlb to the
 			 * shadow host TLB
 			 */
-			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
+			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+								 va)) {
 				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
 					__func__, va, index, vcpu,
 					read_c0_entryhi());
@@ -2746,7 +2747,8 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
 			 * OK we have a Guest TLB entry, now inject it into the
 			 * shadow host TLB
 			 */
-			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
+			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+								 va)) {
 				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
 					__func__, va, index, vcpu,
 					read_c0_entryhi());
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index afb47f21d8bc..62122d297e52 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -61,22 +61,6 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
 }
 
-static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
-{
-	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
-	int cpu = smp_processor_id();
-
-	return cpu_asid(cpu, kern_mm);
-}
-
-static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
-{
-	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
-	int cpu = smp_processor_id();
-
-	return cpu_asid(cpu, user_mm);
-}
-
 /**
  * kvm_mips_walk_pgd() - Walk page table with optional allocation.
  * @pgd:	Page directory pointer.
@@ -411,67 +395,58 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
 }
 
 int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
-					 struct kvm_mips_tlb *tlb)
+					 struct kvm_mips_tlb *tlb,
+					 unsigned long gva)
 {
-	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
 	struct kvm *kvm = vcpu->kvm;
-	kvm_pfn_t pfn0, pfn1;
-	gfn_t gfn0, gfn1;
-	long tlb_lo[2];
-	int ret;
-
-	tlb_lo[0] = tlb->tlb_lo[0];
-	tlb_lo[1] = tlb->tlb_lo[1];
+	kvm_pfn_t pfn;
+	gfn_t gfn;
+	long tlb_lo = 0;
+	pte_t *ptep_gva;
+	unsigned int idx;
+	bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);
 
 	/*
 	 * The commpage address must not be mapped to anything else if the guest
 	 * TLB contains entries nearby, or commpage accesses will break.
 	 */
-	if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
-			VPN2_MASK & (PAGE_MASK << 1)))
-		tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
+	idx = TLB_LO_IDX(*tlb, gva);
+	if ((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & PAGE_MASK)
+		tlb_lo = tlb->tlb_lo[idx];
 
-	gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
-	gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
-	if (gfn0 >= kvm->arch.guest_pmap_npages ||
-	    gfn1 >= kvm->arch.guest_pmap_npages) {
-		kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
-			__func__, gfn0, gfn1, tlb->tlb_hi);
+	/* Find host PFN */
+	gfn = mips3_tlbpfn_to_paddr(tlb_lo) >> PAGE_SHIFT;
+	if (gfn >= kvm->arch.guest_pmap_npages) {
+		kvm_err("%s: Invalid gfn: %#llx, EHi: %#lx\n",
+			__func__, gfn, tlb->tlb_hi);
 		kvm_mips_dump_guest_tlbs(vcpu);
 		return -1;
 	}
-
-	if (kvm_mips_map_page(kvm, gfn0) < 0)
+	if (kvm_mips_map_page(kvm, gfn) < 0)
 		return -1;
+	pfn = kvm->arch.guest_pmap[gfn];
 
-	if (kvm_mips_map_page(kvm, gfn1) < 0)
+	/* Find GVA page table entry */
+	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva);
+	if (!ptep_gva) {
+		kvm_err("No ptep for gva %lx\n", gva);
 		return -1;
+	}
 
-	pfn0 = kvm->arch.guest_pmap[gfn0];
-	pfn1 = kvm->arch.guest_pmap[gfn1];
+	/* Write PFN into GVA page table, taking attributes from Guest TLB */
+	*ptep_gva = pfn_pte(pfn, (!(tlb_lo & ENTRYLO_V)) ? __pgprot(0) :
+				 (tlb_lo & ENTRYLO_D) ? PAGE_SHARED :
+				 PAGE_READONLY);
+	if (pte_present(*ptep_gva))
+		*ptep_gva = pte_mkyoung(pte_mkdirty(*ptep_gva));
 
-	/* Get attributes from the Guest TLB */
-	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
-		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
-		(tlb_lo[0] & ENTRYLO_D) |
-		(tlb_lo[0] & ENTRYLO_V);
-	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
-		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
-		(tlb_lo[1] & ENTRYLO_D) |
-		(tlb_lo[1] & ENTRYLO_V);
+	/* Invalidate this entry in the TLB, current guest mode ASID only */
+	kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);
 
 	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
 		  tlb->tlb_lo[0], tlb->tlb_lo[1]);
 
-	preempt_disable();
-	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
-					       kvm_mips_get_kernel_asid(vcpu) :
-					       kvm_mips_get_user_asid(vcpu));
-	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
-				      tlb->tlb_mask);
-	preempt_enable();
-
-	return ret;
+	return 0;
 }
 
 void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
@@ -582,7 +557,7 @@ u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
 			return KVM_INVALID_INST;
 		}
 		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
-					&vcpu->arch.guest_tlb[index])) {
+					&vcpu->arch.guest_tlb[index], va)) {
 			kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
 				__func__, opc, index, vcpu,
 				read_c0_entryhi());
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 06ee9a1d78a5..2fb76869d017 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -104,70 +104,6 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
 
-/* XXXKYMA: Must be called with interrupts disabled */
-/* set flush_dcache_mask == 0 if no dcache flush required */
-int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
-			    unsigned long entrylo0, unsigned long entrylo1,
-			    int flush_dcache_mask)
-{
-	unsigned long flags;
-	unsigned long old_entryhi;
-	int idx;
-
-	local_irq_save(flags);
-
-	old_entryhi = read_c0_entryhi();
-	write_c0_entryhi(entryhi);
-	mtc0_tlbw_hazard();
-
-	tlb_probe();
-	tlb_probe_hazard();
-	idx = read_c0_index();
-
-	if (idx > current_cpu_data.tlbsize) {
-		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
-		kvm_mips_dump_host_tlbs();
-		local_irq_restore(flags);
-		return -1;
-	}
-
-	write_c0_entrylo0(entrylo0);
-	write_c0_entrylo1(entrylo1);
-	mtc0_tlbw_hazard();
-
-	if (idx < 0)
-		tlb_write_random();
-	else
-		tlb_write_indexed();
-	tlbw_use_hazard();
-
-	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
-		  vcpu->arch.pc, idx, read_c0_entryhi(),
-		  read_c0_entrylo0(), read_c0_entrylo1());
-
-	/* Flush D-cache */
-	if (flush_dcache_mask) {
-		if (entrylo0 & ENTRYLO_V) {
-			++vcpu->stat.flush_dcache_exits;
-			flush_data_cache_page((entryhi & VPN2_MASK) &
-					      ~flush_dcache_mask);
-		}
-		if (entrylo1 & ENTRYLO_V) {
-			++vcpu->stat.flush_dcache_exits;
-			flush_data_cache_page(((entryhi & VPN2_MASK) &
-					       ~flush_dcache_mask) |
-					      (0x1 << PAGE_SHIFT));
-		}
-	}
-
-	/* Restore old ASID */
-	write_c0_entryhi(old_entryhi);
-	mtc0_tlbw_hazard();
-	local_irq_restore(flags);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_write);
-
 int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 				       struct kvm_vcpu *vcpu)
 {