author	James Hogan <james.hogan@imgtec.com>	2017-01-05 05:44:38 -0500
committer	James Hogan <james.hogan@imgtec.com>	2017-02-03 10:20:57 -0500
commit	fb99589391a9ed2e505dc7c3d02651a1a7b9f72b
tree	f9abc60b91e438386ca425eac068a5575e4bd3c0
parent	aba8592950f1c698bb9c1b42d4f4dab07a145674
KVM: MIPS/MMU: Convert KSeg0 faults to page tables
Now that we have GVA page tables and an optimised TLB refill handler in
place, convert the handling of KSeg0 page faults from the guest to fill
the GVA page tables and invalidate the TLB entry, rather than filling a
TLB entry directly.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
 arch/mips/kvm/mmu.c | 79 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 64 insertions(+), 15 deletions(-)
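In essence, the patch below swaps a direct host TLB write for a two-step
scheme: fill the GVA page table entries for the faulting even/odd page pair,
then invalidate any stale TLB entry so the optimised TLB refill handler
repopulates it from the page table on the next access. The condensed sketch
of the new fault path that follows is illustrative only, not a buildable
function: it is distilled from the diff, relies on KVM/MIPS internals
introduced or used there (kvm_trap_emul_pte_for_gva(), kvm_mips_host_tlb_inv(),
guest_pmap), and the double-page alignment of vaddr is an assumption about
unchanged surrounding code.

	/* Illustrative sketch of the new KSeg0 fault path; not buildable alone. */
	static int kseg0_fault_sketch(struct kvm_vcpu *vcpu, unsigned long badvaddr)
	{
		struct kvm *kvm = vcpu->kvm;
		/* Assumed: align to an even/odd page pair, since a MIPS TLB
		 * entry maps two consecutive pages. */
		unsigned long vaddr = badvaddr & (PAGE_MASK << 1);
		gfn_t gfn = KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT;
		pte_t *ptep_gva;
		/* 1. Host PFNs backing the even/odd guest page pair. */
		kvm_pfn_t pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
		kvm_pfn_t pfn1 = kvm->arch.guest_pmap[gfn | 0x1];

		/* 2. Walk (allocating as needed) the GVA page table to the PTE pair. */
		ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, vaddr);
		if (!ptep_gva)
			return -1;

		/* 3. Fill both PTEs, marked young and dirty. */
		ptep_gva[0] = pte_mkyoung(pte_mkdirty(pfn_pte(pfn0, PAGE_SHARED)));
		ptep_gva[1] = pte_mkyoung(pte_mkdirty(pfn_pte(pfn1, PAGE_SHARED)));

		/* 4. Drop any stale TLB entry (guest kernel ASID only); the
		 * refill handler repopulates it from the GVA page table. */
		kvm_mips_host_tlb_inv(vcpu, vaddr, false, true);
		return 0;
	}

Marking the PTEs young and dirty up front matters because on MIPS those
software bits feed the TLB valid and dirty bits, so the refill handler can
install a writable mapping without taking a further fault.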
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index dbf2b55ee874..afb47f21d8bc 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -14,6 +14,33 @@
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
+ * for which pages need to be cached.
+ */
+#if defined(__PAGETABLE_PMD_FOLDED)
+#define KVM_MMU_CACHE_MIN_PAGES 1
+#else
+#define KVM_MMU_CACHE_MIN_PAGES 2
+#endif
+
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+				  int min, int max)
+{
+	void *page;
+
+	BUG_ON(max > KVM_NR_MEM_OBJS);
+	if (cache->nobjs >= min)
+		return 0;
+	while (cache->nobjs < max) {
+		page = (void *)__get_free_page(GFP_KERNEL);
+		if (!page)
+			return -ENOMEM;
+		cache->objects[cache->nobjs++] = page;
+	}
+	return 0;
+}
+
 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 {
 	while (mc->nobjs)
@@ -151,6 +178,27 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
 	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
 }
 
+static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
+					unsigned long addr)
+{
+	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+	pgd_t *pgdp;
+	int ret;
+
+	/* We need a minimum of cached pages ready for page table creation */
+	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
+				     KVM_NR_MEM_OBJS);
+	if (ret)
+		return NULL;
+
+	if (KVM_GUEST_KERNEL_MODE(vcpu))
+		pgdp = vcpu->arch.guest_kernel_mm.pgd;
+	else
+		pgdp = vcpu->arch.guest_user_mm.pgd;
+
+	return kvm_mips_walk_pgd(pgdp, memcache, addr);
+}
+
 void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
 				  bool user)
 {
@@ -316,10 +364,8 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
 	gfn_t gfn;
 	kvm_pfn_t pfn0, pfn1;
 	unsigned long vaddr = 0;
-	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
 	struct kvm *kvm = vcpu->kvm;
-	const int flush_dcache_mask = 0;
-	int ret;
+	pte_t *ptep_gva;
 
 	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
 		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
@@ -327,6 +373,8 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
 		return -1;
 	}
 
+	/* Find host PFNs */
+
 	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
 	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
 		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
@@ -345,20 +393,21 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
 	pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
 	pfn1 = kvm->arch.guest_pmap[gfn | 0x1];
 
-	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
-		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
-		ENTRYLO_D | ENTRYLO_V;
-	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
-		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
-		ENTRYLO_D | ENTRYLO_V;
+	/* Find GVA page table entry */
+	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, vaddr);
+	if (!ptep_gva) {
+		kvm_err("No ptep for gva %lx\n", vaddr);
+		return -1;
+	}
 
-	preempt_disable();
-	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
-	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
-				      flush_dcache_mask);
-	preempt_enable();
+	/* Write host PFNs into GVA page table */
+	ptep_gva[0] = pte_mkyoung(pte_mkdirty(pfn_pte(pfn0, PAGE_SHARED)));
+	ptep_gva[1] = pte_mkyoung(pte_mkdirty(pfn_pte(pfn1, PAGE_SHARED)));
 
-	return ret;
+	/* Invalidate this entry in the TLB, guest kernel ASID only */
+	kvm_mips_host_tlb_inv(vcpu, vaddr, false, true);
+	return 0;
 }
 
 int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,