author	James Hogan <james.hogan@imgtec.com>	2016-12-16 10:57:00 -0500
committer	James Hogan <james.hogan@imgtec.com>	2017-02-03 10:20:56 -0500
commit	aba8592950f1c698bb9c1b42d4f4dab07a145674 (patch)
tree	a02c3bfe5ce29de735f063db6ea03b0332ca2fe6
parent	a31b50d741bd85a127d5ef2c21c0788041bc41a9 (diff)
KVM: MIPS/MMU: Invalidate stale GVA PTEs on TLBW
Implement invalidation of specific pairs of GVA page table entries in one
or both of the GVA page tables. This is used when existing mappings are
replaced in the guest TLB by emulated TLBWI/TLBWR instructions. Due to the
sharing of page tables in the host kernel range, we should be careful not
to allow host pages to be invalidated.

Add a helper kvm_mips_walk_pgd() which can be used when walking of either
GPA (future patches) or GVA page tables is needed, optionally with
allocation of page tables along the way when they don't exist.

GPA page table walking will need to be protected by the kvm->mmu_lock, so
we also add a small MMU page cache in each KVM VCPU, like that found for
other architectures but smaller. This allows enough pages to be
pre-allocated to handle a single fault without holding the lock, allowing
the helper to run with the lock held without having to handle allocation
failures. Using the same mechanism for GVA allows the same code to be
used, and allows it to use the same cache of allocated pages if the GPA
walk didn't need to allocate any new tables.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
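The pre-allocation step described above (filling the cache before a fault is
handled) is not part of this patch; only the consume and free sides of the
cache are added here. A minimal sketch of how a later fault path might top up
the per-VCPU cache and then walk with allocation could look as follows. The
helper names mmu_topup_memory_cache() and kvm_mips_pte_for_gva() are
hypothetical and do not appear in this series:

/*
 * Hypothetical sketch, not part of this patch: pre-fill the per-VCPU
 * kvm_mmu_memory_cache so that a later kvm_mips_walk_pgd(..., cache, ...)
 * call never has to allocate pages itself.
 */
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	void *page;

	while (mc->nobjs < min) {
		page = (void *)__get_free_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		mc->objects[mc->nobjs++] = page;
	}
	return 0;
}

/* Illustrative GVA fault path: allocate first, then walk with the cache. */
static pte_t *kvm_mips_pte_for_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_page_cache;

	/* A single walk needs at most one new PMD and one new PTE page. */
	if (mmu_topup_memory_cache(cache, KVM_NR_MEM_OBJS))
		return NULL;

	return kvm_mips_walk_pgd(vcpu->arch.guest_kernel_mm.pgd, cache, gva);
}

For the GPA tables added in later patches, the top-up would happen before
taking kvm->mmu_lock, so that kvm_mips_walk_pgd() never allocates while the
lock is held.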
-rw-r--r--	arch/mips/include/asm/kvm_host.h	17
-rw-r--r--	arch/mips/kvm/emulate.c	6
-rw-r--r--	arch/mips/kvm/mips.c	1
-rw-r--r--	arch/mips/kvm/mmu.c	95
4 files changed, 119 insertions(+), 0 deletions(-)
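As a further illustration of the "pairs of GVA page table entries" mentioned
in the message, the following stand-alone user-space sketch (assuming 4 KiB
pages; not taken from the patch) shows how the addr &= PAGE_MASK << 1 masking
in kvm_trap_emul_invalidate_gva() selects the VPN2-aligned even/odd page pair
whose two consecutive PTEs, ptep[0] and ptep[1], are cleared:

/*
 * Illustrative sketch only: demonstrate the even/odd page pair covered by
 * one guest TLB entry (one VPN2). Assumes 4 KiB pages.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long gva = 0x00403abcUL;	/* arbitrary guest virtual address */
	unsigned long pair = gva & (PAGE_MASK << 1);	/* 8 KiB aligned pair base */

	printf("even page: 0x%lx\n", pair);		/* covered by ptep[0] */
	printf("odd page:  0x%lx\n", pair + PAGE_SIZE);	/* covered by ptep[1] */
	return 0;
}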
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index f5145dcab319..40aab4f5007c 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -261,6 +261,17 @@ struct kvm_mips_tlb {
 	long tlb_lo[2];
 };
 
+#define KVM_NR_MEM_OBJS 4
+
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
+struct kvm_mmu_memory_cache {
+	int nobjs;
+	void *objects[KVM_NR_MEM_OBJS];
+};
+
 #define KVM_MIPS_AUX_FPU	0x1
 #define KVM_MIPS_AUX_MSA	0x2
 
@@ -327,6 +338,9 @@ struct kvm_vcpu_arch {
 	/* Guest ASID of last user mode execution */
 	unsigned int last_user_gasid;
 
+	/* Cache some mmu pages needed inside spinlock regions */
+	struct kvm_mmu_memory_cache mmu_page_cache;
+
 	int last_sched_cpu;
 
 	/* WAIT executed */
@@ -631,6 +645,9 @@ enum kvm_mips_flush {
 	KMF_GPA = 0x2,
 };
 void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
+				  bool user);
 extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
 						   unsigned long gva);
 extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 1d399396e486..19eaeda6975c 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -864,11 +864,17 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
 	/* No need to flush for entries which are already invalid */
 	if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
 		return;
+	/* Don't touch host kernel page tables or TLB mappings */
+	if ((unsigned long)tlb->tlb_hi > 0x7fffffff)
+		return;
 	/* User address space doesn't need flushing for KSeg2/3 changes */
 	user = tlb->tlb_hi < KVM_GUEST_KSEG0;
 
 	preempt_disable();
 
+	/* Invalidate page table entries */
+	kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user);
+
 	/*
 	 * Probe the shadow host TLB for the entry being overwritten, if one
 	 * matches, invalidate it
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index a687864de428..c369fdd19fbc 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -396,6 +396,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 
 	kvm_mips_dump_stats(vcpu);
 
+	kvm_mmu_free_memory_caches(vcpu);
 	kfree(vcpu->arch.guest_ebase);
 	kfree(vcpu->arch.kseg0_commpage);
 	kfree(vcpu);
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 09146b62552f..dbf2b55ee874 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -14,6 +14,26 @@
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 
+static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+	while (mc->nobjs)
+		free_page((unsigned long)mc->objects[--mc->nobjs]);
+}
+
+static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+{
+	void *p;
+
+	BUG_ON(!mc || !mc->nobjs);
+	p = mc->objects[--mc->nobjs];
+	return p;
+}
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+}
+
 static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
@@ -30,6 +50,56 @@ static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 	return cpu_asid(cpu, user_mm);
 }
 
+/**
+ * kvm_mips_walk_pgd() - Walk page table with optional allocation.
+ * @pgd:	Page directory pointer.
+ * @addr:	Address to index page table using.
+ * @cache:	MMU page cache to allocate new page tables from, or NULL.
+ *
+ * Walk the page tables pointed to by @pgd to find the PTE corresponding to the
+ * address @addr. If page tables don't exist for @addr, they will be created
+ * from the MMU cache if @cache is not NULL.
+ *
+ * Returns:	Pointer to pte_t corresponding to @addr.
+ *		NULL if a page table doesn't exist for @addr and !@cache.
+ *		NULL if a page table allocation failed.
+ */
+static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
+				unsigned long addr)
+{
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd += pgd_index(addr);
+	if (pgd_none(*pgd)) {
+		/* Not used on MIPS yet */
+		BUG();
+		return NULL;
+	}
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud)) {
+		pmd_t *new_pmd;
+
+		if (!cache)
+			return NULL;
+		new_pmd = mmu_memory_cache_alloc(cache);
+		pmd_init((unsigned long)new_pmd,
+			 (unsigned long)invalid_pte_table);
+		pud_populate(NULL, pud, new_pmd);
+	}
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd)) {
+		pte_t *new_pte;
+
+		if (!cache)
+			return NULL;
+		new_pte = mmu_memory_cache_alloc(cache);
+		clear_page(new_pte);
+		pmd_populate_kernel(NULL, pmd, new_pte);
+	}
+	return pte_offset(pmd, addr);
+}
+
 static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 {
 	int srcu_idx, err = 0;
@@ -81,6 +151,31 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
 	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
 }
 
+void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
+				  bool user)
+{
+	pgd_t *pgdp;
+	pte_t *ptep;
+
+	addr &= PAGE_MASK << 1;
+
+	pgdp = vcpu->arch.guest_kernel_mm.pgd;
+	ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
+	if (ptep) {
+		ptep[0] = pfn_pte(0, __pgprot(0));
+		ptep[1] = pfn_pte(0, __pgprot(0));
+	}
+
+	if (user) {
+		pgdp = vcpu->arch.guest_user_mm.pgd;
+		ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
+		if (ptep) {
+			ptep[0] = pfn_pte(0, __pgprot(0));
+			ptep[1] = pfn_pte(0, __pgprot(0));
+		}
+	}
+}
+
 /*
  * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
  * Flush a range of guest physical address space from the VM's GPA page tables.