aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJames Hogan <james.hogan@imgtec.com>2016-10-07 17:32:13 -0400
committerJames Hogan <james.hogan@imgtec.com>2017-02-03 10:21:02 -0500
commit49ec508e3bd0b11aaf534af0d63e4a17e05594e4 (patch)
treec270f1567e466d6a67d22a76bcca3933b1a27bcb
parent8af0e3c2e89e56dc5b064e5854b87a19e70e2710 (diff)
KVM: MIPS/TLB: Drop kvm_local_flush_tlb_all()
Now that KVM no longer uses wired entries we can safely use local_flush_tlb_all() when we need to flush the entire TLB (on the start of a new ASID cycle). This doesn't flush wired entries, which allows other code to use them without KVM clobbering them all the time. It also is more up to date, knowing about the tlbinv architectural feature, flushing of micro TLB on cores where that is necessary (Loongson I believe), and knows to stop the HTW while doing so. Signed-off-by: James Hogan <james.hogan@imgtec.com> Cc: Paolo Bonzini <pbonzini@redhat.com> Cc: "Radim Krčmář" <rkrcmar@redhat.com> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org
-rw-r--r--arch/mips/include/asm/kvm_host.h1
-rw-r--r--arch/mips/include/asm/mmu_context.h5
-rw-r--r--arch/mips/kvm/mmu.c2
-rw-r--r--arch/mips/kvm/tlb.c29
4 files changed, 1 insertion(+), 36 deletions(-)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index a26504bee21c..1a83b6f85de2 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -641,7 +641,6 @@ void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
 			       bool user);
 extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 				    struct kvm_vcpu *vcpu);
-extern void kvm_local_flush_tlb_all(void);
 extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
 extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 16eb8521398e..2abf94f72c0a 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -99,17 +99,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
-	extern void kvm_local_flush_tlb_all(void);
 	unsigned long asid = asid_cache(cpu);
 
 	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
-#ifdef CONFIG_KVM
-		kvm_local_flush_tlb_all();	/* start new asid cycle */
-#else
 		local_flush_tlb_all();	/* start new asid cycle */
-#endif
 		if (!asid)		/* fix version if needed */
 			asid = asid_first_version(cpu);
 	}
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index c4e9c65065ea..cf832ea963d8 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -453,7 +453,7 @@ void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 	if (cpu_has_vtag_icache)
 		flush_icache_all();
 
-	kvm_local_flush_tlb_all();	/* start new asid cycle */
+	local_flush_tlb_all();		/* start new asid cycle */
 
 	if (!asid)		/* fix version if needed */
 		asid = asid_first_version(cpu);
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 8af5fd2cb107..51f4aee717e7 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -263,35 +263,6 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
 }
 EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);
 
-void kvm_local_flush_tlb_all(void)
-{
-	unsigned long flags;
-	unsigned long old_ctx;
-	int entry = 0;
-
-	local_irq_save(flags);
-	/* Save old context and create impossible VPN2 value */
-	old_ctx = read_c0_entryhi();
-	write_c0_entrylo0(0);
-	write_c0_entrylo1(0);
-
-	/* Blast 'em all away. */
-	while (entry < current_cpu_data.tlbsize) {
-		/* Make sure all entries differ. */
-		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
-		write_c0_index(entry);
-		mtc0_tlbw_hazard();
-		tlb_write_indexed();
-		tlbw_use_hazard();
-		entry++;
-	}
-	write_c0_entryhi(old_ctx);
-	mtc0_tlbw_hazard();
-
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);
-
 /**
  * kvm_mips_suspend_mm() - Suspend the active mm.
  * @cpu	The CPU we're running on.