Diffstat (limited to 'arch/mips/kvm/kvm_tlb.c')
 arch/mips/kvm/kvm_tlb.c | 135 +------------------------------------------
 1 file changed, 1 insertion(+), 134 deletions(-)
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index c777dd36d4a8..50ab9c4d4a5d 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -10,7 +10,6 @@
  * Authors: Sanjay Lal <sanjayl@kymasys.com>
  */
 
-#include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
@@ -25,6 +24,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
+#include <asm/tlb.h>
 
 #undef CONFIG_MIPS_MT
 #include <asm/r4kcache.h>
@@ -35,9 +35,6 @@
 
 #define PRIx64 "llx"
 
-/* Use VZ EntryHi.EHINV to invalidate TLB entries */
-#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
-
 atomic_t kvm_mips_instance;
 EXPORT_SYMBOL(kvm_mips_instance);
 
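
The UNIQUE_ENTRYHI() macro removed above is the usual MIPS idiom for parking a TLB slot on a harmless address: each TLB entry maps an even/odd page pair, so stepping the VPN2 by 2 * PAGE_SIZE per index gives every slot a distinct EntryHi inside kseg0, which is never translated through the TLB; distinct values also avoid duplicate-match machine checks. A minimal user-space sketch of the arithmetic, assuming 4 KiB pages and the 32-bit CKSEG0 base (both assumptions, not taken from this diff):

#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumed 4 KiB pages */
#define CKSEG0     0x80000000UL       /* assumed 32-bit kseg0 base */

/* Each TLB entry maps an even/odd page pair, so step by 2 * PAGE_SIZE. */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((unsigned long)(idx) << (PAGE_SHIFT + 1)))

int main(void)
{
	for (int idx = 0; idx < 4; idx++)
		printf("entry %d -> EntryHi %#lx\n", idx, UNIQUE_ENTRYHI(idx));
	return 0;
}

The deleted kvm_mips_init_shadow_tlb() further down used exactly these values, paired with zero EntryLo words (V bit clear), to mark every shadow slot invalid.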
@@ -147,30 +144,6 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
 	}
 }
 
-void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
-{
-	int i;
-	volatile struct kvm_mips_tlb tlb;
-
-	printk("Shadow TLBs:\n");
-	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
-		tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
-		printk("TLB%c%3d Hi 0x%08lx ",
-		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
-		       i, tlb.tlb_hi);
-		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
-		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
-		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
-		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
-		       (tlb.tlb_lo0 >> 3) & 7);
-		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
-		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
-		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
-		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
-		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
-	}
-}
-
 static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 {
 	int srcu_idx, err = 0;
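
For reference, the dump that the deleted function produced decodes the R4000-style EntryLo word: G in bit 0, V in bit 1, D in bit 2, and the 3-bit cache coherency attribute in bits 5:3, which is what its (tlb_lo >> 3) & 7 expressions print as "attr". A stand-alone sketch of that decoding, with the MIPS3_PG_* constants restated here for illustration (the kernel takes them from asm/pgtable-bits.h):

#include <stdio.h>
#include <stdint.h>

/* R4000-style EntryLo bits, restated for illustration. */
#define MIPS3_PG_G 0x1   /* global: ignore ASID on match */
#define MIPS3_PG_V 0x2   /* valid */
#define MIPS3_PG_D 0x4   /* dirty, i.e. writable */

static void decode_entrylo(uint64_t lo)
{
	printf("%c%c%c attr %llu\n",
	       (lo & MIPS3_PG_V) ? 'V' : '*',
	       (lo & MIPS3_PG_D) ? 'D' : ' ',
	       (lo & MIPS3_PG_G) ? 'G' : ' ',
	       (unsigned long long)((lo >> 3) & 7));  /* cache attribute */
}

int main(void)
{
	decode_entrylo(0x1f);  /* valid + dirty + global, cache attr 3 */
	return 0;
}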
@@ -657,70 +630,6 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
-void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
-{
-	unsigned long flags;
-	unsigned long old_entryhi;
-	unsigned long old_pagemask;
-	int entry = 0;
-	int cpu = smp_processor_id();
-
-	local_irq_save(flags);
-
-	old_entryhi = read_c0_entryhi();
-	old_pagemask = read_c0_pagemask();
-
-	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
-		write_c0_index(entry);
-		mtc0_tlbw_hazard();
-		tlb_read();
-		tlbw_use_hazard();
-
-		vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
-		vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
-		vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
-		vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
-	}
-
-	write_c0_entryhi(old_entryhi);
-	write_c0_pagemask(old_pagemask);
-	mtc0_tlbw_hazard();
-
-	local_irq_restore(flags);
-
-}
-
-void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
-{
-	unsigned long flags;
-	unsigned long old_ctx;
-	int entry;
-	int cpu = smp_processor_id();
-
-	local_irq_save(flags);
-
-	old_ctx = read_c0_entryhi();
-
-	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
-		write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
-		mtc0_tlbw_hazard();
-		write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
-		write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
-
-		write_c0_index(entry);
-		mtc0_tlbw_hazard();
-
-		tlb_write_indexed();
-		tlbw_use_hazard();
-	}
-
-	tlbw_use_hazard();
-	write_c0_entryhi(old_ctx);
-	mtc0_tlbw_hazard();
-	local_irq_restore(flags);
-}
-
-
 void kvm_local_flush_tlb_all(void)
 {
 	unsigned long flags;
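
Both functions deleted above follow the same indexed TLB access discipline: stage the CP0 registers, point c0_index at the slot, execute mtc0_tlbw_hazard() so the CP0 writes settle before the TLB instruction, issue tlb_read() or tlb_write_indexed(), then tlbw_use_hazard() before consuming the result, all with interrupts disabled so the walk cannot be interleaved. A condensed, runnable model of that put/load walk; every identifier below is a hypothetical stand-in (a plain array plays the hardware TLB), not the real asm/mipsregs.h API:

#include <stdio.h>

#define TLBSIZE 4	/* stand-in for current_cpu_data.tlbsize */

struct tlb_entry { unsigned long hi, lo0, lo1, mask; };

/* Fake "hardware" TLB plus CP0 staging registers. */
static struct tlb_entry hw_tlb[TLBSIZE];
static unsigned long c0_index, c0_entryhi, c0_entrylo0, c0_entrylo1, c0_pagemask;

/* Hazard barriers are pipeline no-ops in this model. */
static void mtc0_tlbw_hazard(void) { }
static void tlbw_use_hazard(void) { }

static void tlb_read(void)
{
	c0_entryhi  = hw_tlb[c0_index].hi;
	c0_entrylo0 = hw_tlb[c0_index].lo0;
	c0_entrylo1 = hw_tlb[c0_index].lo1;
	c0_pagemask = hw_tlb[c0_index].mask;
}

static void tlb_write_indexed(void)
{
	hw_tlb[c0_index] = (struct tlb_entry){ c0_entryhi, c0_entrylo0,
					       c0_entrylo1, c0_pagemask };
}

int main(void)
{
	struct tlb_entry shadow[TLBSIZE];
	unsigned long i;

	/* "put": select a slot, settle the index write, read, settle again. */
	for (i = 0; i < TLBSIZE; i++) {
		c0_index = i;
		mtc0_tlbw_hazard();
		tlb_read();
		tlbw_use_hazard();
		shadow[i] = (struct tlb_entry){ c0_entryhi, c0_entrylo0,
						c0_entrylo1, c0_pagemask };
	}

	/* "load": stage the saved entry, select the slot, write it back.
	 * (The kernel version left c0_pagemask alone; staging it here keeps
	 * the model self-consistent.) */
	for (i = 0; i < TLBSIZE; i++) {
		c0_entryhi  = shadow[i].hi;
		c0_entrylo0 = shadow[i].lo0;
		c0_entrylo1 = shadow[i].lo1;
		c0_pagemask = shadow[i].mask;
		c0_index = i;
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}

	printf("saved and restored %d entries\n", TLBSIZE);
	return 0;
}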
@@ -749,30 +658,6 @@ void kvm_local_flush_tlb_all(void)
 	local_irq_restore(flags);
 }
 
-void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
-{
-	int cpu, entry;
-
-	for_each_possible_cpu(cpu) {
-		for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
-			vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
-			    UNIQUE_ENTRYHI(entry);
-			vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
-			vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
-			vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
-			    read_c0_pagemask();
-#ifdef DEBUG
-			kvm_debug
-			    ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
-			     cpu, entry,
-			     vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
-			     vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
-			     vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
-#endif
-		}
-	}
-}
-
 /* Restore ASID once we are scheduled back after preemption */
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
@@ -810,14 +695,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
 	}
 
-	/* Only reload shadow host TLB if new ASIDs haven't been allocated */
-#if 0
-	if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
-		kvm_mips_flush_host_tlb(0);
-		kvm_shadow_tlb_load(vcpu);
-	}
-#endif
-
 	if (!newasid) {
 		/* If we preempted while the guest was executing, then reload the pre-empted ASID */
 		if (current->flags & PF_VCPU) {
@@ -863,12 +740,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	vcpu->arch.preempt_entryhi = read_c0_entryhi();
 	vcpu->arch.last_sched_cpu = cpu;
 
-#if 0
-	if ((atomic_read(&kvm_mips_instance) > 1)) {
-		kvm_shadow_tlb_put(vcpu);
-	}
-#endif
-
 	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
 	     ASID_VERSION_MASK)) {
 		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
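
The surviving context shows the ASID lifetime test this code relies on: cpu_context() keeps a generation number ("version") in the bits above the hardware ASID, and XOR-ing it against the per-CPU asid_cache exposes a version mismatch, i.e. the ASID has been recycled and the MMU context must be dropped. A small sketch with illustrative field widths (the 8-bit ASID is an assumption; the real masks come from asm/mmu_context.h):

#include <stdio.h>

#define ASID_MASK         0xffUL           /* assumed 8-bit hardware ASID */
#define ASID_VERSION_MASK (~ASID_MASK)     /* generation lives above it */

int main(void)
{
	unsigned long asid_cache  = (2UL << 8) | 0x05; /* version 2, ASID 5 */
	unsigned long cpu_context = (1UL << 8) | 0x05; /* allocated in version 1 */

	if ((cpu_context ^ asid_cache) & ASID_VERSION_MASK)
		printf("stale ASID: drop the MMU context and reallocate\n");
	return 0;
}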
@@ -930,10 +801,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
 }
 
 EXPORT_SYMBOL(kvm_local_flush_tlb_all);
-EXPORT_SYMBOL(kvm_shadow_tlb_put);
 EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
 EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
-EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
 EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
 EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
 EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
@@ -941,8 +810,6 @@ EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
 EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
 EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
 EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
-EXPORT_SYMBOL(kvm_shadow_tlb_load);
-EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
 EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
 EXPORT_SYMBOL(kvm_get_inst);
 EXPORT_SYMBOL(kvm_arch_vcpu_load);