-rw-r--r--	arch/mips/include/asm/kvm_host.h |   7 ---
-rw-r--r--	arch/mips/kvm/kvm_mips.c         |   1 -
-rw-r--r--	arch/mips/kvm/kvm_tlb.c          | 130 ------
3 files changed, 0 insertions, 138 deletions
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 32966969f2f9..a995fce87791 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -391,9 +391,6 @@ struct kvm_vcpu_arch {
 	uint32_t guest_kernel_asid[NR_CPUS];
 	struct mm_struct guest_kernel_mm, guest_user_mm;
 
-	struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE];
-
-
 	struct hrtimer comparecount_timer;
 
 	int last_sched_cpu;
@@ -529,7 +526,6 @@ extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
 
 extern void kvm_mips_dump_host_tlbs(void);
 extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
-extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu);
 extern void kvm_mips_flush_host_tlb(int skip_kseg0);
 extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
 extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
@@ -541,10 +537,7 @@ extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu
 						   unsigned long gva);
 extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 				    struct kvm_vcpu *vcpu);
-extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu);
-extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu);
 extern void kvm_local_flush_tlb_all(void);
-extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu);
 extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
 extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index 73b34827826c..da5186fbd77a 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -1001,7 +1001,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
 		     HRTIMER_MODE_REL);
 	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
-	kvm_mips_init_shadow_tlb(vcpu);
 	return 0;
 }
 
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index 52083ea7fddd..68e6563915cd 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -145,30 +145,6 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
 	}
 }
 
-void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
-{
-	int i;
-	volatile struct kvm_mips_tlb tlb;
-
-	printk("Shadow TLBs:\n");
-	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
-		tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
-		printk("TLB%c%3d Hi 0x%08lx ",
-		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
-		       i, tlb.tlb_hi);
-		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
-		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
-		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
-		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
-		       (tlb.tlb_lo0 >> 3) & 7);
-		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
-		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
-		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
-		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
-		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
-	}
-}
-
 static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 {
 	int srcu_idx, err = 0;
@@ -655,70 +631,6 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
-void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
-{
-	unsigned long flags;
-	unsigned long old_entryhi;
-	unsigned long old_pagemask;
-	int entry = 0;
-	int cpu = smp_processor_id();
-
-	local_irq_save(flags);
-
-	old_entryhi = read_c0_entryhi();
-	old_pagemask = read_c0_pagemask();
-
-	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
-		write_c0_index(entry);
-		mtc0_tlbw_hazard();
-		tlb_read();
-		tlbw_use_hazard();
-
-		vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
-		vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
-		vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
-		vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
-	}
-
-	write_c0_entryhi(old_entryhi);
-	write_c0_pagemask(old_pagemask);
-	mtc0_tlbw_hazard();
-
-	local_irq_restore(flags);
-
-}
-
-void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
-{
-	unsigned long flags;
-	unsigned long old_ctx;
-	int entry;
-	int cpu = smp_processor_id();
-
-	local_irq_save(flags);
-
-	old_ctx = read_c0_entryhi();
-
-	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
-		write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
-		mtc0_tlbw_hazard();
-		write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
-		write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
-
-		write_c0_index(entry);
-		mtc0_tlbw_hazard();
-
-		tlb_write_indexed();
-		tlbw_use_hazard();
-	}
-
-	tlbw_use_hazard();
-	write_c0_entryhi(old_ctx);
-	mtc0_tlbw_hazard();
-	local_irq_restore(flags);
-}
-
-
 void kvm_local_flush_tlb_all(void)
 {
 	unsigned long flags;
@@ -747,30 +659,6 @@ void kvm_local_flush_tlb_all(void)
 	local_irq_restore(flags);
 }
 
-void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
-{
-	int cpu, entry;
-
-	for_each_possible_cpu(cpu) {
-		for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
-			vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
-			    UNIQUE_ENTRYHI(entry);
-			vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
-			vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
-			vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
-			    read_c0_pagemask();
-#ifdef DEBUG
-			kvm_debug
-			    ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
-			     cpu, entry,
-			     vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
-			     vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
-			     vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
-#endif
-		}
-	}
-}
-
 /* Restore ASID once we are scheduled back after preemption */
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
@@ -808,14 +696,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
 	}
 
-	/* Only reload shadow host TLB if new ASIDs haven't been allocated */
-#if 0
-	if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
-		kvm_mips_flush_host_tlb(0);
-		kvm_shadow_tlb_load(vcpu);
-	}
-#endif
-
 	if (!newasid) {
 		/* If we preempted while the guest was executing, then reload the pre-empted ASID */
 		if (current->flags & PF_VCPU) {
@@ -861,12 +741,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	vcpu->arch.preempt_entryhi = read_c0_entryhi();
 	vcpu->arch.last_sched_cpu = cpu;
 
-#if 0
-	if ((atomic_read(&kvm_mips_instance) > 1)) {
-		kvm_shadow_tlb_put(vcpu);
-	}
-#endif
-
 	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
 	     ASID_VERSION_MASK)) {
 		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
@@ -928,10 +802,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
 }
 
 EXPORT_SYMBOL(kvm_local_flush_tlb_all);
-EXPORT_SYMBOL(kvm_shadow_tlb_put);
 EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
 EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
-EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
 EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
 EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
 EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
@@ -939,8 +811,6 @@ EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
 EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
 EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
 EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
-EXPORT_SYMBOL(kvm_shadow_tlb_load);
-EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
 EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
 EXPORT_SYMBOL(kvm_get_inst);
 EXPORT_SYMBOL(kvm_arch_vcpu_load);