author		David Daney <david.daney@cavium.com>	2013-05-13 16:56:44 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2013-05-16 14:35:42 -0400
commit		48c4ac976ae995f263cde8f09578de86bc8e9f1d (patch)
tree		48e9b3753b951ea2c276546679264c3030a8ea7d /arch/mips/kvm/kvm_tlb.c
parent		8ea6cd7af124ad070b44a7f60e225e45e3f38f79 (diff)
Revert "MIPS: Allow ASID size to be determined at boot time."
This reverts commit d532f3d26716a39dfd4b88d687bd344fbe77e390.
The original commit has several problems:
1) Doesn't work with 64-bit kernels.
2) Calls TLBMISS_HANDLER_SETUP() before the code is generated.
3) Calls TLBMISS_HANDLER_SETUP() twice in per_cpu_trap_init() when
only one call is needed.
[ralf@linux-mips.org: Also revert the bits of the ASID patch which were
hidden in the KVM merge.]
Signed-off-by: David Daney <david.daney@cavium.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Cc: "Steven J. Hill" <Steven.Hill@imgtec.com>
Cc: David Daney <david.daney@cavium.com>
Patchwork: https://patchwork.linux-mips.org/patch/5242/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
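For context, the revert is about how the ASID field of CP0 EntryHi gets masked. A minimal standalone sketch of the restored style, using an illustrative mask value rather than the real mmu_context.h definitions:

#include <stdio.h>

/* Illustrative value only; the real definition lives in
 * arch/mips/include/asm/mmu_context.h. */
#define ASID_MASK	0xfful

int main(void)
{
	unsigned long entryhi = 0x12345642ul;	/* hypothetical EntryHi value */

	/* Restored style: ASID_MASK is a compile-time constant and callers
	 * mask with a plain bitwise AND, as kvm_mips_get_kernel_asid() and
	 * friends do in the diff below. */
	printf("ASID: %#lx\n", entryhi & ASID_MASK);

	/* The reverted patch instead made ASID_MASK(x) a function-like macro,
	 * roughly ((x) & some_boot_time_mask), so the mask width could be
	 * chosen at boot; that is the form removed on every '-' line below. */
	return 0;
}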
Diffstat (limited to 'arch/mips/kvm/kvm_tlb.c')
-rw-r--r--  arch/mips/kvm/kvm_tlb.c | 26
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index 89511a9258d3..e3f0d9b8b6c5 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -51,13 +51,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn);
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
-	return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
+	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
 }
 
 
 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
-	return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
+	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
 }
 
 inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
@@ -84,7 +84,7 @@ void kvm_mips_dump_host_tlbs(void)
 	old_pagemask = read_c0_pagemask();
 
 	printk("HOST TLBs:\n");
-	printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
+	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
 
 	for (i = 0; i < current_cpu_data.tlbsize; i++) {
 		write_c0_index(i);
@@ -428,7 +428,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 
 	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
 		if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
-		    (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
+		    (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
 			index = i;
 			break;
 		}
@@ -626,7 +626,7 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 {
 	unsigned long asid = asid_cache(cpu);
 
-	if (!(ASID_MASK(ASID_INC(asid)))) {
+	if (!((asid += ASID_INC) & ASID_MASK)) {
 		if (cpu_has_vtag_icache) {
 			flush_icache_all();
 		}
@@ -804,7 +804,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (!newasid) {
 		/* If we preempted while the guest was executing, then reload the pre-empted ASID */
 		if (current->flags & PF_VCPU) {
-			write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
+			write_c0_entryhi(vcpu->arch.
+					 preempt_entryhi & ASID_MASK);
 			ehb();
 		}
 	} else {
@@ -816,11 +817,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 */
 		if (current->flags & PF_VCPU) {
 			if (KVM_GUEST_KERNEL_MODE(vcpu))
-				write_c0_entryhi(ASID_MASK(vcpu->arch.
-						 guest_kernel_asid[cpu]));
+				write_c0_entryhi(vcpu->arch.
+						 guest_kernel_asid[cpu] &
+						 ASID_MASK);
 			else
-				write_c0_entryhi(ASID_MASK(vcpu->arch.
-						 guest_user_asid[cpu]));
+				write_c0_entryhi(vcpu->arch.
+						 guest_user_asid[cpu] &
+						 ASID_MASK);
 			ehb();
 		}
 	}
@@ -879,7 +882,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
 			kvm_mips_guest_tlb_lookup(vcpu,
 				((unsigned long) opc & VPN2_MASK)
 				|
-				ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+				(kvm_read_c0_guest_entryhi
+				 (cop0) & ASID_MASK));
 		if (index < 0) {
 			kvm_err
 			    ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",