Diffstat (limited to 'arch/mips/kvm/tlb.c')
-rw-r--r--	arch/mips/kvm/tlb.c	30
1 file changed, 19 insertions, 11 deletions
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 52d87280f865..b9c52c1d35d6 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -49,12 +49,18 @@ EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
+	int cpu = smp_processor_id();
+
+	return vcpu->arch.guest_kernel_asid[cpu] &
+			cpu_asid_mask(&cpu_data[cpu]);
 }
 
 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
+	int cpu = smp_processor_id();
+
+	return vcpu->arch.guest_user_asid[cpu] &
+			cpu_asid_mask(&cpu_data[cpu]);
 }
 
 inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
@@ -78,7 +84,8 @@ void kvm_mips_dump_host_tlbs(void)
 	old_pagemask = read_c0_pagemask();
 
 	kvm_info("HOST TLBs:\n");
-	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
+	kvm_info("ASID: %#lx\n", read_c0_entryhi() &
+		 cpu_asid_mask(&current_cpu_data));
 
 	for (i = 0; i < current_cpu_data.tlbsize; i++) {
 		write_c0_index(i);
@@ -564,15 +571,15 @@ void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 {
 	unsigned long asid = asid_cache(cpu);
 
-	asid += ASID_INC;
-	if (!(asid & ASID_MASK)) {
+	asid += cpu_asid_inc();
+	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 
 		kvm_local_flush_tlb_all();	/* start new asid cycle */
 
 		if (!asid)	/* fix version if needed */
-			asid = ASID_FIRST_VERSION;
+			asid = asid_first_version(cpu);
 	}
 
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
@@ -627,6 +634,7 @@ static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
 /* Restore ASID once we are scheduled back after preemption */
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
 	unsigned long flags;
 	int newasid = 0;
 
@@ -637,7 +645,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	local_irq_save(flags);
 
 	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
-						ASID_VERSION_MASK) {
+						asid_version_mask(cpu)) {
 		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
 		vcpu->arch.guest_kernel_asid[cpu] =
 			vcpu->arch.guest_kernel_mm.context.asid[cpu];
@@ -672,7 +680,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 */
 		if (current->flags & PF_VCPU) {
 			write_c0_entryhi(vcpu->arch.
-					 preempt_entryhi & ASID_MASK);
+					 preempt_entryhi & asid_mask);
 			ehb();
 		}
 	} else {
@@ -687,11 +695,11 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		if (KVM_GUEST_KERNEL_MODE(vcpu))
 			write_c0_entryhi(vcpu->arch.
 					 guest_kernel_asid[cpu] &
-					 ASID_MASK);
+					 asid_mask);
 		else
 			write_c0_entryhi(vcpu->arch.
 					 guest_user_asid[cpu] &
-					 ASID_MASK);
+					 asid_mask);
 		ehb();
 	}
 }
@@ -721,7 +729,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_mips_callbacks->vcpu_get_regs(vcpu);
 
 	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-	     ASID_VERSION_MASK)) {
+	     asid_version_mask(cpu))) {
 		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
 			  cpu_context(cpu, current->mm));
 		drop_mmu_context(current->mm, cpu);
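
Every hunk above applies the same transformation: the build-time ASID constants (ASID_MASK, ASID_INC, ASID_VERSION_MASK, ASID_FIRST_VERSION) are replaced with per-CPU accessors (cpu_asid_mask(), cpu_asid_inc(), asid_version_mask(), asid_first_version()), so the ASID field width can be determined at runtime for each CPU rather than fixed at compile time. The sketch below illustrates what such accessors might look like; the struct layout and function bodies are assumptions inferred from the call sites in this diff, not the kernel's actual definitions (those live in the MIPS mmu_context and cpuinfo headers), and they assume the ASID field occupies the low bits of EntryHi.

/* Illustrative sketch only; not the kernel's actual definitions. */
struct cpuinfo_mips {
	unsigned long asid_mask;	/* assumed field, probed per CPU at boot */
};

extern struct cpuinfo_mips cpu_data[];	/* one entry per CPU */

/* Per-CPU ASID mask, replacing the build-time ASID_MASK constant. */
static inline unsigned long cpu_asid_mask(struct cpuinfo_mips *cpuinfo)
{
	return cpuinfo->asid_mask;
}

/* Increment between consecutive ASIDs; 1 when the field starts at bit 0. */
static inline unsigned long cpu_asid_inc(void)
{
	return 1;
}

/* Version bits are everything above the ASID field. */
static inline unsigned long asid_version_mask(unsigned int cpu)
{
	return ~cpu_asid_mask(&cpu_data[cpu]);
}

/* First version value once the counter wraps out of the ASID field. */
static inline unsigned long asid_first_version(unsigned int cpu)
{
	return cpu_asid_mask(&cpu_data[cpu]) + 1;
}

Caching the mask in a local variable, as kvm_arch_vcpu_load() now does with asid_mask, also keeps the repeated EntryHi writes in that function from re-reading cpu_data each time.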