Diffstat (limited to 'arch/mips/kvm/mmu.c')

 arch/mips/kvm/mmu.c | 70 +++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 45 insertions(+), 25 deletions(-)
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 57319ee57c4f..121008c0fcc9 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -40,7 +40,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	pfn = gfn_to_pfn(kvm, gfn);
 
-	if (is_error_pfn(pfn)) {
+	if (is_error_noslot_pfn(pfn)) {
 		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
 		err = -EFAULT;
 		goto out;
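
Note: the first hunk widens the error check after gfn_to_pfn(). is_error_pfn() only catches genuine fault pfns, while gfn_to_pfn() can also return a distinct "no memslot" marker when the gfn is not backed by any memory region; is_error_noslot_pfn() catches both, so a bogus value no longer reaches guest_pmap[]. As a rough illustration of the distinction (a minimal sketch, not the kernel's exact bit layout from include/linux/kvm_host.h - the constant values below are assumptions):

	/* Minimal sketch; the real definitions live in include/linux/kvm_host.h
	 * and the exact bit positions here are an assumption. */
	typedef unsigned long long kvm_pfn_t;

	#define DEMO_PFN_ERR_MASK	(0x7ffULL << 52)	/* assumed error pfn range */
	#define DEMO_PFN_NOSLOT		(0x1ULL << 63)		/* assumed "no memslot" marker */

	static int demo_is_error_pfn(kvm_pfn_t pfn)
	{
		return !!(pfn & DEMO_PFN_ERR_MASK);		/* misses the noslot marker */
	}

	static int demo_is_error_noslot_pfn(kvm_pfn_t pfn)
	{
		return !!(pfn & (DEMO_PFN_ERR_MASK | DEMO_PFN_NOSLOT));	/* catches both */
	}
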
@@ -99,7 +99,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, | |||
99 | } | 99 | } |
100 | 100 | ||
101 | gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); | 101 | gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); |
102 | if (gfn >= kvm->arch.guest_pmap_npages) { | 102 | if ((gfn | 1) >= kvm->arch.guest_pmap_npages) { |
103 | kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__, | 103 | kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__, |
104 | gfn, badvaddr); | 104 | gfn, badvaddr); |
105 | kvm_mips_dump_host_tlbs(); | 105 | kvm_mips_dump_host_tlbs(); |
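
Note: in kvm_mips_handle_kseg0_tlb_fault() a single host TLB entry covers an even/odd pair of guest pages, so both frame numbers of the pair end up indexing guest_pmap[]. Checking only gfn leaves the partner frame unchecked when guest_pmap_npages is odd; (gfn | 1) is the higher frame of the pair, so testing it covers both halves. A small standalone illustration (the values are made up):

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long npages = 15;	/* hypothetical: odd number of guest pages */
		unsigned long gfn = 14;		/* faulting frame; its TLB pair partner is 15 */

		/* Old check: passes, yet guest_pmap[15] would be indexed out of bounds. */
		assert(!(gfn >= npages));

		/* New check: (14 | 1) == 15, so the out-of-range pair is rejected. */
		assert((gfn | 1) >= npages);

		printf("pair {%lu, %lu} rejected against %lu pages\n",
		       gfn & ~1UL, gfn | 1UL, npages);
		return 0;
	}
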
@@ -138,35 +138,49 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, | |||
138 | unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; | 138 | unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; |
139 | struct kvm *kvm = vcpu->kvm; | 139 | struct kvm *kvm = vcpu->kvm; |
140 | kvm_pfn_t pfn0, pfn1; | 140 | kvm_pfn_t pfn0, pfn1; |
141 | gfn_t gfn0, gfn1; | ||
142 | long tlb_lo[2]; | ||
141 | int ret; | 143 | int ret; |
142 | 144 | ||
143 | if ((tlb->tlb_hi & VPN2_MASK) == 0) { | 145 | tlb_lo[0] = tlb->tlb_lo[0]; |
144 | pfn0 = 0; | 146 | tlb_lo[1] = tlb->tlb_lo[1]; |
145 | pfn1 = 0; | 147 | |
146 | } else { | 148 | /* |
147 | if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) | 149 | * The commpage address must not be mapped to anything else if the guest |
148 | >> PAGE_SHIFT) < 0) | 150 | * TLB contains entries nearby, or commpage accesses will break. |
149 | return -1; | 151 | */ |
150 | 152 | if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) & | |
151 | if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) | 153 | VPN2_MASK & (PAGE_MASK << 1))) |
152 | >> PAGE_SHIFT) < 0) | 154 | tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0; |
153 | return -1; | 155 | |
154 | 156 | gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT; | |
155 | pfn0 = kvm->arch.guest_pmap[ | 157 | gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT; |
156 | mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT]; | 158 | if (gfn0 >= kvm->arch.guest_pmap_npages || |
157 | pfn1 = kvm->arch.guest_pmap[ | 159 | gfn1 >= kvm->arch.guest_pmap_npages) { |
158 | mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT]; | 160 | kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n", |
161 | __func__, gfn0, gfn1, tlb->tlb_hi); | ||
162 | kvm_mips_dump_guest_tlbs(vcpu); | ||
163 | return -1; | ||
159 | } | 164 | } |
160 | 165 | ||
166 | if (kvm_mips_map_page(kvm, gfn0) < 0) | ||
167 | return -1; | ||
168 | |||
169 | if (kvm_mips_map_page(kvm, gfn1) < 0) | ||
170 | return -1; | ||
171 | |||
172 | pfn0 = kvm->arch.guest_pmap[gfn0]; | ||
173 | pfn1 = kvm->arch.guest_pmap[gfn1]; | ||
174 | |||
161 | /* Get attributes from the Guest TLB */ | 175 | /* Get attributes from the Guest TLB */ |
162 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | | 176 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | |
163 | ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | | 177 | ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | |
164 | (tlb->tlb_lo[0] & ENTRYLO_D) | | 178 | (tlb_lo[0] & ENTRYLO_D) | |
165 | (tlb->tlb_lo[0] & ENTRYLO_V); | 179 | (tlb_lo[0] & ENTRYLO_V); |
166 | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | | 180 | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | |
167 | ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | | 181 | ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | |
168 | (tlb->tlb_lo[1] & ENTRYLO_D) | | 182 | (tlb_lo[1] & ENTRYLO_D) | |
169 | (tlb->tlb_lo[1] & ENTRYLO_V); | 183 | (tlb_lo[1] & ENTRYLO_V); |
170 | 184 | ||
171 | kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, | 185 | kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, |
172 | tlb->tlb_lo[0], tlb->tlb_lo[1]); | 186 | tlb->tlb_lo[0], tlb->tlb_lo[1]); |
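
Note: the third hunk reworks kvm_mips_handle_mapped_seg_tlb_fault() to (a) operate on a local copy of the guest entry's tlb_lo pair, (b) zero the half that would shadow the guest commpage when the entry's virtual page pair overlaps KVM_GUEST_COMMPAGE_ADDR, and (c) bounds-check both gfns against guest_pmap_npages before indexing guest_pmap[] or mapping the pages. The overlap test XORs the entry's EntryHi with the commpage address and masks down to VPN2 granularity (one pair of pages); the low bit of the commpage's page number then selects which half of the pair to drop. A rough standalone sketch with assumed values (4 KiB pages and a commpage at guest virtual address 0 are assumptions here, not taken from the patch):

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
	#define VPN2_MASK	0xffffe000UL		/* assumed VPN2 field of EntryHi */
	#define COMMPAGE_ADDR	0x0UL			/* assumed commpage guest VA */

	int main(void)
	{
		unsigned long tlb_hi = 0x00000000UL;	/* guest entry covering VAs 0x0000-0x1fff */
		unsigned long tlb_lo[2] = { 0x11, 0x22 };	/* stand-in EntryLo0/EntryLo1 values */

		/* True when the entry's two-page span contains the commpage address. */
		if (!((tlb_hi ^ COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
			/* Low bit of the commpage's page number picks the half to drop. */
			tlb_lo[(COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;

		printf("tlb_lo[0]=%#lx tlb_lo[1]=%#lx\n", tlb_lo[0], tlb_lo[1]);
		return 0;
	}
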
@@ -354,9 +368,15 @@ u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
 			local_irq_restore(flags);
 			return KVM_INVALID_INST;
 		}
-		kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
-						     &vcpu->arch.
-						     guest_tlb[index]);
+		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+					&vcpu->arch.guest_tlb[index])) {
+			kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
+				__func__, opc, index, vcpu,
+				read_c0_entryhi());
+			kvm_mips_dump_guest_tlbs(vcpu);
+			local_irq_restore(flags);
+			return KVM_INVALID_INST;
+		}
 		inst = *(opc);
 	}
 	local_irq_restore(flags);
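
Note: the last hunk stops kvm_get_inst() from ignoring the return value of kvm_mips_handle_mapped_seg_tlb_fault(). If the refill fails (for example because of the new gfn range check above), the function now logs the failure, restores interrupts and returns KVM_INVALID_INST instead of fetching through a mapping that was never installed. Callers are then expected to treat the sentinel as a failed fetch; a hypothetical caller-side pattern (illustrative only, not lifted from this patch) might look like:

	u32 inst = kvm_get_inst(opc, vcpu);
	if (inst == KVM_INVALID_INST)
		return EMULATE_FAIL;	/* give up rather than emulate a garbage opcode */
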