about | summary | refs | log | tree | commit | diff | stats
path: root/arch/mips/kvm
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2016-09-01 12:33:46 -0400
committerThomas Gleixner <tglx@linutronix.de>2016-09-01 12:33:46 -0400
commit0cb7bf61b1e9f05027de58c80f9b46a714d24e35 (patch)
tree41fb55cf62d07b425122f9a8b96412c0d8eb99c5 /arch/mips/kvm
parentaa877175e7a9982233ed8f10cb4bfddd78d82741 (diff)
parent3eab887a55424fc2c27553b7bfe32330df83f7b8 (diff)
Merge branch 'linus' into smp/hotplug
Apply upstream changes to avoid conflicts with pending patches.
Diffstat (limited to 'arch/mips/kvm')
-rw-r--r--  arch/mips/kvm/emulate.c | 35
-rw-r--r--  arch/mips/kvm/mmu.c     | 70
2 files changed, 71 insertions(+), 34 deletions(-)
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 6eb52b9c9818..e788515f766b 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1642,8 +1642,14 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
1642 1642
1643 preempt_disable(); 1643 preempt_disable();
1644 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { 1644 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
1645 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) 1645 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
1646 kvm_mips_handle_kseg0_tlb_fault(va, vcpu); 1646 kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
1647 kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
1648 __func__, va, vcpu, read_c0_entryhi());
1649 er = EMULATE_FAIL;
1650 preempt_enable();
1651 goto done;
1652 }
1647 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || 1653 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1648 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { 1654 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1649 int index; 1655 int index;
@@ -1680,12 +1686,18 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
1680 run, vcpu); 1686 run, vcpu);
1681 preempt_enable(); 1687 preempt_enable();
1682 goto dont_update_pc; 1688 goto dont_update_pc;
1683 } else { 1689 }
1684 /* 1690 /*
1685 * We fault an entry from the guest tlb to the 1691 * We fault an entry from the guest tlb to the
1686 * shadow host TLB 1692 * shadow host TLB
1687 */ 1693 */
1688 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb); 1694 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
1695 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
1696 __func__, va, index, vcpu,
1697 read_c0_entryhi());
1698 er = EMULATE_FAIL;
1699 preempt_enable();
1700 goto done;
1689 } 1701 }
1690 } 1702 }
1691 } else { 1703 } else {
@@ -2659,7 +2671,12 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
2659 * OK we have a Guest TLB entry, now inject it into the 2671 * OK we have a Guest TLB entry, now inject it into the
2660 * shadow host TLB 2672 * shadow host TLB
2661 */ 2673 */
2662 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb); 2674 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
2675 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
2676 __func__, va, index, vcpu,
2677 read_c0_entryhi());
2678 er = EMULATE_FAIL;
2679 }
2663 } 2680 }
2664 } 2681 }
2665 2682
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 57319ee57c4f..121008c0fcc9 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -40,7 +40,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
40 srcu_idx = srcu_read_lock(&kvm->srcu); 40 srcu_idx = srcu_read_lock(&kvm->srcu);
41 pfn = gfn_to_pfn(kvm, gfn); 41 pfn = gfn_to_pfn(kvm, gfn);
42 42
43 if (is_error_pfn(pfn)) { 43 if (is_error_noslot_pfn(pfn)) {
44 kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn); 44 kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
45 err = -EFAULT; 45 err = -EFAULT;
46 goto out; 46 goto out;
@@ -99,7 +99,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
99 } 99 }
100 100
101 gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); 101 gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
102 if (gfn >= kvm->arch.guest_pmap_npages) { 102 if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
103 kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__, 103 kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
104 gfn, badvaddr); 104 gfn, badvaddr);
105 kvm_mips_dump_host_tlbs(); 105 kvm_mips_dump_host_tlbs();
@@ -138,35 +138,49 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
138 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; 138 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
139 struct kvm *kvm = vcpu->kvm; 139 struct kvm *kvm = vcpu->kvm;
140 kvm_pfn_t pfn0, pfn1; 140 kvm_pfn_t pfn0, pfn1;
141 gfn_t gfn0, gfn1;
142 long tlb_lo[2];
141 int ret; 143 int ret;
142 144
143 if ((tlb->tlb_hi & VPN2_MASK) == 0) { 145 tlb_lo[0] = tlb->tlb_lo[0];
144 pfn0 = 0; 146 tlb_lo[1] = tlb->tlb_lo[1];
145 pfn1 = 0; 147
146 } else { 148 /*
147 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) 149 * The commpage address must not be mapped to anything else if the guest
148 >> PAGE_SHIFT) < 0) 150 * TLB contains entries nearby, or commpage accesses will break.
149 return -1; 151 */
150 152 if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
151 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) 153 VPN2_MASK & (PAGE_MASK << 1)))
152 >> PAGE_SHIFT) < 0) 154 tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
153 return -1; 155
154 156 gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
155 pfn0 = kvm->arch.guest_pmap[ 157 gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
156 mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT]; 158 if (gfn0 >= kvm->arch.guest_pmap_npages ||
157 pfn1 = kvm->arch.guest_pmap[ 159 gfn1 >= kvm->arch.guest_pmap_npages) {
158 mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT]; 160 kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
161 __func__, gfn0, gfn1, tlb->tlb_hi);
162 kvm_mips_dump_guest_tlbs(vcpu);
163 return -1;
159 } 164 }
160 165
166 if (kvm_mips_map_page(kvm, gfn0) < 0)
167 return -1;
168
169 if (kvm_mips_map_page(kvm, gfn1) < 0)
170 return -1;
171
172 pfn0 = kvm->arch.guest_pmap[gfn0];
173 pfn1 = kvm->arch.guest_pmap[gfn1];
174
161 /* Get attributes from the Guest TLB */ 175 /* Get attributes from the Guest TLB */
162 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | 176 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
163 ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | 177 ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
164 (tlb->tlb_lo[0] & ENTRYLO_D) | 178 (tlb_lo[0] & ENTRYLO_D) |
165 (tlb->tlb_lo[0] & ENTRYLO_V); 179 (tlb_lo[0] & ENTRYLO_V);
166 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | 180 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
167 ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | 181 ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
168 (tlb->tlb_lo[1] & ENTRYLO_D) | 182 (tlb_lo[1] & ENTRYLO_D) |
169 (tlb->tlb_lo[1] & ENTRYLO_V); 183 (tlb_lo[1] & ENTRYLO_V);
170 184
171 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, 185 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
172 tlb->tlb_lo[0], tlb->tlb_lo[1]); 186 tlb->tlb_lo[0], tlb->tlb_lo[1]);
@@ -354,9 +368,15 @@ u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
354 local_irq_restore(flags); 368 local_irq_restore(flags);
355 return KVM_INVALID_INST; 369 return KVM_INVALID_INST;
356 } 370 }
357 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, 371 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
358 &vcpu->arch. 372 &vcpu->arch.guest_tlb[index])) {
359 guest_tlb[index]); 373 kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
374 __func__, opc, index, vcpu,
375 read_c0_entryhi());
376 kvm_mips_dump_guest_tlbs(vcpu);
377 local_irq_restore(flags);
378 return KVM_INVALID_INST;
379 }
360 inst = *(opc); 380 inst = *(opc);
361 } 381 }
362 local_irq_restore(flags); 382 local_irq_restore(flags);