author		Greg Kurz <groug@kaod.org>	2017-10-16 06:29:44 -0400
committer	Paul Mackerras <paulus@ozlabs.org>	2017-11-01 00:17:25 -0400
commit		f4093ee9d0bd8299ccdb26120a35bfe293fe95e7 (patch)
tree		62ec36bf9a9a9b50b7e6b61771af64ba9070f97f
parent		00bb6ae5006205e041ce9784c819460562351d47 (diff)
KVM: PPC: Book3S PR: Only install valid SLBs during KVM_SET_SREGS
Userland passes an array of 64 SLB descriptors to KVM_SET_SREGS, some of
which are valid (ie, SLB_ESID_V is set) and the rest are likely
all-zeroes (with QEMU at least).

Each of them is then passed to kvmppc_mmu_book3s_64_slbmte(), which
expects to find the SLB index in the 3 lower bits of its rb argument.
When passed zeroed arguments, it happily overwrites the 0th SLB entry
with zeroes. This is exactly what happens while doing live migration
with QEMU when the destination pushes the incoming SLB descriptors to
KVM PR. When reloading the SLBs at the next synchronization, QEMU first
clears its SLB array and only restores valid ones, but the 0th one is
now gone and we cannot access the corresponding memory anymore:

(qemu) x/x $pc
c0000000000b742c: Cannot access memory

To avoid this, let's filter out non-valid SLB entries. While here, we
also force a full SLB flush before installing new entries. Since SLB is
for 64-bit only, we now build this path conditionally to avoid a build
break on 32-bit, which doesn't define SLB_ESID_V.

Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
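For illustration, a minimal standalone C sketch of the failure mode
described above. This is not the kernel code: the SLB_ESID_V value
matches the arch header, but the index extraction and array are
simplified stand-ins.

/*
 * Hedged sketch, not kernel code: why a zeroed SLB descriptor clobbers
 * entry 0.  An slbmte-style handler takes the target index from the
 * low bits of rb, so rb == 0 selects index 0 and overwrites a live
 * mapping with an all-zero (invalid) entry.
 */
#include <stdint.h>
#include <stdio.h>

#define SLB_ESID_V	0x0000000008000000ULL	/* valid bit, per mmu-hash.h */

int main(void)
{
	/* entry 0 holds a live mapping before KVM_SET_SREGS runs */
	uint64_t slb_esid[64] = { 0xc000000000000000ULL | SLB_ESID_V };
	uint64_t rb = 0;		/* one of the zeroed descriptors */

	unsigned int idx = rb & 0x3f;	/* index from rb's low bits (simplified) */
	slb_esid[idx] = rb;		/* pre-fix: entry 0 silently wiped */

	/* post-fix, the zeroed descriptor is skipped instead:
	 *	if (rb & SLB_ESID_V)
	 *		slb_esid[idx] = rb;
	 */
	printf("SLB entry 0: 0x%016llx\n", (unsigned long long)slb_esid[0]);
	return 0;
}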
-rw-r--r--	arch/powerpc/kvm/book3s_pr.c	16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 69a09444d46e..d0dc8624198f 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1326,12 +1326,22 @@ static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
 	kvmppc_set_pvr_pr(vcpu, sregs->pvr);
 
 	vcpu3s->sdr1 = sregs->u.s.sdr1;
+#ifdef CONFIG_PPC_BOOK3S_64
 	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
+		/* Flush all SLB entries */
+		vcpu->arch.mmu.slbmte(vcpu, 0, 0);
+		vcpu->arch.mmu.slbia(vcpu);
+
 		for (i = 0; i < 64; i++) {
-			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
-					      sregs->u.s.ppc64.slb[i].slbe);
+			u64 rb = sregs->u.s.ppc64.slb[i].slbe;
+			u64 rs = sregs->u.s.ppc64.slb[i].slbv;
+
+			if (rb & SLB_ESID_V)
+				vcpu->arch.mmu.slbmte(vcpu, rs, rb);
 		}
-	} else {
+	} else
+#endif
+	{
 		for (i = 0; i < 16; i++) {
 			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
 		}
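For context, a hedged userland-side sketch of the interface this patch
hardens. The kvm_sregs layout and the KVM_GET_SREGS/KVM_SET_SREGS
ioctls are the real powerpc uapi (this assumes a powerpc host's
<linux/kvm.h>); the helper name push_slbs and its parameters are
invented for the example.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Push up to 64 SLB descriptors to a vCPU, leaving unused slots
 * zeroed as QEMU does.  With the fix above, the kernel skips the
 * zeroed slots (SLB_ESID_V clear) instead of letting them clobber
 * SLB entry 0.  vcpu_fd is assumed to be an open KVM vCPU fd. */
static int push_slbs(int vcpu_fd, const uint64_t *esids,
		     const uint64_t *vsids, int n)
{
	struct kvm_sregs sregs;
	int i;

	/* start from the current register state */
	if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		return -1;

	/* clear the whole array, then fill only the valid entries */
	memset(sregs.u.s.ppc64.slb, 0, sizeof(sregs.u.s.ppc64.slb));
	for (i = 0; i < n && i < 64; i++) {
		sregs.u.s.ppc64.slb[i].slbe = esids[i];	/* SLB_ESID_V set by caller */
		sregs.u.s.ppc64.slb[i].slbv = vsids[i];
	}

	return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
}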