author     Paul Mackerras <paulus@samba.org>   2013-06-22 03:15:24 -0400
committer  Alexander Graf <agraf@suse.de>      2013-06-29 21:33:22 -0400
commit     681562cd56f5336cbdf6dab0c4b2f6ef16ea89ed
tree       c18bfe66e7f4f84b513580a728fee764c1ed2337
parent     0f296829b5a59d5a157699cbb23672ccfdd8df4c
KVM: PPC: Book3S PR: Invalidate SLB entries properly
At present, if the guest creates a valid SLB (segment lookaside buffer) entry with the slbmte instruction, then invalidates it with the slbie instruction, then reads the entry with the slbmfee/slbmfev instructions, the result of the slbmfee will have the valid bit set, even though the entry is not actually considered valid by the host. This is confusing, if not worse. This fixes it by zeroing out the orige and origv fields of the SLB entry structure when the entry is invalidated.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
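For context, a minimal sketch of the read path that makes the stale state guest-visible. The slbmfee emulation in this file hands the cached orige word back to the guest as-is, so unless the invalidation paths zero it, the old valid bit survives. The body below is an illustrative reconstruction of that path, not a verbatim copy of the file:

	/* Illustrative sketch: the slbmfee emulation returns the cached
	 * orige word directly, including its valid bit, so stale contents
	 * leak to the guest unless slbie/slbia zero the field.
	 */
	static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu,
						u64 slb_nr)
	{
		struct kvmppc_slb *slbe;

		if (slb_nr > vcpu->arch.slb_nr)
			return 0;

		slbe = &vcpu->arch.slb[slb_nr];

		return slbe->orige;	/* stale valid bit without this fix */
	}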
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c  7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index ee435ba6b92a..739bfbadb85e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -397,6 +397,8 @@ static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
 	dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);
 
 	slbe->valid = false;
+	slbe->orige = 0;
+	slbe->origv = 0;
 
 	seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
 	kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
@@ -408,8 +410,11 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
 
 	dprintk("KVM MMU: slbia()\n");
 
-	for (i = 1; i < vcpu->arch.slb_nr; i++)
+	for (i = 1; i < vcpu->arch.slb_nr; i++) {
 		vcpu->arch.slb[i].valid = false;
+		vcpu->arch.slb[i].orige = 0;
+		vcpu->arch.slb[i].origv = 0;
+	}
 
 	if (vcpu->arch.shared->msr & MSR_IR) {
 		kvmppc_mmu_flush_segments(vcpu);
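For reference, a hedged guest-side sketch of the symptom the commit message describes. The function name, parameters, and operand encodings are assumptions made for illustration (slbie's operand in particular is simplified); only the slbmte/slbie/slbmfee sequence comes from the commit message:

	/* Hypothetical guest repro (privileged code, operand encoding
	 * simplified): create an SLB entry, invalidate it, then read it
	 * back. Before this fix, the slbmfee result still carried the
	 * valid bit even though the host no longer considered the entry
	 * valid; after it, the read returns 0.
	 */
	static unsigned long slb_repro(unsigned long esid_data,
				       unsigned long vsid_data,
				       unsigned long ea,
				       unsigned long slot)
	{
		unsigned long e;

		asm volatile("slbmte %0,%1" : : "r" (vsid_data), "r" (esid_data));
		asm volatile("slbie %0" : : "r" (ea));
		asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (slot));

		return e;	/* buggy: valid bit still set; fixed: 0 */
	}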