aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorAlexander Graf <agraf@suse.de>2010-03-24 16:48:20 -0400
committerAvi Kivity <avi@redhat.com>2010-05-17 05:16:54 -0400
commit4b389ca2e733b986c5282690e4e0314f000e6228 (patch)
treeba9ad5948a68352c0ba68d7fc8fc010f4bbc7ec3 /arch
parentc8027f165228b4c62bad31609d5c9e98ddfb8ef6 (diff)
KVM: PPC: Book3S_32 guest MMU fixes
This patch makes the VSID of mapped pages always reflect all the special cases we have, like split mode. It also changes the tlbie mask to 0x0ffff000 according to the spec. The mask we used before was incorrect. Signed-off-by: Alexander Graf <agraf@suse.de> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h1
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c30
2 files changed, 24 insertions, 7 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 9f5a9921927e..b47b2f516eff 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -44,6 +44,7 @@ struct kvmppc_sr {
44 bool Ks; 44 bool Ks;
45 bool Kp; 45 bool Kp;
46 bool nx; 46 bool nx;
47 bool valid;
47}; 48};
48 49
49struct kvmppc_bat { 50struct kvmppc_bat {
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 1483a9bdddae..7071e22b42ff 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -57,6 +57,8 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
57 57
58static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, 58static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
59 struct kvmppc_pte *pte, bool data); 59 struct kvmppc_pte *pte, bool data);
60static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
61 u64 *vsid);
60 62
61static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr) 63static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
62{ 64{
@@ -66,13 +68,14 @@ static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t e
66static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, 68static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
67 bool data) 69 bool data)
68{ 70{
69 struct kvmppc_sr *sre = find_sr(to_book3s(vcpu), eaddr); 71 u64 vsid;
70 struct kvmppc_pte pte; 72 struct kvmppc_pte pte;
71 73
72 if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data)) 74 if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
73 return pte.vpage; 75 return pte.vpage;
74 76
75 return (((u64)eaddr >> 12) & 0xffff) | (((u64)sre->vsid) << 16); 77 kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
78 return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
76} 79}
77 80
78static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu) 81static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
@@ -142,8 +145,13 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
142 bat->bepi_mask); 145 bat->bepi_mask);
143 } 146 }
144 if ((eaddr & bat->bepi_mask) == bat->bepi) { 147 if ((eaddr & bat->bepi_mask) == bat->bepi) {
148 u64 vsid;
149 kvmppc_mmu_book3s_32_esid_to_vsid(vcpu,
150 eaddr >> SID_SHIFT, &vsid);
151 vsid <<= 16;
152 pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;
153
145 pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask); 154 pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
146 pte->vpage = (eaddr >> 12) | VSID_BAT;
147 pte->may_read = bat->pp; 155 pte->may_read = bat->pp;
148 pte->may_write = bat->pp > 1; 156 pte->may_write = bat->pp > 1;
149 pte->may_execute = true; 157 pte->may_execute = true;
@@ -302,6 +310,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
302 /* And then put in the new SR */ 310 /* And then put in the new SR */
303 sre->raw = value; 311 sre->raw = value;
304 sre->vsid = (value & 0x0fffffff); 312 sre->vsid = (value & 0x0fffffff);
313 sre->valid = (value & 0x80000000) ? false : true;
305 sre->Ks = (value & 0x40000000) ? true : false; 314 sre->Ks = (value & 0x40000000) ? true : false;
306 sre->Kp = (value & 0x20000000) ? true : false; 315 sre->Kp = (value & 0x20000000) ? true : false;
307 sre->nx = (value & 0x10000000) ? true : false; 316 sre->nx = (value & 0x10000000) ? true : false;
@@ -312,7 +321,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
312 321
313static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large) 322static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
314{ 323{
315 kvmppc_mmu_pte_flush(vcpu, ea, ~0xFFFULL); 324 kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
316} 325}
317 326
318static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid, 327static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
@@ -333,15 +342,22 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
333 break; 342 break;
334 case MSR_DR|MSR_IR: 343 case MSR_DR|MSR_IR:
335 { 344 {
336 ulong ea; 345 ulong ea = esid << SID_SHIFT;
337 ea = esid << SID_SHIFT; 346 struct kvmppc_sr *sr = find_sr(to_book3s(vcpu), ea);
338 *vsid = find_sr(to_book3s(vcpu), ea)->vsid; 347
348 if (!sr->valid)
349 return -1;
350
351 *vsid = sr->vsid;
339 break; 352 break;
340 } 353 }
341 default: 354 default:
342 BUG(); 355 BUG();
343 } 356 }
344 357
358 if (vcpu->arch.msr & MSR_PR)
359 *vsid |= VSID_PR;
360
345 return 0; 361 return 0;
346} 362}
347 363