author     Alexander Graf <agraf@suse.de>          2010-01-09 21:27:47 -0500
committer  Marcelo Tosatti <mtosatti@redhat.com>   2010-03-01 10:35:50 -0500
commit     4b5c9b7f9bdd76a3c860731db08bfc6758e96e29
tree       b2e5424d7c965e3cf343f685978814bd817b9066 /arch/powerpc/kvm/book3s_64_mmu.c
parent     5f2b105a1d6a137c8cfb2792b79128db965880a8
KVM: PPC: Make large pages work
An SLB entry contains two pieces of information related to size:

  1) PTE size
  2) SLB size

The L bit defines the PTE to be "large" (usually meaning 16MB), while
SLB_VSID_B_1T defines that the SLB entry should span 1TB instead of the
default 256MB.

Apparently I messed things up and just put those two in one box, shook it
heavily, and came up with the current code, which handles large pages
incorrectly because it also treats large-page SLB entries as "1TB" segment
entries.

This patch splits those two features apart, making Linux guests boot even
when they have > 256MB.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
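To make the distinction above concrete, here is a minimal standalone C sketch (not part of the patch) that decodes the two size encodings of an slbmte RS operand independently, the way the patched kvmppc_mmu_book3s_64_slbmte() does. The SLB_VSID_L and SLB_VSID_B_1T mask values are assumptions taken to match the kernel's usual definitions and are shown here only for illustration.

/*
 * Standalone sketch, not from the patch: the L bit (PTE size) and the
 * B field (segment span) are independent and must be decoded separately.
 * Mask values below are assumed to match the kernel's SLB_VSID_* defines.
 */
#include <stdint.h>
#include <stdio.h>

#define SLB_VSID_L     0x0000000000000100ULL  /* L bit: large (e.g. 16MB) PTEs  */
#define SLB_VSID_B_1T  0x4000000000000000ULL  /* B field: 1TB segment, not 256MB */

struct slbe_sketch {
        int large;      /* PTE size: segment backed by large pages */
        int tb;         /* SLB size: segment spans 1TB             */
};

static void decode_rs(uint64_t rs, struct slbe_sketch *s)
{
        /* Decoding the two bits separately is exactly the split this
         * patch introduces; conflating them was the bug. */
        s->large = (rs & SLB_VSID_L) ? 1 : 0;
        s->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
}

int main(void)
{
        struct slbe_sketch s;

        decode_rs(SLB_VSID_L, &s);      /* large pages, 256MB segment */
        printf("large=%d tb=%d\n", s.large, s.tb);

        decode_rs(SLB_VSID_B_1T, &s);   /* normal pages, 1TB segment  */
        printf("large=%d tb=%d\n", s.large, s.tb);

        return 0;
}

Running the sketch prints large=1 tb=0 for the first call and large=0 tb=1 for the second, showing that neither flag implies the other.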
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu.c')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index e4beeb371a73..512dcff77554 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -54,7 +54,7 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
 		if (!vcpu_book3s->slb[i].valid)
 			continue;
 
-		if (vcpu_book3s->slb[i].large)
+		if (vcpu_book3s->slb[i].tb)
 			cmp_esid = esid_1t;
 
 		if (vcpu_book3s->slb[i].esid == cmp_esid)
@@ -65,9 +65,10 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
 		eaddr, esid, esid_1t);
 	for (i = 0; i < vcpu_book3s->slb_nr; i++) {
 		if (vcpu_book3s->slb[i].vsid)
-			dprintk("  %d: %c%c %llx %llx\n", i,
+			dprintk("  %d: %c%c%c %llx %llx\n", i,
 				vcpu_book3s->slb[i].valid ? 'v' : ' ',
 				vcpu_book3s->slb[i].large ? 'l' : ' ',
+				vcpu_book3s->slb[i].tb ? 't' : ' ',
 				vcpu_book3s->slb[i].esid,
 				vcpu_book3s->slb[i].vsid);
 	}
@@ -84,7 +85,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 	if (!slb)
 		return 0;
 
-	if (slb->large)
+	if (slb->tb)
 		return (((u64)eaddr >> 12) & 0xfffffff) |
 			(((u64)slb->vsid) << 28);
 
@@ -309,7 +310,8 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
 	slbe = &vcpu_book3s->slb[slb_nr];
 
 	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
-	slbe->esid  = slbe->large ? esid_1t : esid;
+	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
+	slbe->esid  = slbe->tb ? esid_1t : esid;
 	slbe->vsid  = rs >> 12;
 	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
 	slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
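The esid assignment in the final hunk is the crux of the fix: which ESID width an entry is matched under depends only on the segment span (the tb bit), never on the PTE size. Below is a hedged standalone sketch of that selection, assuming the usual 256MB (2^28) and 1TB (2^40) segment sizes; the helper names are hypothetical and not taken from the kernel.

/*
 * Standalone sketch, not from the patch: deriving the ESID an SLB lookup
 * must compare against.  256MB segments use the EA bits above 2^28,
 * 1TB segments use the EA bits above 2^40.  Helper names are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t esid_256m(uint64_t ea) { return ea >> 28; }  /* 2^28 = 256MB */
static uint64_t esid_1t(uint64_t ea)   { return ea >> 40; }  /* 2^40 = 1TB   */

/* Pick the ESID based on the segment span (tb), never on the page size. */
static uint64_t slb_esid_for(uint64_t ea, int tb)
{
        return tb ? esid_1t(ea) : esid_256m(ea);
}

int main(void)
{
        uint64_t ea = 0x12345678ULL;    /* some guest effective address */

        /* A large-page (16MB) segment that still spans only 256MB must be
         * matched with the 256MB ESID; treating it as a 1TB segment, as the
         * old code did, compares against the wrong ESID and misses the
         * entry. */
        printf("256MB segment esid: 0x%llx\n",
               (unsigned long long)slb_esid_for(ea, 0));
        printf("1TB   segment esid: 0x%llx\n",
               (unsigned long long)slb_esid_for(ea, 1));
        return 0;
}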