author		Paul Mackerras <paulus@samba.org>	2013-06-22 03:16:32 -0400
committer	Alexander Graf <agraf@suse.de>		2013-06-29 21:33:22 -0400
commit		0f296829b5a59d5a157699cbb23672ccfdd8df4c (patch)
tree		1df3b1f6c3982a95f179ecfd621b3871d17958ec /arch/powerpc/kvm/book3s_64_mmu.c
parent		6ed1485f65f0eb17aa7b649d5abe0b011b92f718 (diff)
KVM: PPC: Book3S PR: Allow guest to use 1TB segments
With this, the guest can use 1TB segments as well as 256MB segments.
Since we now have the situation where a single emulated guest segment
could correspond to multiple shadow segments (as the shadow segments
are still 256MB segments), this adds a new kvmppc_mmu_flush_segment()
to scan for all shadow segments that need to be removed.

This restructures the guest HPT (hashed page table) lookup code to use
the correct hashing and matching functions for HPTEs within a 1TB
segment.  We use the standard hpt_hash() function instead of
open-coding the hash calculation, and we use HPTE_V_COMPARE() with an
AVPN value that has the B (segment size) field included.  The
calculation of avpn is done a little earlier since it doesn't change
in the loop starting at the do_second label.

The computation in kvmppc_mmu_book3s_64_esid_to_vsid() changes so that
it returns a 256MB VSID even if the guest SLB entry is a 1TB entry.
This is because the users of this function are creating 256MB SLB
entries.  We set a new VSID_1T flag so that entries created from 1T
segments don't collide with entries from 256MB segments.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
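For reference, the hashing and matching helpers the message cites are
not part of this diff; they live in arch/powerpc/include/asm/mmu-hash64.h.
A sketch of their contemporary mainline definitions (reproduced from
memory, not verbatim; consult the header for the authoritative versions):

	/* Match an AVPN against an HPTE first doubleword, ignoring the
	 * low-order valid/secondary/software bits.  Both operands carry
	 * the B (segment size) field, which is why the patch ORs
	 * SLB_VSID_B_1T into avpn for 1TB segments. */
	#define HPTE_V_COMPARE(x, y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))

	static inline unsigned long hpt_hash(unsigned long vpn,
					     unsigned int shift, int ssize)
	{
		unsigned long mask;
		unsigned long hash, vsid;

		if (ssize == MMU_SEGSIZE_256M) {
			mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
			hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
				((vpn & mask) >> (shift - VPN_SHIFT));
		} else {
			/* 1T segments also fold the VSID into itself */
			mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
			vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
			hash = vsid ^ (vsid << 25) ^
				((vpn & mask) >> (shift - VPN_SHIFT));
		}
		return hash & 0x7fffffffffUL;
	}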
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu.c')
-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu.c	60
1 file changed, 44 insertions(+), 16 deletions(-)
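Before the diff itself: the new kvmppc_slb_calc_vpn() helper added below
generalizes the open-coded virtual-page-number math it replaces.  With
the standard mmu-hash64.h constants (VPN_SHIFT = 12, SID_SHIFT = 28,
SID_SHIFT_1T = 40), a minimal sketch of the helper specialized by hand
for each segment size, showing it matches the expressions deleted from
kvmppc_mmu_book3s_64_ea_to_vp():

	/* Illustration only, not part of the patch. */
	static u64 vpn_256m(u64 vsid, u64 eaddr)
	{
		u64 off = eaddr & ((1ul << 28) - 1);	/* offset within segment */
		return (off >> 12) | (vsid << (28 - 12));
		/* == (((u64)eaddr >> 12) & 0xffff) | (vsid << 16), the old code */
	}

	static u64 vpn_1t(u64 vsid, u64 eaddr)
	{
		u64 off = eaddr & ((1ul << 40) - 1);
		return (off >> 12) | (vsid << (40 - 12));
		/* == (((u64)eaddr >> 12) & 0xfffffff) | (vsid << 28), the old code */
	}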
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 2e93bb50a71c..ee435ba6b92a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -26,6 +26,7 @@
 #include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
 
 /* #define DEBUG_MMU */
 
@@ -76,6 +77,24 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
 	return NULL;
 }
 
+static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
+{
+	return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
+}
+
+static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
+{
+	return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
+}
+
+static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
+{
+	eaddr &= kvmppc_slb_offset_mask(slb);
+
+	return (eaddr >> VPN_SHIFT) |
+		((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
+}
+
 static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 					 bool data)
 {
@@ -85,11 +104,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 	if (!slb)
 		return 0;
 
-	if (slb->tb)
-		return (((u64)eaddr >> 12) & 0xfffffff) |
-		       (((u64)slb->vsid) << 28);
-
-	return (((u64)eaddr >> 12) & 0xffff) | (((u64)slb->vsid) << 16);
+	return kvmppc_slb_calc_vpn(slb, eaddr);
 }
 
 static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
@@ -100,7 +115,8 @@ static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
 static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
 {
 	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
-	return ((eaddr & 0xfffffff) >> p);
+
+	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
 }
 
 static hva_t kvmppc_mmu_book3s_64_get_pteg(
@@ -109,13 +125,15 @@ static hva_t kvmppc_mmu_book3s_64_get_pteg(
 				bool second)
 {
 	u64 hash, pteg, htabsize;
-	u32 page;
+	u32 ssize;
 	hva_t r;
+	u64 vpn;
 
-	page = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
 	htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);
 
-	hash = slbe->vsid ^ page;
+	vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
+	ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
+	hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
 	if (second)
 		hash = ~hash;
 	hash &= ((1ULL << 39ULL) - 1ULL);
@@ -146,7 +164,7 @@ static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
 	u64 avpn;
 
 	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
-	avpn |= slbe->vsid << (28 - p);
+	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
 
 	if (p < 24)
 		avpn >>= ((80 - p) - 56) - 8;
@@ -189,13 +207,15 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	if (!slbe)
 		goto no_seg_found;
 
+	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
+	if (slbe->tb)
+		avpn |= SLB_VSID_B_1T;
+
 do_second:
 	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
 	if (kvm_is_error_hva(ptegp))
 		goto no_page_found;
 
-	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
-
 	if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
 		printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp);
 		goto no_page_found;
@@ -218,7 +238,7 @@ do_second:
 			continue;
 
 		/* AVPN compare */
-		if (HPTE_V_AVPN_VAL(avpn) == HPTE_V_AVPN_VAL(v)) {
+		if (HPTE_V_COMPARE(avpn, v)) {
 			u8 pp = (r & HPTE_R_PP) | key;
 			int eaddr_mask = 0xFFF;
 
@@ -324,7 +344,7 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
 	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
 	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
 	slbe->esid  = slbe->tb ? esid_1t : esid;
-	slbe->vsid  = rs >> 12;
+	slbe->vsid  = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
 	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
 	slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
 	slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
@@ -365,6 +385,7 @@ static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
 static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
 {
 	struct kvmppc_slb *slbe;
+	u64 seg_size;
 
 	dprintk("KVM MMU: slbie(0x%llx)\n", ea);
 
@@ -377,7 +398,8 @@ static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
 
 	slbe->valid = false;
 
-	kvmppc_mmu_map_segment(vcpu, ea);
+	seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
+	kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
 }
 
 static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
@@ -457,8 +479,14 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 
 	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
-		if (slb)
+		if (slb) {
 			gvsid = slb->vsid;
+			if (slb->tb) {
+				gvsid <<= SID_SHIFT_1T - SID_SHIFT;
+				gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
+				gvsid |= VSID_1T;
+			}
+		}
 	}
 
 	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
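A closing note on the final hunk: since SID_SHIFT_1T - SID_SHIFT = 12,
each 1TB guest segment spans 4096 of the 256MB shadow segments, and the
low 12 bits of the ESID select which one.  A minimal standalone sketch
of the gvsid construction above (VSID_1T is the new flag this series
introduces; its exact value lives in the KVM headers, so it is assumed
defined here):

	/* Illustration only: shadow VSID for a 1TB guest segment. */
	static u64 shadow_gvsid_1t(u64 guest_vsid, u64 esid)
	{
		u64 gvsid = guest_vsid << 12;		/* SID_SHIFT_1T - SID_SHIFT */
		gvsid |= esid & ((1ul << 12) - 1);	/* low 12 ESID bits */
		return gvsid | VSID_1T;			/* avoid 256MB collisions */
	}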