author	Paul Mackerras <paulus@samba.org>	2013-09-20 00:52:45 -0400
committer	Alexander Graf <agraf@suse.de>	2013-10-17 08:45:03 -0400
commit	c9029c341da646ab0c9911ea4c118eaa0a2eb0fa (patch)
tree	b2474961ed358c115d3dbd4dc9a29791b67e1a75 /arch
parent	a4a0f2524acc2c602cadd8e743be19d86f3a746b (diff)
KVM: PPC: Book3S PR: Use 64k host pages where possible
Currently, PR KVM uses 4k pages for the host-side mappings of guest memory, regardless of the host page size. When the host page size is 64kB, we might as well use 64k host page mappings for guest mappings of 64kB and larger pages and for guest real-mode mappings. However, the magic page has to remain a 4k page.

To implement this, we first add another flag bit to the guest VSID values we use, to indicate that this segment is one where host pages should be mapped using 64k pages. For segments with this bit set we set the bits in the shadow SLB entry to indicate a 64k base page size. When faulting in host HPTEs for this segment, we make them 64k HPTEs instead of 4k. We record the pagesize in struct hpte_cache for use when invalidating the HPTE.

For now we restrict the segment containing the magic page (if any) to 4k pages. It should be possible to lift this restriction in future by ensuring that the magic 4k page is appropriately positioned within a host 64k page.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
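As a quick illustration of the flag-bit scheme described above, here is a minimal, self-contained sketch (not kernel code, and not part of this patch): a high bit in the shadow VSID, mirroring the VSID_64K value added in kvm_book3s.h below, marks a segment whose host-side HPTEs should use a 64k base page size, and the mapping path reads that bit back to choose the host page size. The MMU_PAGE_* enum and the host_page_size() helper here are illustrative stand-ins, not the kernel's definitions.

/* Standalone sketch of the VSID_64K flag-bit idea; compiles with a plain C compiler. */
#include <stdint.h>
#include <stdio.h>

#define VSID_64K	0x0800000000000000ULL	/* "use 64k host pages" flag, as in the patch */

enum { MMU_PAGE_4K, MMU_PAGE_64K };		/* stand-ins for the kernel's page-size indices */

/* Pick the host HPTE page size from a shadow VSID, the way the mapping path does. */
static int host_page_size(uint64_t vsid)
{
	return (vsid & VSID_64K) ? MMU_PAGE_64K : MMU_PAGE_4K;
}

int main(void)
{
	uint64_t vsid_4k  = 0x123456789ULL;		/* ordinary segment: 4k host pages */
	uint64_t vsid_64k = vsid_4k | VSID_64K;		/* flagged segment: 64k host pages */

	printf("plain segment   -> %s host pages\n",
	       host_page_size(vsid_4k) == MMU_PAGE_64K ? "64k" : "4k");
	printf("flagged segment -> %s host pages\n",
	       host_page_size(vsid_64k) == MMU_PAGE_64K ? "64k" : "4k");
	return 0;
}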
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/kvm_book3s.h	6
-rw-r--r--	arch/powerpc/kvm/book3s_32_mmu.c	1
-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu.c	35
-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu_host.c	27
-rw-r--r--	arch/powerpc/kvm/book3s_pr.c	1
5 files changed, 57 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 1d4a1202e2d5..6bf20b4a2841 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -66,6 +66,7 @@ struct hpte_cache {
 	u64 pfn;
 	ulong slot;
 	struct kvmppc_pte pte;
+	int pagesize;
 };
 
 struct kvmppc_vcpu_book3s {
@@ -113,8 +114,9 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST		1
 #define CONTEXT_GUEST_END	2
 
-#define VSID_REAL	0x0fffffffffc00000ULL
-#define VSID_BAT	0x0fffffffffb00000ULL
+#define VSID_REAL	0x07ffffffffc00000ULL
+#define VSID_BAT	0x07ffffffffb00000ULL
+#define VSID_64K	0x0800000000000000ULL
 #define VSID_1T		0x1000000000000000ULL
 #define VSID_REAL_DR	0x2000000000000000ULL
 #define VSID_REAL_IR	0x4000000000000000ULL
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index c8cefdd15fd8..af045533e685 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -308,6 +308,7 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	ulong mp_ea = vcpu->arch.magic_page_ea;
 
 	pte->eaddr = eaddr;
+	pte->page_size = MMU_PAGE_4K;
 
 	/* Magic page override */
 	if (unlikely(mp_ea) &&
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 8277264a0bc5..ffcde01cb995 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -542,6 +542,16 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
 	kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
 }
 
+#ifdef CONFIG_PPC_64K_PAGES
+static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
+{
+	ulong mp_ea = vcpu->arch.magic_page_ea;
+
+	return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
+		(mp_ea >> SID_SHIFT) == esid;
+}
+#endif
+
 static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 					     u64 *vsid)
 {
@@ -549,11 +559,13 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	struct kvmppc_slb *slb;
 	u64 gvsid = esid;
 	ulong mp_ea = vcpu->arch.magic_page_ea;
+	int pagesize = MMU_PAGE_64K;
 
 	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
 		if (slb) {
 			gvsid = slb->vsid;
+			pagesize = slb->base_page_size;
 			if (slb->tb) {
 				gvsid <<= SID_SHIFT_1T - SID_SHIFT;
 				gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
@@ -564,28 +576,41 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 
 	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 	case 0:
-		*vsid = VSID_REAL | esid;
+		gvsid = VSID_REAL | esid;
 		break;
 	case MSR_IR:
-		*vsid = VSID_REAL_IR | gvsid;
+		gvsid |= VSID_REAL_IR;
 		break;
 	case MSR_DR:
-		*vsid = VSID_REAL_DR | gvsid;
+		gvsid |= VSID_REAL_DR;
 		break;
 	case MSR_DR|MSR_IR:
 		if (!slb)
 			goto no_slb;
 
-		*vsid = gvsid;
 		break;
 	default:
 		BUG();
 		break;
 	}
 
+#ifdef CONFIG_PPC_64K_PAGES
+	/*
+	 * Mark this as a 64k segment if the host is using
+	 * 64k pages, the host MMU supports 64k pages and
+	 * the guest segment page size is >= 64k,
+	 * but not if this segment contains the magic page.
+	 */
+	if (pagesize >= MMU_PAGE_64K &&
+	    mmu_psize_defs[MMU_PAGE_64K].shift &&
+	    !segment_contains_magic_page(vcpu, esid))
+		gvsid |= VSID_64K;
+#endif
+
 	if (vcpu->arch.shared->msr & MSR_PR)
-		*vsid |= VSID_PR;
+		gvsid |= VSID_PR;
 
+	*vsid = gvsid;
 	return 0;
 
 no_slb:
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index e5240524bf6c..6bda504ceda7 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -34,7 +34,7 @@
 void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
 	ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
-			       MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M,
+			       pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
 			       false);
 }
 
@@ -90,6 +90,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	int attempt = 0;
 	struct kvmppc_sid_map *map;
 	int r = 0;
+	int hpsize = MMU_PAGE_4K;
 
 	/* Get host physical address for gpa */
 	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
@@ -99,7 +100,6 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 		goto out;
 	}
 	hpaddr <<= PAGE_SHIFT;
-	hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
 
 	/* and write the mapping ea -> hpa into the pt */
 	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
@@ -117,8 +117,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 		goto out;
 	}
 
-	vsid = map->host_vsid;
-	vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
+	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
 
 	if (!orig_pte->may_write)
 		rflags |= HPTE_R_PP;
@@ -130,7 +129,16 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	else
 		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
 
-	hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);
+	/*
+	 * Use 64K pages if possible; otherwise, on 64K page kernels,
+	 * we need to transfer 4 more bits from guest real to host real addr.
+	 */
+	if (vsid & VSID_64K)
+		hpsize = MMU_PAGE_64K;
+	else
+		hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
+
+	hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);
 
 map_again:
 	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
135map_again: 143map_again:
136 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 144 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
@@ -143,7 +151,7 @@ map_again:
 	}
 
 	ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
-				 MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M);
+				 hpsize, hpsize, MMU_SEGSIZE_256M);
 
 	if (ret < 0) {
 		/* If we couldn't map a primary PTE, try a secondary */
@@ -168,6 +176,7 @@ map_again:
 		pte->host_vpn = vpn;
 		pte->pte = *orig_pte;
 		pte->pfn = hpaddr >> PAGE_SHIFT;
+		pte->pagesize = hpsize;
 
 		kvmppc_mmu_hpte_cache_map(vcpu, pte);
 	}
@@ -291,6 +300,12 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 	slb_vsid &= ~SLB_VSID_KP;
 	slb_esid |= slb_index;
 
+#ifdef CONFIG_PPC_64K_PAGES
+	/* Set host segment base page size to 64K if possible */
+	if (gvsid & VSID_64K)
+		slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
+#endif
+
 	svcpu->slb[slb_index].esid = slb_esid;
 	svcpu->slb[slb_index].vsid = slb_vsid;
 
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 6cc99583ed39..e9e8c748e673 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -422,6 +422,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte.raddr = eaddr & KVM_PAM;
 		pte.eaddr = eaddr;
 		pte.vpage = eaddr >> 12;
+		pte.page_size = MMU_PAGE_64K;
 	}
 
 	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {