author     Alexander Graf <agraf@suse.de>    2010-07-29 08:47:54 -0400
committer  Avi Kivity <avi@redhat.com>       2010-10-24 04:50:48 -0400
commit     e8508940a88691ad3d1c46608cd968eb4be9cbc5 (patch)
tree       c14d6cbc9c947cdbf69a255f04d44024a79ed6e6 /arch/powerpc/kvm
parent     beb03f14da9ceff76ff08cbb8af064b52dc21f7e (diff)
KVM: PPC: Magic Page Book3s support
We need to override EA as well as PA lookups for the magic page. When the
guest tells us to project it, the magic page overrides any guest mappings.

In order to reflect that, we need to hook into all the MMU layers of KVM to
force map the magic page if necessary.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
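Every hunk below instantiates the same check: if the guest has registered a
magic page (nonzero EA/PA), an access that falls into that page bypasses the
regular BAT/SLB/HTAB walk and is force-mapped with full permissions. The
following is a minimal sketch of that shared shape, not code from the patch;
xlate() and regular_mmu_xlate() are illustrative names, while the field names
follow the hunks below:

/* Hedged sketch of the override pattern each MMU layer repeats. */
static int xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte,
		 bool data)
{
	ulong mp_ea = vcpu->arch.magic_page_ea;

	/* Same 4k page as the magic page, and guest in kernel mode? */
	if (unlikely(mp_ea) &&
	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
	    !(vcpu->arch.shared->msr & MSR_PR)) {
		/* Force map: redirect to the magic page's PA, full rwx. */
		pte->raddr = (vcpu->arch.magic_page_pa & KVM_PAM) |
			     (eaddr & 0xfff);
		pte->may_read = pte->may_write = pte->may_execute = true;
		return 0;
	}
	return regular_mmu_xlate(vcpu, eaddr, pte, data); /* hypothetical */
}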
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s.c             | 35
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c      | 16
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c |  2
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c      | 30
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c |  9
5 files changed, 80 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 0ed5376df82..eee97b5a740 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -419,6 +419,25 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 	}
 }
 
+pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	ulong mp_pa = vcpu->arch.magic_page_pa;
+
+	/* Magic page override */
+	if (unlikely(mp_pa) &&
+	    unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
+		     ((mp_pa & PAGE_MASK) & KVM_PAM))) {
+		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+		pfn_t pfn;
+
+		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
+		get_page(pfn_to_page(pfn));
+		return pfn;
+	}
+
+	return gfn_to_pfn(vcpu->kvm, gfn);
+}
+
 /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
  * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
  * emulate 32 bytes dcbz length.
@@ -554,6 +573,13 @@ mmio:
 
 static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
+	ulong mp_pa = vcpu->arch.magic_page_pa;
+
+	if (unlikely(mp_pa) &&
+	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
+		return 1;
+	}
+
 	return kvm_is_visible_gfn(vcpu->kvm, gfn);
 }
 
@@ -1257,6 +1283,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	struct kvmppc_vcpu_book3s *vcpu_book3s;
 	struct kvm_vcpu *vcpu;
 	int err = -ENOMEM;
+	unsigned long p;
 
 	vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
 	if (!vcpu_book3s)
@@ -1274,8 +1301,10 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	if (err)
 		goto free_shadow_vcpu;
 
-	vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-	if (!vcpu->arch.shared)
+	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+	/* the real shared page fills the last 4k of our page */
+	vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
+	if (!p)
 		goto uninit_vcpu;
 
 	vcpu->arch.host_retip = kvm_return_point;
@@ -1322,7 +1351,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
-	free_page((unsigned long)vcpu->arch.shared);
+	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 	kvm_vcpu_uninit(vcpu);
 	kfree(vcpu_book3s->shadow_vcpu);
 	vfree(vcpu_book3s);
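Two details in the book3s.c changes deserve a note. kvmppc_gfn_to_pfn() takes
a reference on the shared page before handing out its pfn, so the refcount
stays balanced with the release paths that put mapped pages. And the
allocation change parks the shared struct in the last 4 KiB of the host page:
on a 4 KiB host that is the whole page, on a 64 KiB host it is the top slice,
which is what the guest maps as its 4 KiB magic page. free_page() must then
mask the stored pointer back down to the page start. A minimal sketch of that
pairing, with hypothetical wrapper names, assuming the shared struct fits in
4 KiB:

/* Sketch, kernel context; alloc_shared_page()/free_shared_page() are
 * illustrative wrappers, not functions from the patch. */
static void *alloc_shared_page(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL | __GFP_ZERO);

	if (!p)
		return NULL;
	/* On a 4k host this is p itself; on a 64k host it is the last
	 * 4k slice, the part the guest sees as the magic page. */
	return (void *)(p + PAGE_SIZE - 4096);
}

static void free_shared_page(void *shared)
{
	/* The stored pointer is not page aligned on 64k hosts. */
	free_page((unsigned long)shared & PAGE_MASK);
}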
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 449bce5f021..a7d121adc84 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -281,8 +281,24 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 				      struct kvmppc_pte *pte, bool data)
 {
 	int r;
+	ulong mp_ea = vcpu->arch.magic_page_ea;
 
 	pte->eaddr = eaddr;
+
+	/* Magic page override */
+	if (unlikely(mp_ea) &&
+	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
+	    !(vcpu->arch.shared->msr & MSR_PR)) {
+		pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
+		pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff);
+		pte->raddr &= KVM_PAM;
+		pte->may_execute = true;
+		pte->may_read = true;
+		pte->may_write = true;
+
+		return 0;
+	}
+
 	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
 	if (r < 0)
 		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
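Note that the override runs before both the BAT and the page-table lookup, so
a projected magic page wins over any guest mapping of the same address,
exactly as the commit message states. The match masks off the low 12 bits on
both sides and requires MSR_PR clear, i.e. it only applies to the guest
kernel. A standalone illustration of just that predicate, with MSR_PR's bit
position taken from the PowerPC MSR layout:

#include <stdbool.h>
#include <stdint.h>

#define MSR_PR (1ULL << 14)	/* problem state: set means user mode */

/* Illustrative mirror of the check in kvmppc_mmu_book3s_32_xlate(). */
static bool magic_page_hit(uint64_t eaddr, uint64_t mp_ea, uint64_t msr)
{
	return mp_ea &&						/* page registered */
	       (eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL) &&	/* same 4k page */
	       !(msr & MSR_PR);					/* guest kernel mode */
}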
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 67b8c38d932..05e8c9eb0e1 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -147,7 +147,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	struct hpte_cache *pte;
 
 	/* Get host physical address for gpa */
-	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
 	if (kvm_is_error_hva(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
 			orig_pte->eaddr);
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 58aa8409dae..d7889ef3211 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -163,6 +163,22 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	bool found = false;
 	bool perm_err = false;
 	int second = 0;
+	ulong mp_ea = vcpu->arch.magic_page_ea;
+
+	/* Magic page override */
+	if (unlikely(mp_ea) &&
+	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
+	    !(vcpu->arch.shared->msr & MSR_PR)) {
+		gpte->eaddr = eaddr;
+		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
+		gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
+		gpte->raddr &= KVM_PAM;
+		gpte->may_execute = true;
+		gpte->may_read = true;
+		gpte->may_write = true;
+
+		return 0;
+	}
 
 	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr);
 	if (!slbe)
@@ -445,6 +461,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	ulong ea = esid << SID_SHIFT;
 	struct kvmppc_slb *slb;
 	u64 gvsid = esid;
+	ulong mp_ea = vcpu->arch.magic_page_ea;
 
 	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 		slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
@@ -464,7 +481,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		break;
 	case MSR_DR|MSR_IR:
 		if (!slb)
-			return -ENOENT;
+			goto no_slb;
 
 		*vsid = gvsid;
 		break;
@@ -477,6 +494,17 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		*vsid |= VSID_PR;
 
 	return 0;
+
+no_slb:
+	/* Catch magic page case */
+	if (unlikely(mp_ea) &&
+	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
+	    !(vcpu->arch.shared->msr & MSR_PR)) {
+		*vsid = VSID_REAL | esid;
+		return 0;
+	}
+
+	return -EINVAL;
 }
 
 static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
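The esid_to_vsid() change is the subtle part of this file. With both MSR_DR
and MSR_IR set but no SLB entry covering the segment, the old code failed
hard with -ENOENT and the mapping attempt died. Now the no_slb path first
checks whether the faulting segment is the one containing the magic page:
ESIDs are effective addresses shifted down by SID_SHIFT (28, i.e. 256 MiB
segments), and a match in kernel mode synthesizes a VSID_REAL-based vsid so
the magic page can still be mapped. A small, hypothetical check of the
segment comparison (the addresses are made up):

#include <stdint.h>
#include <stdio.h>

#define SID_SHIFT 28	/* 256 MiB segments on Book3S-64 */

int main(void)
{
	uint64_t mp_ea = 0xc000000000001000ULL;	/* hypothetical magic page EA */
	uint64_t fault = 0xc000000000001f08ULL;	/* access inside that page */

	/* Both land in the same 256 MiB segment, so the no_slb fallback
	 * would fire and synthesize VSID_REAL | esid. */
	printf("esid match: %d\n",
	       (int)((fault >> SID_SHIFT) == (mp_ea >> SID_SHIFT)));
	return 0;
}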
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 71c1f9027ab..6cdd19a82bd 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -101,18 +101,13 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	struct kvmppc_sid_map *map;
 
 	/* Get host physical address for gpa */
-	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
 	if (kvm_is_error_hva(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
 		return -EINVAL;
 	}
 	hpaddr <<= PAGE_SHIFT;
-#if PAGE_SHIFT == 12
-#elif PAGE_SHIFT == 16
-	hpaddr |= orig_pte->raddr & 0xf000;
-#else
-#error Unknown page size
-#endif
+	hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
 
 	/* and write the mapping ea -> hpa into the pt */
 	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
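The final hunk folds the old PAGE_SHIFT #if ladder into one expression. The
mask ~0xfffULL & ~PAGE_MASK keeps exactly the bits between the guest's 4 KiB
page offset and the host's page size: on a 4 KiB host (PAGE_MASK == ~0xfff)
it is 0, matching the old empty 4 KiB branch, and on a 64 KiB host
(PAGE_MASK == ~0xffff) it is 0xf000, matching the old explicit OR, so the OR
selects which 4 KiB slice of the host page the guest address names. A quick
userspace check of that identity, with the two PAGE_MASK values assumed per
host page size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask_4k  = ~0xfffULL;	/* PAGE_MASK on a 4k host */
	uint64_t mask_64k = ~0xffffULL;	/* PAGE_MASK on a 64k host */

	/* Bits between the 4k guest offset and the host page size. */
	printf("4k host:  %#llx\n", (unsigned long long)(~0xfffULL & ~mask_4k));
	printf("64k host: %#llx\n", (unsigned long long)(~0xfffULL & ~mask_64k));
	/* prints 0 and 0xf000 respectively */
	return 0;
}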