about summary refs log tree commit diff stats
path: root/arch/powerpc
diff options
context:
space:
mode:
authorAlexander Graf <agraf@suse.de>2010-04-19 20:49:48 -0400
committerAvi Kivity <avi@redhat.com>2010-05-17 05:18:58 -0400
commitf7bc74e1c306636a659a04805474b2f8fcbd1f7e (patch)
tree5aa5c5c9676d577b55bbc700f1d5a6ee5c137a27 /arch/powerpc
parent7fdaec997cc8ef77e8da7ed70f3d9f074b61c31f (diff)
KVM: PPC: Improve split mode
When in split mode, instruction relocation and data relocation are not equal. So far we implemented this mode by reserving a special pseudo-VSID for the two cases and flushing all PTEs when going into split mode, which is slow. Unfortunately 32bit Linux and Mac OS X use split mode extensively. So to not slow down things too much, I came up with a different idea: Mark the split mode with a bit in the VSID and then treat it like any other segment. This means we can just flush the shadow segment cache, but keep the PTEs intact. I verified that this works with ppc32 Linux and Mac OS X 10.4 guests and does speed them up. Signed-off-by: Alexander Graf <agraf@suse.de> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h9
-rw-r--r--arch/powerpc/kvm/book3s.c28
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c21
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c27
4 files changed, 46 insertions(+), 39 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 5d3bd0cc4116..6f74d93725a0 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -100,11 +100,10 @@ struct kvmppc_vcpu_book3s {
100#define CONTEXT_GUEST 1 100#define CONTEXT_GUEST 1
101#define CONTEXT_GUEST_END 2 101#define CONTEXT_GUEST_END 2
102 102
103#define VSID_REAL_DR 0x7ffffffffff00000ULL 103#define VSID_REAL 0x1fffffffffc00000ULL
104#define VSID_REAL_IR 0x7fffffffffe00000ULL 104#define VSID_BAT 0x1fffffffffb00000ULL
105#define VSID_SPLIT_MASK 0x7fffffffffe00000ULL 105#define VSID_REAL_DR 0x2000000000000000ULL
106#define VSID_REAL 0x7fffffffffc00000ULL 106#define VSID_REAL_IR 0x4000000000000000ULL
107#define VSID_BAT 0x7fffffffffb00000ULL
108#define VSID_PR 0x8000000000000000ULL 107#define VSID_PR 0x8000000000000000ULL
109 108
110extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask); 109extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index f66de7e518f7..397701d39ae7 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -148,16 +148,8 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
148 } 148 }
149 } 149 }
150 150
151 if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) || 151 if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
152 (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) { 152 (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
153 bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
154 bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
155
156 /* Flush split mode PTEs */
157 if (dr != ir)
158 kvmppc_mmu_pte_vflush(vcpu, VSID_SPLIT_MASK,
159 VSID_SPLIT_MASK);
160
161 kvmppc_mmu_flush_segments(vcpu); 153 kvmppc_mmu_flush_segments(vcpu);
162 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); 154 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
163 } 155 }
@@ -535,6 +527,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
535 bool is_mmio = false; 527 bool is_mmio = false;
536 bool dr = (vcpu->arch.msr & MSR_DR) ? true : false; 528 bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
537 bool ir = (vcpu->arch.msr & MSR_IR) ? true : false; 529 bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
530 u64 vsid;
538 531
539 relocated = data ? dr : ir; 532 relocated = data ? dr : ir;
540 533
@@ -552,13 +545,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
552 545
553 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { 546 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
554 case 0: 547 case 0:
555 pte.vpage |= VSID_REAL; 548 pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
556 break; 549 break;
557 case MSR_DR: 550 case MSR_DR:
558 pte.vpage |= VSID_REAL_DR;
559 break;
560 case MSR_IR: 551 case MSR_IR:
561 pte.vpage |= VSID_REAL_IR; 552 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
553
554 if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
555 pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
556 else
557 pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
558 pte.vpage |= vsid;
559
560 if (vsid == -1)
561 page_found = -EINVAL;
562 break; 562 break;
563 } 563 }
564 564
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 33186b745c90..0b10503c8a4a 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -330,30 +330,35 @@ static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool lar
330static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, 330static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
331 u64 *vsid) 331 u64 *vsid)
332{ 332{
333 ulong ea = esid << SID_SHIFT;
334 struct kvmppc_sr *sr;
335 u64 gvsid = esid;
336
337 if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
338 sr = find_sr(to_book3s(vcpu), ea);
339 if (sr->valid)
340 gvsid = sr->vsid;
341 }
342
333 /* In case we only have one of MSR_IR or MSR_DR set, let's put 343 /* In case we only have one of MSR_IR or MSR_DR set, let's put
334 that in the real-mode context (and hope RM doesn't access 344 that in the real-mode context (and hope RM doesn't access
335 high memory) */ 345 high memory) */
336 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { 346 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
337 case 0: 347 case 0:
338 *vsid = (VSID_REAL >> 16) | esid; 348 *vsid = VSID_REAL | esid;
339 break; 349 break;
340 case MSR_IR: 350 case MSR_IR:
341 *vsid = (VSID_REAL_IR >> 16) | esid; 351 *vsid = VSID_REAL_IR | gvsid;
342 break; 352 break;
343 case MSR_DR: 353 case MSR_DR:
344 *vsid = (VSID_REAL_DR >> 16) | esid; 354 *vsid = VSID_REAL_DR | gvsid;
345 break; 355 break;
346 case MSR_DR|MSR_IR: 356 case MSR_DR|MSR_IR:
347 {
348 ulong ea = esid << SID_SHIFT;
349 struct kvmppc_sr *sr = find_sr(to_book3s(vcpu), ea);
350
351 if (!sr->valid) 357 if (!sr->valid)
352 return -1; 358 return -1;
353 359
354 *vsid = sr->vsid; 360 *vsid = sr->vsid;
355 break; 361 break;
356 }
357 default: 362 default:
358 BUG(); 363 BUG();
359 } 364 }
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index a9241e90a68b..612de6e4d74b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -442,29 +442,32 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
442static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, 442static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
443 u64 *vsid) 443 u64 *vsid)
444{ 444{
445 ulong ea = esid << SID_SHIFT;
446 struct kvmppc_slb *slb;
447 u64 gvsid = esid;
448
449 if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
450 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
451 if (slb)
452 gvsid = slb->vsid;
453 }
454
445 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { 455 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
446 case 0: 456 case 0:
447 *vsid = (VSID_REAL >> 16) | esid; 457 *vsid = VSID_REAL | esid;
448 break; 458 break;
449 case MSR_IR: 459 case MSR_IR:
450 *vsid = (VSID_REAL_IR >> 16) | esid; 460 *vsid = VSID_REAL_IR | gvsid;
451 break; 461 break;
452 case MSR_DR: 462 case MSR_DR:
453 *vsid = (VSID_REAL_DR >> 16) | esid; 463 *vsid = VSID_REAL_DR | gvsid;
454 break; 464 break;
455 case MSR_DR|MSR_IR: 465 case MSR_DR|MSR_IR:
456 { 466 if (!slb)
457 ulong ea;
458 struct kvmppc_slb *slb;
459 ea = esid << SID_SHIFT;
460 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
461 if (slb)
462 *vsid = slb->vsid;
463 else
464 return -ENOENT; 467 return -ENOENT;
465 468
469 *vsid = gvsid;
466 break; 470 break;
467 }
468 default: 471 default:
469 BUG(); 472 BUG();
470 break; 473 break;