aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kvm/book3s_64_mmu.c
diff options
context:
space:
mode:
authorAlexander Graf <agraf@suse.de>2010-07-29 08:47:43 -0400
committerAvi Kivity <avi@redhat.com>2010-10-24 04:50:43 -0400
commit666e7252a15b7fc4a116e65deaf6da5e4ce660e3 (patch)
treee7a56f03cb4e181eacd4f481fb3e6e038ad05b82 /arch/powerpc/kvm/book3s_64_mmu.c
parent96bc451a153297bf1f99ef2d633d512ea349ae7a (diff)
KVM: PPC: Convert MSR to shared page
One of the most obvious registers to share with the guest directly is the MSR. The MSR contains the "interrupts enabled" flag which the guest has to toggle in critical sections. So in order to bring the overhead of interrupt en- and disabling down, let's put msr into the shared page. Keep in mind that even though you can fully read its contents, writing to it doesn't always update all state. There are a few safe fields that don't require hypervisor interaction. See the documentation for a list of MSR bits that are safe to be set from inside the guest. Signed-off-by: Alexander Graf <agraf@suse.de> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu.c')
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c12
1 file changed, 6 insertions, 6 deletions
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 4025ea26b3c1..58aa8409dae0 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -180,9 +180,9 @@ do_second:
180 goto no_page_found; 180 goto no_page_found;
181 } 181 }
182 182
183 if ((vcpu->arch.msr & MSR_PR) && slbe->Kp) 183 if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
184 key = 4; 184 key = 4;
185 else if (!(vcpu->arch.msr & MSR_PR) && slbe->Ks) 185 else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
186 key = 4; 186 key = 4;
187 187
188 for (i=0; i<16; i+=2) { 188 for (i=0; i<16; i+=2) {
@@ -381,7 +381,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
381 for (i = 1; i < vcpu_book3s->slb_nr; i++) 381 for (i = 1; i < vcpu_book3s->slb_nr; i++)
382 vcpu_book3s->slb[i].valid = false; 382 vcpu_book3s->slb[i].valid = false;
383 383
384 if (vcpu->arch.msr & MSR_IR) { 384 if (vcpu->arch.shared->msr & MSR_IR) {
385 kvmppc_mmu_flush_segments(vcpu); 385 kvmppc_mmu_flush_segments(vcpu);
386 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); 386 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
387 } 387 }
@@ -446,13 +446,13 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
446 struct kvmppc_slb *slb; 446 struct kvmppc_slb *slb;
447 u64 gvsid = esid; 447 u64 gvsid = esid;
448 448
449 if (vcpu->arch.msr & (MSR_DR|MSR_IR)) { 449 if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
450 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea); 450 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
451 if (slb) 451 if (slb)
452 gvsid = slb->vsid; 452 gvsid = slb->vsid;
453 } 453 }
454 454
455 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { 455 switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
456 case 0: 456 case 0:
457 *vsid = VSID_REAL | esid; 457 *vsid = VSID_REAL | esid;
458 break; 458 break;
@@ -473,7 +473,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
473 break; 473 break;
474 } 474 }
475 475
476 if (vcpu->arch.msr & MSR_PR) 476 if (vcpu->arch.shared->msr & MSR_PR)
477 *vsid |= VSID_PR; 477 *vsid |= VSID_PR;
478 478
479 return 0; 479 return 0;