path: root/arch/powerpc/kvm/book3s_32_mmu.c
author     Alexander Graf <agraf@suse.de>  2010-07-29 08:47:43 -0400
committer  Avi Kivity <avi@redhat.com>     2010-10-24 04:50:43 -0400
commit     666e7252a15b7fc4a116e65deaf6da5e4ce660e3 (patch)
tree       e7a56f03cb4e181eacd4f481fb3e6e038ad05b82 /arch/powerpc/kvm/book3s_32_mmu.c
parent     96bc451a153297bf1f99ef2d633d512ea349ae7a (diff)
KVM: PPC: Convert MSR to shared page
One of the most obvious registers to share directly with the guest is the MSR. The MSR contains the "interrupts enabled" flag, which the guest has to toggle in critical sections.

So, to bring down the overhead of enabling and disabling interrupts, let's put the MSR into the shared page. Keep in mind that even though the guest can read its full contents, writing to it doesn't always update all state: only a few safe fields can be changed without hypervisor interaction. See the documentation for the list of MSR bits that are safe to set from inside the guest.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
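For illustration, here is a minimal sketch of the idea, using a structure and helper names invented for this example (the real definitions live in the kernel's KVM PPC headers): the hypervisor and guest map the same page, the MSR copy lives in that page, and a "safe" bit such as MSR_EE can be flipped by the guest without trapping.

/*
 * Sketch only -- not the kernel's actual definitions. A shared structure
 * holds the guest-visible MSR, and guest-side helpers toggle the external
 * interrupt enable bit without a trap into the hypervisor.
 */
#include <stdint.h>

#define SKETCH_MSR_EE (1ULL << 15)	/* "external interrupts enabled" bit */

struct shared_page_sketch {
	uint64_t msr;			/* guest-readable MSR copy */
};

/* Assume this page is mapped into both guest and hypervisor address space. */
static struct shared_page_sketch *shared;

static inline void guest_irq_disable(void)
{
	/* Clearing MSR_EE is a "safe" update: no hypercall required. */
	shared->msr &= ~SKETCH_MSR_EE;
}

static inline void guest_irq_enable(void)
{
	/*
	 * Setting MSR_EE is also safe; the hypervisor consults the shared
	 * MSR when it decides whether to deliver a pending interrupt.
	 */
	shared->msr |= SKETCH_MSR_EE;
}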
Diffstat (limited to 'arch/powerpc/kvm/book3s_32_mmu.c')
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 3292d76101d2..449bce5f021a 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -133,7 +133,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 		else
 			bat = &vcpu_book3s->ibat[i];
 
-		if (vcpu->arch.msr & MSR_PR) {
+		if (vcpu->arch.shared->msr & MSR_PR) {
 			if (!bat->vp)
 				continue;
 		} else {
@@ -214,8 +214,8 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 			pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF);
 			pp = pteg[i+1] & 3;
 
-			if ((sre->Kp && (vcpu->arch.msr & MSR_PR)) ||
-			    (sre->Ks && !(vcpu->arch.msr & MSR_PR)))
+			if ((sre->Kp && (vcpu->arch.shared->msr & MSR_PR)) ||
+			    (sre->Ks && !(vcpu->arch.shared->msr & MSR_PR)))
 				pp |= 4;
 
 			pte->may_write = false;
@@ -334,7 +334,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	struct kvmppc_sr *sr;
 	u64 gvsid = esid;
 
-	if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 		sr = find_sr(to_book3s(vcpu), ea);
 		if (sr->valid)
 			gvsid = sr->vsid;
@@ -343,7 +343,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	/* In case we only have one of MSR_IR or MSR_DR set, let's put
 	   that in the real-mode context (and hope RM doesn't access
 	   high memory) */
-	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 	case 0:
 		*vsid = VSID_REAL | esid;
 		break;
@@ -363,7 +363,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		BUG();
 	}
 
-	if (vcpu->arch.msr & MSR_PR)
+	if (vcpu->arch.shared->msr & MSR_PR)
 		*vsid |= VSID_PR;
 
 	return 0;
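Every hunk above makes the same substitution, vcpu->arch.msr becoming vcpu->arch.shared->msr. As a usage note, the two hypothetical helpers below (not part of this patch, assuming the kernel's KVM PPC headers are available) show the kind of predicate the MMU code is evaluating against the shared MSR.

/* Hypothetical helpers, not part of this patch. */
static inline bool kvmppc_guest_is_problem_state(struct kvm_vcpu *vcpu)
{
	/* MSR_PR set: the guest is executing in user (problem) state. */
	return !!(vcpu->arch.shared->msr & MSR_PR);
}

static inline bool kvmppc_guest_translation_on(struct kvm_vcpu *vcpu)
{
	/* Instruction or data address translation enabled. */
	return !!(vcpu->arch.shared->msr & (MSR_IR | MSR_DR));
}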