aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kvm/booke.c
diff options
context:
space:
mode:
authorAlexander Graf <agraf@suse.de>2010-07-29 08:47:43 -0400
committerAvi Kivity <avi@redhat.com>2010-10-24 04:50:43 -0400
commit666e7252a15b7fc4a116e65deaf6da5e4ce660e3 (patch)
treee7a56f03cb4e181eacd4f481fb3e6e038ad05b82 /arch/powerpc/kvm/booke.c
parent96bc451a153297bf1f99ef2d633d512ea349ae7a (diff)
KVM: PPC: Convert MSR to shared page
One of the most obvious registers to share with the guest directly is the MSR. The MSR contains the "interrupts enabled" flag which the guest has to toggle in critical sections. So in order to bring the overhead of interrupt en- and disabling down, let's put msr into the shared page. Keep in mind that even though you can fully read its contents, writing to it doesn't always update all state. There are a few safe fields that don't require hypervisor interaction. See the documentation for a list of MSR bits that are safe to be set from inside the guest. Signed-off-by: Alexander Graf <agraf@suse.de> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm/booke.c')
-rw-r--r--arch/powerpc/kvm/booke.c20
1 file changed, 10 insertions, 10 deletions
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 8d4e35f5372c..4ec9d49a1cb9 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -62,7 +62,7 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
62{ 62{
63 int i; 63 int i;
64 64
65 printk("pc: %08lx msr: %08lx\n", vcpu->arch.pc, vcpu->arch.msr); 65 printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
66 printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); 66 printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
67 printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1); 67 printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);
68 68
@@ -169,34 +169,34 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
169 break; 169 break;
170 case BOOKE_IRQPRIO_CRITICAL: 170 case BOOKE_IRQPRIO_CRITICAL:
171 case BOOKE_IRQPRIO_WATCHDOG: 171 case BOOKE_IRQPRIO_WATCHDOG:
172 allowed = vcpu->arch.msr & MSR_CE; 172 allowed = vcpu->arch.shared->msr & MSR_CE;
173 msr_mask = MSR_ME; 173 msr_mask = MSR_ME;
174 break; 174 break;
175 case BOOKE_IRQPRIO_MACHINE_CHECK: 175 case BOOKE_IRQPRIO_MACHINE_CHECK:
176 allowed = vcpu->arch.msr & MSR_ME; 176 allowed = vcpu->arch.shared->msr & MSR_ME;
177 msr_mask = 0; 177 msr_mask = 0;
178 break; 178 break;
179 case BOOKE_IRQPRIO_EXTERNAL: 179 case BOOKE_IRQPRIO_EXTERNAL:
180 case BOOKE_IRQPRIO_DECREMENTER: 180 case BOOKE_IRQPRIO_DECREMENTER:
181 case BOOKE_IRQPRIO_FIT: 181 case BOOKE_IRQPRIO_FIT:
182 allowed = vcpu->arch.msr & MSR_EE; 182 allowed = vcpu->arch.shared->msr & MSR_EE;
183 msr_mask = MSR_CE|MSR_ME|MSR_DE; 183 msr_mask = MSR_CE|MSR_ME|MSR_DE;
184 break; 184 break;
185 case BOOKE_IRQPRIO_DEBUG: 185 case BOOKE_IRQPRIO_DEBUG:
186 allowed = vcpu->arch.msr & MSR_DE; 186 allowed = vcpu->arch.shared->msr & MSR_DE;
187 msr_mask = MSR_ME; 187 msr_mask = MSR_ME;
188 break; 188 break;
189 } 189 }
190 190
191 if (allowed) { 191 if (allowed) {
192 vcpu->arch.srr0 = vcpu->arch.pc; 192 vcpu->arch.srr0 = vcpu->arch.pc;
193 vcpu->arch.srr1 = vcpu->arch.msr; 193 vcpu->arch.srr1 = vcpu->arch.shared->msr;
194 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; 194 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
195 if (update_esr == true) 195 if (update_esr == true)
196 vcpu->arch.esr = vcpu->arch.queued_esr; 196 vcpu->arch.esr = vcpu->arch.queued_esr;
197 if (update_dear == true) 197 if (update_dear == true)
198 vcpu->arch.dear = vcpu->arch.queued_dear; 198 vcpu->arch.dear = vcpu->arch.queued_dear;
199 kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask); 199 kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
200 200
201 clear_bit(priority, &vcpu->arch.pending_exceptions); 201 clear_bit(priority, &vcpu->arch.pending_exceptions);
202 } 202 }
@@ -265,7 +265,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
265 break; 265 break;
266 266
267 case BOOKE_INTERRUPT_PROGRAM: 267 case BOOKE_INTERRUPT_PROGRAM:
268 if (vcpu->arch.msr & MSR_PR) { 268 if (vcpu->arch.shared->msr & MSR_PR) {
269 /* Program traps generated by user-level software must be handled 269 /* Program traps generated by user-level software must be handled
270 * by the guest kernel. */ 270 * by the guest kernel. */
271 kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); 271 kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
@@ -467,7 +467,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
467int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 467int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
468{ 468{
469 vcpu->arch.pc = 0; 469 vcpu->arch.pc = 0;
470 vcpu->arch.msr = 0; 470 vcpu->arch.shared->msr = 0;
471 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ 471 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
472 472
473 vcpu->arch.shadow_pid = 1; 473 vcpu->arch.shadow_pid = 1;
@@ -490,7 +490,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
490 regs->ctr = vcpu->arch.ctr; 490 regs->ctr = vcpu->arch.ctr;
491 regs->lr = vcpu->arch.lr; 491 regs->lr = vcpu->arch.lr;
492 regs->xer = kvmppc_get_xer(vcpu); 492 regs->xer = kvmppc_get_xer(vcpu);
493 regs->msr = vcpu->arch.msr; 493 regs->msr = vcpu->arch.shared->msr;
494 regs->srr0 = vcpu->arch.srr0; 494 regs->srr0 = vcpu->arch.srr0;
495 regs->srr1 = vcpu->arch.srr1; 495 regs->srr1 = vcpu->arch.srr1;
496 regs->pid = vcpu->arch.pid; 496 regs->pid = vcpu->arch.pid;