author	Scott Wood <scottwood@freescale.com>	2011-06-14 19:34:29 -0400
committer	Avi Kivity <avi@redhat.com>	2011-07-12 06:16:32 -0400
commit	ecee273fc48f7f48f0c2f074335c43aaa790c308 (patch)
tree	ba12981dbad927816a9cc51042aa2febd85fc74e /arch/powerpc/kvm
parent	c51584d52e3878aa9b2bb98cdfb87173e7acf560 (diff)
KVM: PPC: booke: use shadow_msr
Keep the guest MSR and the guest-mode true MSR separate, rather than
modifying the guest MSR on each guest entry to produce a true MSR.

Any bits which should be modified based on guest MSR must be explicitly
propagated from vcpu->arch.shared->msr to vcpu->arch.shadow_msr in
kvmppc_set_msr().

While we're modifying the guest entry code, reorder a few instructions
to bury some load latencies.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
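To make the propagation rule concrete, here is a minimal sketch of what an
explicit propagation in kvmppc_set_msr() could look like. This body is NOT
part of this commit: kvmppc_set_msr() is a real booke.c function, but the
KVMPPC_GUEST_MSR_BITS mask and the statements below are assumptions chosen
to mirror the idea, not the actual kernel code.

	/*
	 * Illustrative sketch only -- not code from this commit.
	 * The shadow (true) MSR starts from the bits the host always
	 * forces on guest entry, and any guest-controlled bits must be
	 * copied over explicitly from the guest-visible MSR.
	 */

	/* Hypothetical pass-through mask, named here for illustration. */
	#define KVMPPC_GUEST_MSR_BITS	(MSR_CE | MSR_EE | MSR_PR | MSR_ME)

	void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
	{
		vcpu->arch.shared->msr = new_msr;

		/* Bits unconditionally imposed on the guest; matches the
		 * initialization added to kvm_arch_vcpu_setup() below. */
		vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;

		/* Explicitly propagate the guest-controlled bits. */
		vcpu->arch.shadow_msr |= new_msr & KVMPPC_GUEST_MSR_BITS;
	}

The reordering in booke_interrupts.S serves the second point of the message:
the lwz loads into r5-r7 are issued before mtctr, so their results are not
consumed by mtcr/mtsrr0/mtsrr1 until several instructions later, hiding the
load-to-use latency.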
Diffstat (limited to 'arch/powerpc/kvm')
 arch/powerpc/kvm/booke.c            |  1 +
 arch/powerpc/kvm/booke_interrupts.S | 17 ++++++-----------
 2 files changed, 7 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 8462b3a1c1c7..05cedb5f8210 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -514,6 +514,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.pc = 0;
 	vcpu->arch.shared->msr = 0;
+	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
 	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 
 	vcpu->arch.shadow_pid = 1;
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index b58ccae95904..55410cc45ad7 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -24,8 +24,6 @@
 #include <asm/page.h>
 #include <asm/asm-offsets.h>
 
-#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)
-
 #define VCPU_GPR(n)	(VCPU_GPRS + (n * 4))
 
 /* The host stack layout: */
@@ -405,20 +403,17 @@ lightweight_exit:
 
 	/* Finish loading guest volatiles and jump to guest. */
 	lwz	r3, VCPU_CTR(r4)
+	lwz	r5, VCPU_CR(r4)
+	lwz	r6, VCPU_PC(r4)
+	lwz	r7, VCPU_SHADOW_MSR(r4)
 	mtctr	r3
-	lwz	r3, VCPU_CR(r4)
-	mtcr	r3
+	mtcr	r5
+	mtsrr0	r6
+	mtsrr1	r7
 	lwz	r5, VCPU_GPR(r5)(r4)
 	lwz	r6, VCPU_GPR(r6)(r4)
 	lwz	r7, VCPU_GPR(r7)(r4)
 	lwz	r8, VCPU_GPR(r8)(r4)
-	lwz	r3, VCPU_PC(r4)
-	mtsrr0	r3
-	lwz	r3, VCPU_SHARED(r4)
-	lwz	r3, (VCPU_SHARED_MSR + 4)(r3)
-	oris	r3, r3, KVMPPC_MSR_MASK@h
-	ori	r3, r3, KVMPPC_MSR_MASK@l
-	mtsrr1	r3
 
 	/* Clear any debug events which occurred since we disabled MSR[DE].
 	 * XXX This gives us a 3-instruction window in which a breakpoint