author      Scott Wood <scottwood@freescale.com>    2011-06-14 19:34:31 -0400
committer   Avi Kivity <avi@redhat.com>             2011-07-12 06:16:32 -0400
commit      4cd35f675ba41a99a477e28a6add4a66833325f2 (patch)
tree        d4b26dadccbad63b63ce90b358efa5903c4e075e /arch/powerpc/kvm
parent      ecee273fc48f7f48f0c2f074335c43aaa790c308 (diff)
KVM: PPC: e500: Save/restore SPE state
This is done lazily.  The SPE save will be done only if the guest has used
SPE since the last preemption or heavyweight exit.  Restore will be done
only on demand, when enabling MSR_SPE in the shadow MSR, in response to an
SPE fault or mtmsr emulation.

For SPEFSCR, Linux already switches it on context switch (non-lazily), so
the only remaining bit is to save it between qemu and the guest.

Signed-off-by: Liu Yu <yu.liu@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
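To make the diff below easier to follow, here is a condensed C sketch of the lazy-switching logic the patch adds. The wrapper names (sketch_*) are illustrative only; kvmppc_vcpu_enable_spe(), kvmppc_vcpu_disable_spe() and the shadow MSR_SPE handling are the pieces the patch actually introduces, and the authoritative code is in the booke.c and e500.c hunks.

/* Condensed sketch only -- the sketch_* names are hypothetical; the real
 * code is in the diff below. */

/* SPE-unavailable exit: load guest SPE state on demand. */
static void sketch_spe_unavail_exit(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.shared->msr & MSR_SPE)
                /* Guest owns SPE: restore its registers and set the shadow
                 * MSR_SPE bit so the guest runs with SPE enabled. */
                kvmppc_vcpu_enable_spe(vcpu);
        else
                /* Guest never enabled MSR[SPE]: reflect the fault back. */
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
}

/* Heavyweight exit (kvmppc_core_vcpu_put): save only if guest state is
 * actually live in the SPE registers, as tracked by the shadow MSR. */
static void sketch_spe_heavyweight_exit(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.shadow_msr & MSR_SPE)
                kvmppc_vcpu_disable_spe(vcpu);  /* saves EVRs/acc, clears bit */
}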
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--   arch/powerpc/kvm/booke.c              84
-rw-r--r--   arch/powerpc/kvm/booke.h              22
-rw-r--r--   arch/powerpc/kvm/booke_interrupts.S   38
-rw-r--r--   arch/powerpc/kvm/e500.c                7
4 files changed, 134 insertions, 17 deletions
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 05cedb5f8210..0ecbecb2f7cc 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -13,6 +13,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
@@ -78,6 +79,57 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
         }
 }
 
+#ifdef CONFIG_SPE
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
+{
+        preempt_disable();
+        enable_kernel_spe();
+        kvmppc_save_guest_spe(vcpu);
+        vcpu->arch.shadow_msr &= ~MSR_SPE;
+        preempt_enable();
+}
+
+static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
+{
+        preempt_disable();
+        enable_kernel_spe();
+        kvmppc_load_guest_spe(vcpu);
+        vcpu->arch.shadow_msr |= MSR_SPE;
+        preempt_enable();
+}
+
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+        if (vcpu->arch.shared->msr & MSR_SPE) {
+                if (!(vcpu->arch.shadow_msr & MSR_SPE))
+                        kvmppc_vcpu_enable_spe(vcpu);
+        } else if (vcpu->arch.shadow_msr & MSR_SPE) {
+                kvmppc_vcpu_disable_spe(vcpu);
+        }
+}
+#else
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+
+/* Helper function for "full" MSR writes. No need to call this if only EE is
+ * changing. */
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+        if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
+                kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
+
+        vcpu->arch.shared->msr = new_msr;
+
+        if (vcpu->arch.shared->msr & MSR_WE) {
+                kvm_vcpu_block(vcpu);
+                kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
+        };
+
+        kvmppc_vcpu_sync_spe(vcpu);
+}
+
 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
 {
@@ -344,10 +396,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 r = RESUME_GUEST;
                 break;
 
-        case BOOKE_INTERRUPT_SPE_UNAVAIL:
-                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
+#ifdef CONFIG_SPE
+        case BOOKE_INTERRUPT_SPE_UNAVAIL: {
+                if (vcpu->arch.shared->msr & MSR_SPE)
+                        kvmppc_vcpu_enable_spe(vcpu);
+                else
+                        kvmppc_booke_queue_irqprio(vcpu,
+                                                   BOOKE_IRQPRIO_SPE_UNAVAIL);
                 r = RESUME_GUEST;
                 break;
+        }
 
         case BOOKE_INTERRUPT_SPE_FP_DATA:
                 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
@@ -358,6 +416,28 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                 r = RESUME_GUEST;
                 break;
+#else
+        case BOOKE_INTERRUPT_SPE_UNAVAIL:
+                /*
+                 * Guest wants SPE, but host kernel doesn't support it. Send
+                 * an "unimplemented operation" program check to the guest.
+                 */
+                kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
+                r = RESUME_GUEST;
+                break;
+
+        /*
+         * These really should never happen without CONFIG_SPE,
+         * as we should never enable the real MSR[SPE] in the guest.
+         */
+        case BOOKE_INTERRUPT_SPE_FP_DATA:
+        case BOOKE_INTERRUPT_SPE_FP_ROUND:
+                printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
+                       __func__, exit_nr, vcpu->arch.pc);
+                run->hw.hardware_exit_reason = exit_nr;
+                r = RESUME_HOST;
+                break;
+#endif
 
         case BOOKE_INTERRUPT_DATA_STORAGE:
                 kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 492bb7030358..0fa1732ddcb7 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -52,24 +52,18 @@
 
 extern unsigned long kvmppc_booke_handlers;
 
-/* Helper function for "full" MSR writes. No need to call this if only EE is
- * changing. */
-static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
-{
-        if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
-                kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
-
-        vcpu->arch.shared->msr = new_msr;
-
-        if (vcpu->arch.shared->msr & MSR_WE) {
-                kvm_vcpu_block(vcpu);
-                kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
-        };
-}
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
 
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance);
 int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
 int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
 
+/* low-level asm code to transfer guest state */
+void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
+void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
+
+/* high-level function, manages flags, host state */
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
+
 #endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 55410cc45ad7..8cb3dfe29f75 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -13,6 +13,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
+ * Copyright 2011 Freescale Semiconductor, Inc.
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  */
@@ -239,6 +240,14 @@ _GLOBAL(kvmppc_resume_host)
 heavyweight_exit:
         /* Not returning to guest. */
 
+#ifdef CONFIG_SPE
+        /* save guest SPEFSCR and load host SPEFSCR */
+        mfspr   r9, SPRN_SPEFSCR
+        stw     r9, VCPU_SPEFSCR(r4)
+        lwz     r9, VCPU_HOST_SPEFSCR(r4)
+        mtspr   SPRN_SPEFSCR, r9
+#endif
+
         /* We already saved guest volatile register state; now save the
          * non-volatiles. */
         stw     r15, VCPU_GPR(r15)(r4)
@@ -340,6 +349,14 @@ _GLOBAL(__kvmppc_vcpu_run)
         lwz     r30, VCPU_GPR(r30)(r4)
         lwz     r31, VCPU_GPR(r31)(r4)
 
+#ifdef CONFIG_SPE
+        /* save host SPEFSCR and load guest SPEFSCR */
+        mfspr   r3, SPRN_SPEFSCR
+        stw     r3, VCPU_HOST_SPEFSCR(r4)
+        lwz     r3, VCPU_SPEFSCR(r4)
+        mtspr   SPRN_SPEFSCR, r3
+#endif
+
 lightweight_exit:
         stw     r2, HOST_R2(r1)
 
@@ -425,3 +442,24 @@ lightweight_exit:
         lwz     r3, VCPU_GPR(r3)(r4)
         lwz     r4, VCPU_GPR(r4)(r4)
         rfi
+
+#ifdef CONFIG_SPE
+_GLOBAL(kvmppc_save_guest_spe)
+        cmpi    0,r3,0
+        beqlr-
+        SAVE_32EVRS(0, r4, r3, VCPU_EVR)
+        evxor   evr6, evr6, evr6
+        evmwumiaa evr6, evr6, evr6
+        li      r4,VCPU_ACC
+        evstddx evr6, r4, r3            /* save acc */
+        blr
+
+_GLOBAL(kvmppc_load_guest_spe)
+        cmpi    0,r3,0
+        beqlr-
+        li      r4,VCPU_ACC
+        evlddx  evr6,r4,r3
+        evmra   evr6,evr6               /* load acc */
+        REST_32EVRS(0, r4, r3, VCPU_EVR)
+        blr
+#endif
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 318dbc61ba44..797a7447c268 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Yu Liu, <yu.liu@freescale.com>
  *
@@ -41,6 +41,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
         kvmppc_e500_tlb_put(vcpu);
+
+#ifdef CONFIG_SPE
+        if (vcpu->arch.shadow_msr & MSR_SPE)
+                kvmppc_vcpu_disable_spe(vcpu);
+#endif
 }
 
 int kvmppc_core_check_processor_compat(void)