author     Scott Wood <scottwood@freescale.com>    2011-06-14 19:34:31 -0400
committer  Avi Kivity <avi@redhat.com>             2011-07-12 06:16:32 -0400
commit     4cd35f675ba41a99a477e28a6add4a66833325f2 (patch)
tree       d4b26dadccbad63b63ce90b358efa5903c4e075e /arch/powerpc
parent     ecee273fc48f7f48f0c2f074335c43aaa790c308 (diff)
KVM: PPC: e500: Save/restore SPE state
This is done lazily. The SPE save will be done only if the guest has used SPE since the last preemption or heavyweight exit. Restore will be done only on demand, when enabling MSR_SPE in the shadow MSR, in response to an SPE fault or mtmsr emulation.

For SPEFSCR, Linux already switches it on context switch (non-lazily), so the only remaining bit is to save it between qemu and the guest.

Signed-off-by: Liu Yu <yu.liu@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
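As a reading aid (not part of the commit): the lazy switch described above reduces to comparing the guest-visible MSR[SPE] bit with the shadow MSR the hardware actually runs with, and moving SPE state only when the two disagree. The following minimal, self-contained C sketch illustrates that decision; toy_vcpu, sync_spe and the MSR_SPE value used here are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

#define MSR_SPE 0x02000000u	/* illustrative bit value; see the kernel headers for the real one */

/* stand-ins for vcpu->arch.shared->msr and vcpu->arch.shadow_msr */
struct toy_vcpu {
	unsigned int guest_msr;   /* what the guest thinks MSR is */
	unsigned int shadow_msr;  /* what the hardware really runs with */
	bool spe_state_loaded;    /* true once guest EVRs/ACC are in the CPU */
};

/* mirrors the shape of kvmppc_vcpu_sync_spe() in the patch below */
static void sync_spe(struct toy_vcpu *v)
{
	bool guest_wants = v->guest_msr & MSR_SPE;
	bool loaded      = v->shadow_msr & MSR_SPE;

	if (guest_wants && !loaded) {
		/* restore on demand: load guest SPE state, then expose MSR_SPE */
		v->spe_state_loaded = true;
		v->shadow_msr |= MSR_SPE;
	} else if (!guest_wants && loaded) {
		/* save only because the guest actually had SPE enabled */
		v->spe_state_loaded = false;
		v->shadow_msr &= ~MSR_SPE;
	}
}

int main(void)
{
	struct toy_vcpu v = { 0 };

	v.guest_msr |= MSR_SPE;		/* guest sets MSR[SPE], e.g. via mtmsr */
	sync_spe(&v);
	printf("after enable:  loaded=%d\n", v.spe_state_loaded);

	v.guest_msr &= ~MSR_SPE;	/* guest clears it again */
	sync_spe(&v);
	printf("after disable: loaded=%d\n", v.spe_state_loaded);
	return 0;
}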
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/include/asm/kvm_host.h      6
-rw-r--r--   arch/powerpc/include/asm/reg_booke.h     1
-rw-r--r--   arch/powerpc/kernel/asm-offsets.c        7
-rw-r--r--   arch/powerpc/kvm/booke.c                84
-rw-r--r--   arch/powerpc/kvm/booke.h                22
-rw-r--r--   arch/powerpc/kvm/booke_interrupts.S     38
-rw-r--r--   arch/powerpc/kvm/e500.c                  7
7 files changed, 148 insertions, 17 deletions
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 12cb1807e8d7..c4ce1054b866 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -195,6 +195,12 @@ struct kvm_vcpu_arch {
 	u64 fpr[32];
 	u64 fpscr;
 
+#ifdef CONFIG_SPE
+	ulong evr[32];
+	ulong spefscr;
+	ulong host_spefscr;
+	u64 acc;
+#endif
 #ifdef CONFIG_ALTIVEC
 	vector128 vr[32];
 	vector128 vscr;
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 0f0ad9fa01c1..9ec0b39f9ddc 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -318,6 +318,7 @@
 #define ESR_ILK		0x00100000	/* Instr. Cache Locking */
 #define ESR_PUO		0x00040000	/* Unimplemented Operation exception */
 #define ESR_BO		0x00020000	/* Byte Ordering */
+#define ESR_SPV		0x00000080	/* Signal Processing operation */
 
 /* Bit definitions related to the DBCR0. */
 #if defined(CONFIG_40x)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 25de8e4808a4..ecd2b3ad7ff6 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -497,6 +497,13 @@ int main(void)
 	DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
 #endif
 
+#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
+	DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
+	DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
+	DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
+	DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
+#endif
+
 #ifdef CONFIG_KVM_EXIT_TIMING
 	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
 						arch.timing_exit.tv32.tbu));
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 05cedb5f8210..0ecbecb2f7cc 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -13,6 +13,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
@@ -78,6 +79,57 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
 	}
 }
 
+#ifdef CONFIG_SPE
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	enable_kernel_spe();
+	kvmppc_save_guest_spe(vcpu);
+	vcpu->arch.shadow_msr &= ~MSR_SPE;
+	preempt_enable();
+}
+
+static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	enable_kernel_spe();
+	kvmppc_load_guest_spe(vcpu);
+	vcpu->arch.shadow_msr |= MSR_SPE;
+	preempt_enable();
+}
+
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.shared->msr & MSR_SPE) {
+		if (!(vcpu->arch.shadow_msr & MSR_SPE))
+			kvmppc_vcpu_enable_spe(vcpu);
+	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
+		kvmppc_vcpu_disable_spe(vcpu);
+	}
+}
+#else
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+
+/* Helper function for "full" MSR writes. No need to call this if only EE is
+ * changing. */
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+	if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
+		kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
+
+	vcpu->arch.shared->msr = new_msr;
+
+	if (vcpu->arch.shared->msr & MSR_WE) {
+		kvm_vcpu_block(vcpu);
+		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
+	};
+
+	kvmppc_vcpu_sync_spe(vcpu);
+}
+
 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
 {
@@ -344,10 +396,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = RESUME_GUEST;
 		break;
 
-	case BOOKE_INTERRUPT_SPE_UNAVAIL:
-		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
+#ifdef CONFIG_SPE
+	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
+		if (vcpu->arch.shared->msr & MSR_SPE)
+			kvmppc_vcpu_enable_spe(vcpu);
+		else
+			kvmppc_booke_queue_irqprio(vcpu,
+						   BOOKE_IRQPRIO_SPE_UNAVAIL);
 		r = RESUME_GUEST;
 		break;
+	}
 
 	case BOOKE_INTERRUPT_SPE_FP_DATA:
 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
@@ -358,6 +416,28 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
 		r = RESUME_GUEST;
 		break;
+#else
+	case BOOKE_INTERRUPT_SPE_UNAVAIL:
+		/*
+		 * Guest wants SPE, but host kernel doesn't support it. Send
+		 * an "unimplemented operation" program check to the guest.
+		 */
+		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
+		r = RESUME_GUEST;
+		break;
+
+	/*
+	 * These really should never happen without CONFIG_SPE,
+	 * as we should never enable the real MSR[SPE] in the guest.
+	 */
+	case BOOKE_INTERRUPT_SPE_FP_DATA:
+	case BOOKE_INTERRUPT_SPE_FP_ROUND:
+		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
+		       __func__, exit_nr, vcpu->arch.pc);
+		run->hw.hardware_exit_reason = exit_nr;
+		r = RESUME_HOST;
+		break;
+#endif
 
 	case BOOKE_INTERRUPT_DATA_STORAGE:
 		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 492bb7030358..0fa1732ddcb7 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -52,24 +52,18 @@
 
 extern unsigned long kvmppc_booke_handlers;
 
-/* Helper function for "full" MSR writes. No need to call this if only EE is
- * changing. */
-static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
-{
-	if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
-		kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
-
-	vcpu->arch.shared->msr = new_msr;
-
-	if (vcpu->arch.shared->msr & MSR_WE) {
-		kvm_vcpu_block(vcpu);
-		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
-	};
-}
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
 
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance);
 int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
 int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
 
+/* low-level asm code to transfer guest state */
+void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
+void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
+
+/* high-level function, manages flags, host state */
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
+
 #endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 55410cc45ad7..8cb3dfe29f75 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -13,6 +13,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
+ * Copyright 2011 Freescale Semiconductor, Inc.
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  */
@@ -239,6 +240,14 @@ _GLOBAL(kvmppc_resume_host)
 heavyweight_exit:
 	/* Not returning to guest. */
 
+#ifdef CONFIG_SPE
+	/* save guest SPEFSCR and load host SPEFSCR */
+	mfspr	r9, SPRN_SPEFSCR
+	stw	r9, VCPU_SPEFSCR(r4)
+	lwz	r9, VCPU_HOST_SPEFSCR(r4)
+	mtspr	SPRN_SPEFSCR, r9
+#endif
+
 	/* We already saved guest volatile register state; now save the
 	 * non-volatiles. */
 	stw	r15, VCPU_GPR(r15)(r4)
@@ -340,6 +349,14 @@ _GLOBAL(__kvmppc_vcpu_run)
 	lwz	r30, VCPU_GPR(r30)(r4)
 	lwz	r31, VCPU_GPR(r31)(r4)
 
+#ifdef CONFIG_SPE
+	/* save host SPEFSCR and load guest SPEFSCR */
+	mfspr	r3, SPRN_SPEFSCR
+	stw	r3, VCPU_HOST_SPEFSCR(r4)
+	lwz	r3, VCPU_SPEFSCR(r4)
+	mtspr	SPRN_SPEFSCR, r3
+#endif
+
 lightweight_exit:
 	stw	r2, HOST_R2(r1)
 
@@ -425,3 +442,24 @@ lightweight_exit:
 	lwz	r3, VCPU_GPR(r3)(r4)
 	lwz	r4, VCPU_GPR(r4)(r4)
 	rfi
+
+#ifdef CONFIG_SPE
+_GLOBAL(kvmppc_save_guest_spe)
+	cmpi	0,r3,0
+	beqlr-
+	SAVE_32EVRS(0, r4, r3, VCPU_EVR)
+	evxor   evr6, evr6, evr6
+	evmwumiaa evr6, evr6, evr6
+	li	r4,VCPU_ACC
+	evstddx evr6, r4, r3		/* save acc */
+	blr
+
+_GLOBAL(kvmppc_load_guest_spe)
+	cmpi	0,r3,0
+	beqlr-
+	li	r4,VCPU_ACC
+	evlddx  evr6,r4,r3
+	evmra   evr6,evr6		/* load acc */
+	REST_32EVRS(0, r4, r3, VCPU_EVR)
+	blr
+#endif
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 318dbc61ba44..797a7447c268 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Yu Liu, <yu.liu@freescale.com>
  *
@@ -41,6 +41,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvmppc_e500_tlb_put(vcpu);
+
+#ifdef CONFIG_SPE
+	if (vcpu->arch.shadow_msr & MSR_SPE)
+		kvmppc_vcpu_disable_spe(vcpu);
+#endif
 }
 
 int kvmppc_core_check_processor_compat(void)