author		Scott Wood <scottwood@freescale.com>	2011-12-20 10:34:45 -0500
committer	Avi Kivity <avi@redhat.com>	2012-04-08 05:54:15 -0400
commit		8fae845f4956de0becc115e926d33eff46722e94 (patch)
tree		22c64321beca71db6d671e7479362b0e1caddc56 /arch
parent		d30f6e480055e5be12e7a03fd11ea912a451daa5 (diff)
KVM: PPC: booke: standard PPC floating point support
e500mc has a normal PPC FPU, rather than SPE which is found
on e500v1/v2.
Based on code from Liu Yu <yu.liu@freescale.com>.
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/switch_to.h	 1
-rw-r--r--	arch/powerpc/kvm/booke.c	44
-rw-r--r--	arch/powerpc/kvm/booke.h	31
3 files changed, 76 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index caf82d0a00de..1622c356ba90 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -17,6 +17,7 @@ extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
 
 extern void giveup_fpu(struct task_struct *);
+extern void load_up_fpu(void);
 extern void disable_kernel_fp(void);
 extern void enable_kernel_fp(void);
 extern void flush_fp_to_thread(struct task_struct *);
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 75dbaeb2efa3..0b77be187cf7 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -457,6 +457,11 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
+#ifdef CONFIG_PPC_FPU
+	unsigned int fpscr;
+	int fpexc_mode;
+	u64 fpr[32];
+#endif
 
 	if (!vcpu->arch.sane) {
 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -479,7 +484,46 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	}
 
 	kvm_guest_enter();
+
+#ifdef CONFIG_PPC_FPU
+	/* Save userspace FPU state in stack */
+	enable_kernel_fp();
+	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+	fpscr = current->thread.fpscr.val;
+	fpexc_mode = current->thread.fpexc_mode;
+
+	/* Restore guest FPU state to thread */
+	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
+	current->thread.fpscr.val = vcpu->arch.fpscr;
+
+	/*
+	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
+	 * as always using the FPU.  Kernel usage of FP (via
+	 * enable_kernel_fp()) in this thread must not occur while
+	 * vcpu->fpu_active is set.
+	 */
+	vcpu->fpu_active = 1;
+
+	kvmppc_load_guest_fp(vcpu);
+#endif
+
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+
+#ifdef CONFIG_PPC_FPU
+	kvmppc_save_guest_fp(vcpu);
+
+	vcpu->fpu_active = 0;
+
+	/* Save guest FPU state from thread */
+	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
+	vcpu->arch.fpscr = current->thread.fpscr.val;
+
+	/* Restore userspace FPU state from stack */
+	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+	current->thread.fpscr.val = fpscr;
+	current->thread.fpexc_mode = fpexc_mode;
+#endif
+
 	kvm_guest_exit();
 
 out:
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index d53bcf2558f5..62c4fe55d19b 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_ppc.h>
+#include <asm/switch_to.h>
 #include "timing.h"
 
 /* interrupt priortity ordering */
@@ -96,4 +97,34 @@ enum int_class {
 
 void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
 
+/*
+ * Load up guest vcpu FP state if it's needed.
+ * It also set the MSR_FP in thread so that host know
+ * we're holding FPU, and then host can help to save
+ * guest vcpu FP state if other threads require to use FPU.
+ * This simulates an FP unavailable fault.
+ *
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+	if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
+		load_up_fpu();
+		current->thread.regs->msr |= MSR_FP;
+	}
+#endif
+}
+
+/*
+ * Save guest vcpu FP state into thread.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+	if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
+		giveup_fpu(current);
+#endif
+}
 #endif /* __KVM_BOOKE_H__ */
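
Editor's note: the ordering the booke.c hunk establishes matters. The host's userspace FP state must be parked on the stack before the guest state is copied into current->thread, and kvmppc_save_guest_fp() must force a giveup_fpu() after __kvmppc_vcpu_run() returns so the live FPU contents land back in the thread before they are read out into the vcpu and the host state is restored. Below is a condensed, illustrative C sketch of that sequence under CONFIG_PPC_FPU; run_guest_with_fp() is a hypothetical wrapper name used only for this note, while the fields and helpers (vcpu->arch.fpr, vcpu->fpu_active, kvmppc_load_guest_fp(), kvmppc_save_guest_fp(), __kvmppc_vcpu_run()) are the ones the patch itself uses.

/* Illustrative sketch only -- condensed from the booke.c hunk above. */
static int run_guest_with_fp(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	u64 fpr[32];		/* host userspace FP registers, parked on the stack */
	unsigned int fpscr;
	int fpexc_mode;
	int ret;

	/* 1. Park the host's userspace FP state. */
	enable_kernel_fp();
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

	/* 2. Hand the guest FP state to the thread and load it into the FPU. */
	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
	current->thread.fpscr.val = vcpu->arch.fpscr;
	vcpu->fpu_active = 1;		/* can't trap MSR_FP in GS-mode */
	kvmppc_load_guest_fp(vcpu);

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* 3. Flush live FPU contents back into the thread, then copy them out. */
	kvmppc_save_guest_fp(vcpu);
	vcpu->fpu_active = 0;
	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fpscr.val;

	/* 4. Restore the host's userspace FP state. */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

	return ret;
}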