aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kvm
diff options
context:
space:
mode:
authorMihai Caraman <mihai.caraman@freescale.com>2014-08-20 09:36:22 -0400
committerAlexander Graf <agraf@suse.de>2014-09-22 04:11:32 -0400
commit3efc7da61f6c5af78f67f03df8b0e1a473d8bc45 (patch)
tree834b48106b82c7e5ed6e3eacfc106bd4856a6200 /arch/powerpc/kvm
parentbc8a4e5c2504eeca248f0b668fe94a80081cb9b6 (diff)
KVM: PPC: Book3E: Increase FPU laziness
Increase FPU laziness by loading the guest state into the unit before entering the guest, instead of doing it on each vcpu schedule. Without this improvement, an interrupt may claim the floating point unit, corrupting guest state. Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com> Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--arch/powerpc/kvm/booke.c43
-rw-r--r--arch/powerpc/kvm/booke.h34
-rw-r--r--arch/powerpc/kvm/e500mc.c2
3 files changed, 36 insertions, 43 deletions
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 074b7fc795b7..91e7217db9d9 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -124,6 +124,40 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
124} 124}
125#endif 125#endif
126 126
127/*
128 * Load up guest vcpu FP state if it's needed.
129 * It also set the MSR_FP in thread so that host know
130 * we're holding FPU, and then host can help to save
131 * guest vcpu FP state if other threads require to use FPU.
132 * This simulates an FP unavailable fault.
133 *
134 * It requires to be called with preemption disabled.
135 */
136static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
137{
138#ifdef CONFIG_PPC_FPU
139 if (!(current->thread.regs->msr & MSR_FP)) {
140 enable_kernel_fp();
141 load_fp_state(&vcpu->arch.fp);
142 current->thread.fp_save_area = &vcpu->arch.fp;
143 current->thread.regs->msr |= MSR_FP;
144 }
145#endif
146}
147
148/*
149 * Save guest vcpu FP state into thread.
150 * It requires to be called with preemption disabled.
151 */
152static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
153{
154#ifdef CONFIG_PPC_FPU
155 if (current->thread.regs->msr & MSR_FP)
156 giveup_fpu(current);
157 current->thread.fp_save_area = NULL;
158#endif
159}
160
127static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu) 161static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
128{ 162{
129#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV) 163#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
@@ -658,12 +692,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
658 692
659 /* 693 /*
660 * Since we can't trap on MSR_FP in GS-mode, we consider the guest 694 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
661 * as always using the FPU. Kernel usage of FP (via 695 * as always using the FPU.
662 * enable_kernel_fp()) in this thread must not occur while
663 * vcpu->fpu_active is set.
664 */ 696 */
665 vcpu->fpu_active = 1;
666
667 kvmppc_load_guest_fp(vcpu); 697 kvmppc_load_guest_fp(vcpu);
668#endif 698#endif
669 699
@@ -687,8 +717,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
687 717
688#ifdef CONFIG_PPC_FPU 718#ifdef CONFIG_PPC_FPU
689 kvmppc_save_guest_fp(vcpu); 719 kvmppc_save_guest_fp(vcpu);
690
691 vcpu->fpu_active = 0;
692#endif 720#endif
693 721
694out: 722out:
@@ -1194,6 +1222,7 @@ out:
1194 else { 1222 else {
1195 /* interrupts now hard-disabled */ 1223 /* interrupts now hard-disabled */
1196 kvmppc_fix_ee_before_entry(); 1224 kvmppc_fix_ee_before_entry();
1225 kvmppc_load_guest_fp(vcpu);
1197 } 1226 }
1198 } 1227 }
1199 1228
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index f753543c56fa..e73d513f72d0 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -116,40 +116,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
116extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, 116extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
117 ulong *spr_val); 117 ulong *spr_val);
118 118
119/*
120 * Load up guest vcpu FP state if it's needed.
121 * It also set the MSR_FP in thread so that host know
122 * we're holding FPU, and then host can help to save
123 * guest vcpu FP state if other threads require to use FPU.
124 * This simulates an FP unavailable fault.
125 *
126 * It requires to be called with preemption disabled.
127 */
128static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
129{
130#ifdef CONFIG_PPC_FPU
131 if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
132 enable_kernel_fp();
133 load_fp_state(&vcpu->arch.fp);
134 current->thread.fp_save_area = &vcpu->arch.fp;
135 current->thread.regs->msr |= MSR_FP;
136 }
137#endif
138}
139
140/*
141 * Save guest vcpu FP state into thread.
142 * It requires to be called with preemption disabled.
143 */
144static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
145{
146#ifdef CONFIG_PPC_FPU
147 if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
148 giveup_fpu(current);
149 current->thread.fp_save_area = NULL;
150#endif
151}
152
153static inline void kvmppc_clear_dbsr(void) 119static inline void kvmppc_clear_dbsr(void)
154{ 120{
155 mtspr(SPRN_DBSR, mfspr(SPRN_DBSR)); 121 mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 000cf8242e7d..454934990672 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -145,8 +145,6 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
145 kvmppc_e500_tlbil_all(vcpu_e500); 145 kvmppc_e500_tlbil_all(vcpu_e500);
146 __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu; 146 __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu;
147 } 147 }
148
149 kvmppc_load_guest_fp(vcpu);
150} 148}
151 149
152static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu) 150static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)