author     Paul Mackerras <paulus@samba.org>        2013-10-15 05:43:01 -0400
committer  Alexander Graf <agraf@suse.de>           2014-01-09 04:14:59 -0500
commit     09548fdaf32ce77a68e7f9a8a3098c1306b04858
tree       4f57c5c932068c4aae1ff0aba76dbde7614a73b3   /arch/powerpc/kvm
parent     b1f0d94c26b64e814243b736f47e7ef40d96432c
KVM: PPC: Use load_fp/vr_state rather than load_up_fpu/altivec

The load_up_fpu and load_up_altivec functions were never intended to
be called from C, and do things like modifying the MSR value in their
callers' stack frames, which are assumed to be interrupt frames. In
addition, on 32-bit Book S they require the MMU to be off.

This makes KVM use the new load_fp_state() and load_vr_state() functions
instead of load_up_fpu/altivec. This means we can remove the assembler
glue in book3s_rmhandlers.S, and potentially fixes a bug on Book E,
where load_up_fpu was called directly from C.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
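For reference, the C-level pattern that replaces the old assembler entry points is: call enable_kernel_fp() or enable_kernel_altivec() to make the unit usable in kernel context, then call load_fp_state()/load_vr_state() to load the register file from the in-memory image, and, in the callers that need it, set MSR_FP/MSR_VEC in the thread's saved MSR so the loaded state stays live. The fragment below is a minimal illustrative sketch of that pattern only, not code from this patch: the helper name example_load_guest_fp() is made up, and the header locations are assumptions based on the kernel tree of that era.

/*
 * Illustrative sketch only -- not part of this patch.  It mirrors the
 * sequence the patch adds in kvmppc_handle_lost_ext() and
 * kvmppc_load_guest_fp(); the helper name is hypothetical.
 */
#include <linux/sched.h>        /* current (assumed include) */
#include <asm/switch_to.h>      /* enable_kernel_fp() (assumed include) */
#include <asm/processor.h>      /* struct thread_fp_state, load_fp_state() (assumed include) */
#include <asm/reg.h>            /* MSR_FP (assumed include) */

static void example_load_guest_fp(struct thread_fp_state *fp)
{
        enable_kernel_fp();                     /* claim the FPU for kernel use */
        load_fp_state(fp);                      /* load FPRs and FPSCR from *fp */
        current->thread.regs->msr |= MSR_FP;    /* keep FP enabled for this thread */
}

The Altivec path is symmetric: enable_kernel_altivec() followed by load_vr_state() on a struct thread_vr_state. Note that the real KVM callers run in contexts where preemption is already dealt with; a standalone user of this sequence would have to avoid being scheduled out between the two calls, since the loaded state lives in the physical registers.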
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_exports.c     4
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c         18
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S 47
-rw-r--r--  arch/powerpc/kvm/booke.h              3
4 files changed, 14 insertions, 58 deletions
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 852989a9bad3..20d4ea8e656d 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -25,9 +25,5 @@ EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
 #endif
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
-EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
-#ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
-#endif
 #endif
 
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 21bf7c5c9545..d63a91f825d3 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -691,7 +691,8 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #endif
         t->fp_state.fpscr = vcpu->arch.fpscr;
         t->fpexc_mode = 0;
-        kvmppc_load_up_fpu();
+        enable_kernel_fp();
+        load_fp_state(&t->fp_state);
 	}
 
 	if (msr & MSR_VEC) {
@@ -699,7 +700,8 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
         memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
         t->vr_state.vscr = vcpu->arch.vscr;
         t->vrsave = -1;
-        kvmppc_load_up_altivec();
+        enable_kernel_altivec();
+        load_vr_state(&t->vr_state);
 #endif
 	}
 
@@ -722,11 +724,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
 	if (!lost_ext)
 		return;
 
-	if (lost_ext & MSR_FP)
-		kvmppc_load_up_fpu();
+	if (lost_ext & MSR_FP) {
+		enable_kernel_fp();
+		load_fp_state(&current->thread.fp_state);
+	}
 #ifdef CONFIG_ALTIVEC
-	if (lost_ext & MSR_VEC)
-		kvmppc_load_up_altivec();
+	if (lost_ext & MSR_VEC) {
+		enable_kernel_altivec();
+		load_vr_state(&current->thread.vr_state);
+	}
 #endif
 	current->thread.regs->msr |= lost_ext;
 }
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index a38c4c9edab8..c78ffbc371a5 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -166,51 +166,4 @@ _GLOBAL(kvmppc_entry_trampoline)
 	mtsrr1	r6
 	RFI
 
-#if defined(CONFIG_PPC_BOOK3S_32)
-#define STACK_LR	INT_FRAME_SIZE+4
-
-/* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */
-#define MSR_EXT_START                                           \
-	PPC_STL	r20, _NIP(r1);                                  \
-	mfmsr	r20;                                            \
-	LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE);                  \
-	andc	r3,r20,r3;              /* Disable DR,EE */     \
-	mtmsr	r3;                                             \
-	sync
-
-#define MSR_EXT_END                                             \
-	mtmsr	r20;                    /* Enable DR,EE */      \
-	sync;                                                   \
-	PPC_LL	r20, _NIP(r1)
-
-#elif defined(CONFIG_PPC_BOOK3S_64)
-#define STACK_LR	_LINK
-#define MSR_EXT_START
-#define MSR_EXT_END
-#endif
-
-/*
- * Activate current's external feature (FPU/Altivec/VSX)
- */
-#define define_load_up(what)                                    \
-                                                                \
-_GLOBAL(kvmppc_load_up_ ## what);                               \
-	PPC_STLU r1, -INT_FRAME_SIZE(r1);                       \
-	mflr	r3;                                             \
-	PPC_STL	r3, STACK_LR(r1);                               \
-	MSR_EXT_START;                                          \
-                                                                \
-	bl	FUNC(load_up_ ## what);                         \
-                                                                \
-	MSR_EXT_END;                                            \
-	PPC_LL	r3, STACK_LR(r1);                               \
-	mtlr	r3;                                             \
-	addi	r1, r1, INT_FRAME_SIZE;                         \
-	blr
-
-define_load_up(fpu)
-#ifdef CONFIG_ALTIVEC
-define_load_up(altivec)
-#endif
-
 #include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 09bfd9bc7cf8..fe59f225327f 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -136,7 +136,8 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_FPU
 	if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
-		load_up_fpu();
+		enable_kernel_fp();
+		load_fp_state(&current->thread.fp_state);
 		current->thread.regs->msr |= MSR_FP;
 	}
 #endif