author | Anton Blanchard <anton@samba.org> | 2015-10-28 20:44:02 -0400
---|---|---
committer | Michael Ellerman <mpe@ellerman.id.au> | 2015-11-30 21:52:25 -0500
commit | a7d623d4d053ccb0cdfad210bced2ec25ddf69a2 (patch) |
tree | b1bcf400b9c53c66d483bb641f98fb6e2f270bf9 /arch/powerpc/kernel/process.c |
parent | 98da581e0846f6d932a4bc46a55458140e20478a (diff) |
powerpc: Move part of giveup_vsx into C
Move the MSR modification into C. Removing it from the assembly
function will allow us to avoid costly MSR writes by batching them
up.
Check the FP and VMX bits before calling the relevant giveup_*()
function. This makes giveup_vsx() and flush_vsx_to_thread() perform
more like their sister functions, and allows us to use
flush_vsx_to_thread() in the signal code.
Move the check_if_tm_restore_required() check in.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
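
The cost being avoided: each giveup_*() used to do its own MSR read-modify-write in assembly, and mtmsrd with its synchronisation is expensive. Once the logic is in C, one write can set several facility bits at once. A minimal sketch of the pattern, assuming the mfmsr()/mtmsr_isync() helpers this series uses (the wrapper name msr_batch_enable is hypothetical, for illustration only):

	/* Hypothetical sketch: enable several MSR facility bits with at
	 * most one costly MSR write, rather than one write per facility. */
	static void msr_batch_enable(u64 bits)
	{
		u64 oldmsr = mfmsr();
		u64 newmsr = oldmsr | bits;

		/* Skip the expensive mtmsrd/isync if the bits are already set. */
		if (oldmsr != newmsr)
			mtmsr_isync(newmsr);
	}

giveup_vsx() in the diff below open-codes exactly this with bits = MSR_FP|MSR_VEC|MSR_VSX, so FP, VMX and VSX are all enabled with a single MSR write before the three __giveup_*() calls.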
Diffstat (limited to 'arch/powerpc/kernel/process.c')
-rw-r--r-- | arch/powerpc/kernel/process.c | 28
1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6bcf82bed610..0cb627662ded 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -205,6 +205,25 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
+void giveup_vsx(struct task_struct *tsk)
+{
+	u64 oldmsr = mfmsr();
+	u64 newmsr;
+
+	check_if_tm_restore_required(tsk);
+
+	newmsr = oldmsr | (MSR_FP|MSR_VEC|MSR_VSX);
+	if (oldmsr != newmsr)
+		mtmsr_isync(newmsr);
+
+	if (tsk->thread.regs->msr & MSR_FP)
+		__giveup_fpu(tsk);
+	if (tsk->thread.regs->msr & MSR_VEC)
+		__giveup_altivec(tsk);
+	__giveup_vsx(tsk);
+}
+EXPORT_SYMBOL(giveup_vsx);
+
 void enable_kernel_vsx(void)
 {
 	WARN_ON(preemptible());
@@ -220,15 +239,6 @@ void enable_kernel_vsx(void)
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
 
-void giveup_vsx(struct task_struct *tsk)
-{
-	check_if_tm_restore_required(tsk);
-	giveup_fpu(tsk);
-	giveup_altivec(tsk);
-	__giveup_vsx(tsk);
-}
-EXPORT_SYMBOL(giveup_vsx);
-
 void flush_vsx_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
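
The diff context ends mid-function; flush_vsx_to_thread() continues past the lines shown. For reference, the "sister function" shape the commit message mentions looks roughly like this (a sketch of the flush_*_to_thread() pattern, not a verbatim copy of the file):

	/* Save the task's live VSX state back into its thread_struct if it
	 * currently owns the registers, so callers such as the signal code
	 * can read a coherent copy. */
	void flush_vsx_to_thread(struct task_struct *tsk)
	{
		if (tsk->thread.regs) {
			preempt_disable();
			if (tsk->thread.regs->msr & MSR_VSX)
				giveup_vsx(tsk);
			preempt_enable();
		}
	}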