author	Anton Blanchard <anton@samba.org>	2015-10-28 20:44:02 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2015-11-30 21:52:25 -0500
commit	a7d623d4d053ccb0cdfad210bced2ec25ddf69a2 (patch)
tree	b1bcf400b9c53c66d483bb641f98fb6e2f270bf9
parent	98da581e0846f6d932a4bc46a55458140e20478a (diff)
powerpc: Move part of giveup_vsx into c
Move the MSR modification into C. Removing it from the assembly function will allow us to avoid costly MSR writes by batching them up.

Check the FP and VMX bits before calling the relevant giveup_*() function. This makes giveup_vsx() and flush_vsx_to_thread() perform more like their sister functions, and allows us to use flush_vsx_to_thread() in the signal code.

Move the check_if_tm_restore_required() check in.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
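Note: the new giveup_vsx() below relies on mtmsr_isync(), which comes from the parent commit (98da581e0846) rather than this one. A minimal sketch of the intended behaviour, assuming a 64-bit kernel and ignoring any feature-section optimisations the real helper may carry:

	/* Hypothetical reconstruction, not the in-tree helper: write the
	 * new MSR value, then context-synchronise so the facility enables
	 * take effect before the first FP/VMX/VSX instruction. */
	static inline void mtmsr_isync(unsigned long val)
	{
		asm volatile("mtmsrd %0; isync" : : "r" (val) : "memory");
	}

Writing the MSR only when the value actually changes is what lets the C version batch the FP, VMX and VSX enables into at most one costly MSR write.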
-rw-r--r--	arch/powerpc/kernel/process.c	28
-rw-r--r--	arch/powerpc/kernel/signal_32.c	4
-rw-r--r--	arch/powerpc/kernel/signal_64.c	4
-rw-r--r--	arch/powerpc/kernel/vector.S	6
4 files changed, 23 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6bcf82bed610..0cb627662ded 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -205,6 +205,25 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
+void giveup_vsx(struct task_struct *tsk)
+{
+	u64 oldmsr = mfmsr();
+	u64 newmsr;
+
+	check_if_tm_restore_required(tsk);
+
+	newmsr = oldmsr | (MSR_FP|MSR_VEC|MSR_VSX);
+	if (oldmsr != newmsr)
+		mtmsr_isync(newmsr);
+
+	if (tsk->thread.regs->msr & MSR_FP)
+		__giveup_fpu(tsk);
+	if (tsk->thread.regs->msr & MSR_VEC)
+		__giveup_altivec(tsk);
+	__giveup_vsx(tsk);
+}
+EXPORT_SYMBOL(giveup_vsx);
+
 void enable_kernel_vsx(void)
 {
 	WARN_ON(preemptible());
@@ -220,15 +239,6 @@ void enable_kernel_vsx(void)
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
 
-void giveup_vsx(struct task_struct *tsk)
-{
-	check_if_tm_restore_required(tsk);
-	giveup_fpu(tsk);
-	giveup_altivec(tsk);
-	__giveup_vsx(tsk);
-}
-EXPORT_SYMBOL(giveup_vsx);
-
 void flush_vsx_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
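For reference, flush_vsx_to_thread() is not touched by this patch and is omitted from the hunk above. Reconstructed from its sister functions (flush_fp_to_thread() and flush_altivec_to_thread()), so details may differ, it looks roughly like:

	void flush_vsx_to_thread(struct task_struct *tsk)
	{
		if (tsk->thread.regs) {
			preempt_disable();
			if (tsk->thread.regs->msr & MSR_VSX) {
				/* Live VSX state belongs to this task;
				 * save it into the thread_struct. */
				BUG_ON(tsk != current);
				giveup_vsx(tsk);
			}
			preempt_enable();
		}
	}

Because it checks MSR_VSX before doing any work, the signal paths below can call it unconditionally, whereas calling __giveup_vsx() directly was only safe once VSX had been enabled.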
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 3cd7a32c8ff4..4022cbb7e2d6 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -458,7 +458,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
 	 * contains valid data
 	 */
 	if (current->thread.used_vsr && ctx_has_vsx_region) {
-		__giveup_vsx(current);
+		flush_vsx_to_thread(current);
 		if (copy_vsx_to_user(&frame->mc_vsregs, current))
 			return 1;
 		msr |= MSR_VSX;
@@ -606,7 +606,7 @@ static int save_tm_user_regs(struct pt_regs *regs,
 	 * contains valid data
 	 */
 	if (current->thread.used_vsr) {
-		__giveup_vsx(current);
+		flush_vsx_to_thread(current);
 		if (copy_vsx_to_user(&frame->mc_vsregs, current))
 			return 1;
 		if (msr & MSR_VSX) {
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 6f2b555516e6..3b2339912911 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -147,7 +147,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	 * VMX data.
 	 */
 	if (current->thread.used_vsr && ctx_has_vsx_region) {
-		__giveup_vsx(current);
+		flush_vsx_to_thread(current);
 		v_regs += ELF_NVRREG;
 		err |= copy_vsx_to_user(v_regs, current);
 		/* set MSR_VSX in the MSR value in the frame to
@@ -270,7 +270,7 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 	 * VMX data.
 	 */
 	if (current->thread.used_vsr) {
-		__giveup_vsx(current);
+		flush_vsx_to_thread(current);
 		v_regs += ELF_NVRREG;
 		tm_v_regs += ELF_NVRREG;
 
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 6e925b40a484..98675b08efe2 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -177,14 +177,8 @@ _GLOBAL(load_up_vsx)
  * __giveup_vsx(tsk)
  * Disable VSX for the task given as the argument.
  * Does NOT save vsx registers.
- * Enables the VSX for use in the kernel on return.
  */
 _GLOBAL(__giveup_vsx)
-	mfmsr	r5
-	oris	r5,r5,MSR_VSX@h
-	mtmsrd	r5			/* enable use of VSX now */
-	isync
-
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	ld	r5,PT_REGS(r3)
 	cmpdi	0,r5,0
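With the mfmsr/oris/mtmsrd/isync sequence removed, __giveup_vsx() no longer enables VSX itself; callers must do so first. A sketch of the resulting calling pattern, mirroring the new giveup_vsx() shown above (tsk stands in for the task being switched away from):

	u64 oldmsr = mfmsr();
	u64 newmsr = oldmsr | (MSR_FP | MSR_VEC | MSR_VSX);

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);	/* one MSR write enables all three units */

	__giveup_vsx(tsk);		/* safe: FP, VMX and VSX are already usable */

Folding the three facility enables into a single conditional MSR write is the batching the commit message refers to.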