-rw-r--r--	arch/powerpc/kernel/process.c	6
-rw-r--r--	arch/powerpc/kernel/signal_32.c	21
-rw-r--r--	arch/powerpc/kernel/signal_64.c	20
-rw-r--r--	include/asm-powerpc/system.h	8
4 files changed, 31 insertions, 24 deletions
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 105d5609ff57..913f90692a36 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -201,13 +201,13 @@ int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
 }
 #endif /* CONFIG_SPE */
 
+#ifndef CONFIG_SMP
 /*
  * If we are doing lazy switching of CPU state (FP, altivec or SPE),
  * and the current task has some state, discard it.
  */
-static inline void discard_lazy_cpu_state(void)
+void discard_lazy_cpu_state(void)
 {
-#ifndef CONFIG_SMP
         preempt_disable();
         if (last_task_used_math == current)
                 last_task_used_math = NULL;
@@ -220,8 +220,8 @@ static inline void discard_lazy_cpu_state(void)
                 last_task_used_spe = NULL;
 #endif
         preempt_enable();
-#endif /* CONFIG_SMP */
 }
+#endif /* CONFIG_SMP */
 
 int set_dabr(unsigned long dabr)
 {
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d3f0b6d452fb..177bba78fb0b 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -497,6 +497,15 @@ static long restore_user_regs(struct pt_regs *regs,
         if (err)
                 return 1;
 
+        /*
+         * Do this before updating the thread state in
+         * current->thread.fpr/vr/evr. That way, if we get preempted
+         * and another task grabs the FPU/Altivec/SPE, it won't be
+         * tempted to save the current CPU state into the thread_struct
+         * and corrupt what we are writing there.
+         */
+        discard_lazy_cpu_state();
+
         /* force the process to reload the FP registers from
            current->thread when it next does FP instructions */
         regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
@@ -538,18 +547,6 @@ static long restore_user_regs(struct pt_regs *regs,
                 return 1;
 #endif /* CONFIG_SPE */
 
-#ifndef CONFIG_SMP
-        preempt_disable();
-        if (last_task_used_math == current)
-                last_task_used_math = NULL;
-        if (last_task_used_altivec == current)
-                last_task_used_altivec = NULL;
-#ifdef CONFIG_SPE
-        if (last_task_used_spe == current)
-                last_task_used_spe = NULL;
-#endif
-        preempt_enable();
-#endif
         return 0;
 }
 
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 5462bef898f6..7b9d999e2115 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -207,10 +207,20 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 
         if (!sig)
                 regs->gpr[13] = save_r13;
-        err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
         if (set != NULL)
                 err |= __get_user(set->sig[0], &sc->oldmask);
 
+        /*
+         * Do this before updating the thread state in
+         * current->thread.fpr/vr. That way, if we get preempted
+         * and another task grabs the FPU/Altivec, it won't be
+         * tempted to save the current CPU state into the thread_struct
+         * and corrupt what we are writing there.
+         */
+        discard_lazy_cpu_state();
+
+        err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
+
 #ifdef CONFIG_ALTIVEC
         err |= __get_user(v_regs, &sc->v_regs);
         err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
@@ -229,14 +239,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
                 current->thread.vrsave = 0;
 #endif /* CONFIG_ALTIVEC */
 
-#ifndef CONFIG_SMP
-        preempt_disable();
-        if (last_task_used_math == current)
-                last_task_used_math = NULL;
-        if (last_task_used_altivec == current)
-                last_task_used_altivec = NULL;
-        preempt_enable();
-#endif
         /* Force reload of FP/VEC */
         regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC);
 
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 0c58e32a9570..4c888303e85b 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -133,6 +133,14 @@ extern int fix_alignment(struct pt_regs *);
 extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
 extern void cvt_df(double *from, float *to, struct thread_struct *thread);
 
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
+
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
 #else
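
For readers who want to see the lazy-switching hazard the new comments describe in isolation, the following is a minimal, self-contained C sketch, not kernel code. struct task, restore_fp_from_frame() and the no-op preempt_disable()/preempt_enable() stubs are hypothetical stand-ins for current->thread, restore_user_regs()/restore_sigcontext() and the real preemption API, and the toy discard_lazy_cpu_state() takes the task explicitly instead of using current.

/*
 * Standalone illustration of the lazy-ownership pattern in this patch
 * (toy code, not the kernel implementation).
 */
#include <stdio.h>
#include <string.h>

struct task {
        double fpr[4];          /* saved FP state, like current->thread.fpr */
};

/* On UP, records which task's FP state currently lives in the CPU. */
static struct task *last_task_used_math;

/* Stand-ins for the kernel's preemption control (no-ops here). */
static void preempt_disable(void) { }
static void preempt_enable(void) { }

/*
 * Forget any lazily held CPU state owned by tsk, so a later context
 * switch cannot write stale register contents back into tsk->fpr.
 */
static void discard_lazy_cpu_state(struct task *tsk)
{
        preempt_disable();
        if (last_task_used_math == tsk)
                last_task_used_math = NULL;
        preempt_enable();
}

/* Analogue of restore_user_regs(): overwrite saved state from a frame. */
static void restore_fp_from_frame(struct task *tsk, const double *frame)
{
        /* Drop lazy ownership first, exactly as the patch does. */
        discard_lazy_cpu_state(tsk);
        memcpy(tsk->fpr, frame, sizeof(tsk->fpr));
}

int main(void)
{
        struct task t = { .fpr = { 0 } };
        const double frame[4] = { 1.0, 2.0, 3.0, 4.0 };

        last_task_used_math = &t;       /* t lazily "owns" the FP unit */
        restore_fp_from_frame(&t, frame);

        printf("owner cleared: %s, fpr[0]=%.1f\n",
               last_task_used_math == NULL ? "yes" : "no", t.fpr[0]);
        return 0;
}

The point mirrors the patch: clearing last_task_used_math before the copy means that a preemption between the two steps can no longer flush stale register contents over the freshly restored values.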