author		Paul Mackerras <paulus@samba.org>	2006-01-11 06:11:39 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-01-12 04:09:29 -0500
commit		5388fb1025443ec223ba556b10efc4c5f83f8682 (patch)
tree		b14832a8886bd254533f226263a2047545c57805 /arch
parent		593195f9b2309693f27b402f34573f7920b82c3e (diff)
[PATCH] powerpc: Avoid potential FP corruption with preempt and UP
Heikki Lindholm pointed out that there was a potential race with the lazy CPU state (FP, VR, EVR) handling if preempt is enabled. The race is that in the process of restoring FP state on sigreturn, the task gets preempted by a user task that wants to use the FPU. It will take an FP unavailable exception, which will write the current FPU state to the thread_struct, overwriting the values which sigreturn has stored. Note that this can only happen on UP, since we don't implement lazy CPU state on SMP.

The fix is to flush the lazy CPU state before updating the thread_struct. To do this we re-use the discard_lazy_cpu_state() function from process.c, making it non-static so the signal code can call it.

Signed-off-by: Paul Mackerras <paulus@samba.org>
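To make the interleaving concrete, here is a minimal sketch (not part of the patch) of a UP sigreturn-style restore path and where the discard_lazy_cpu_state() call has to sit. The function restore_fp_sketch and its parameters are illustrative names, and the surrounding kernel context (current, last_task_used_math, the MSR bits) is assumed from the code in the diff below:

/* Sketch only: a simplified restore path in the style of
 * restore_user_regs(); the Altivec/SPE cases are omitted. */
static long restore_fp_sketch(struct pt_regs *regs,
			      elf_fpregset_t __user *saved_fp)
{
	/*
	 * Race on UP without the fix:
	 *  1. this task copies the signal frame's FP values into
	 *     current->thread.fpr;
	 *  2. it is preempted; another task executes an FP
	 *     instruction and takes an FP unavailable exception;
	 *  3. the exception handler saves the live FPU registers
	 *     (this task's stale state, since current was
	 *     last_task_used_math) back into current->thread.fpr,
	 *     clobbering the values written in step 1.
	 * Discarding the lazy state first means no later exception
	 * path will dump stale registers into this thread_struct.
	 */
	discard_lazy_cpu_state();

	if (__copy_from_user(&current->thread.fpr, saved_fp,
			     sizeof(current->thread.fpr)))
		return 1;

	/* Clear MSR_FP so the next FP instruction faults and reloads
	 * the registers from current->thread.fpr. */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
	return 0;
}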
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kernel/process.c	|  6
-rw-r--r--	arch/powerpc/kernel/signal_32.c	| 21
-rw-r--r--	arch/powerpc/kernel/signal_64.c	| 20
3 files changed, 23 insertions(+), 24 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 105d5609ff57..913f90692a36 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -201,13 +201,13 @@ int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
 }
 #endif /* CONFIG_SPE */
 
+#ifndef CONFIG_SMP
 /*
  * If we are doing lazy switching of CPU state (FP, altivec or SPE),
  * and the current task has some state, discard it.
  */
-static inline void discard_lazy_cpu_state(void)
+void discard_lazy_cpu_state(void)
 {
-#ifndef CONFIG_SMP
 	preempt_disable();
 	if (last_task_used_math == current)
 		last_task_used_math = NULL;
@@ -220,8 +220,8 @@ static inline void discard_lazy_cpu_state(void)
 		last_task_used_spe = NULL;
 #endif
 	preempt_enable();
-#endif /* CONFIG_SMP */
 }
+#endif /* CONFIG_SMP */
 
 int set_dabr(unsigned long dabr)
 {
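Note that discard_lazy_cpu_state() is now compiled only when CONFIG_SMP is unset, while the sigreturn paths below call it unconditionally. This diffstat is limited to 'arch', so any accompanying header change is not visible here; SMP builds presumably see a declaration plus no-op stub along these lines (the header placement is an assumption, not shown by this commit):

/* Assumed companion declaration, not part of the 'arch'-limited diff:
 * real on UP, a no-op on SMP where there is no lazy CPU state. */
#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif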
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d3f0b6d452fb..177bba78fb0b 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -497,6 +497,15 @@ static long restore_user_regs(struct pt_regs *regs,
 	if (err)
 		return 1;
 
+	/*
+	 * Do this before updating the thread state in
+	 * current->thread.fpr/vr/evr.  That way, if we get preempted
+	 * and another task grabs the FPU/Altivec/SPE, it won't be
+	 * tempted to save the current CPU state into the thread_struct
+	 * and corrupt what we are writing there.
+	 */
+	discard_lazy_cpu_state();
+
 	/* force the process to reload the FP registers from
 	   current->thread when it next does FP instructions */
 	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
@@ -538,18 +547,6 @@ static long restore_user_regs(struct pt_regs *regs,
 		return 1;
 #endif /* CONFIG_SPE */
 
-#ifndef CONFIG_SMP
-	preempt_disable();
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
-#endif
-	preempt_enable();
-#endif
 	return 0;
 }
 
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 5462bef898f6..7b9d999e2115 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -207,10 +207,20 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 
 	if (!sig)
 		regs->gpr[13] = save_r13;
-	err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
 	if (set != NULL)
 		err |= __get_user(set->sig[0], &sc->oldmask);
 
+	/*
+	 * Do this before updating the thread state in
+	 * current->thread.fpr/vr.  That way, if we get preempted
+	 * and another task grabs the FPU/Altivec, it won't be
+	 * tempted to save the current CPU state into the thread_struct
+	 * and corrupt what we are writing there.
+	 */
+	discard_lazy_cpu_state();
+
+	err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
+
 #ifdef CONFIG_ALTIVEC
 	err |= __get_user(v_regs, &sc->v_regs);
 	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
@@ -229,14 +239,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 		current->thread.vrsave = 0;
 #endif /* CONFIG_ALTIVEC */
 
-#ifndef CONFIG_SMP
-	preempt_disable();
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-	preempt_enable();
-#endif
 	/* Force reload of FP/VEC */
 	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC);
 