 arch/powerpc/kernel/process.c | 62 +++++++++++++++++++++++++-------------------------------------
 1 file changed, 25 insertions(+), 37 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a5a7542a8ff3..105d5609ff57 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -201,6 +201,28 @@ int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
 }
 #endif /* CONFIG_SPE */
 
+/*
+ * If we are doing lazy switching of CPU state (FP, altivec or SPE),
+ * and the current task has some state, discard it.
+ */
+static inline void discard_lazy_cpu_state(void)
+{
+#ifndef CONFIG_SMP
+	preempt_disable();
+	if (last_task_used_math == current)
+		last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+	if (last_task_used_altivec == current)
+		last_task_used_altivec = NULL;
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+	if (last_task_used_spe == current)
+		last_task_used_spe = NULL;
+#endif
+	preempt_enable();
+#endif /* CONFIG_SMP */
+}
+
 int set_dabr(unsigned long dabr)
 {
 	if (ppc_md.set_dabr)
@@ -434,19 +456,7 @@ void show_regs(struct pt_regs * regs)
 void exit_thread(void)
 {
 	kprobe_flush_task(current);
-
-#ifndef CONFIG_SMP
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
+	discard_lazy_cpu_state();
 }
 
 void flush_thread(void)
@@ -458,18 +468,7 @@ void flush_thread(void)
 	t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
 #endif
 
-#ifndef CONFIG_SMP
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
+	discard_lazy_cpu_state();
 
 #ifdef CONFIG_PPC64 /* for now */
 	if (current->thread.dabr) {
@@ -635,18 +634,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	}
 #endif
 
-#ifndef CONFIG_SMP
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#endif
-#ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
+	discard_lazy_cpu_state();
 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
 	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
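
For context, below is a minimal, self-contained sketch of the bookkeeping pattern that discard_lazy_cpu_state() consolidates at the three call sites above. It models only the FP case and runs in user space: struct task, current_task and last_task_used_fp are stand-ins for the kernel's task_struct, current and last_task_used_math, and preempt_disable()/preempt_enable() are reduced to no-ops. It illustrates the pattern, not the kernel code itself.

/* Simplified user-space model of the lazy-switch bookkeeping above. */
#include <stdio.h>
#include <stddef.h>

struct task { const char *name; };

static struct task *last_task_used_fp;  /* which task's FP state is live in the CPU */
static struct task *current_task;       /* stand-in for the kernel's `current` */

static void preempt_disable(void) { /* no-op in this model */ }
static void preempt_enable(void)  { /* no-op in this model */ }

/* Mirrors the shape of discard_lazy_cpu_state(): if the CPU is lazily
 * holding the current task's state, forget about it so the state is
 * reloaded later instead of stale registers being reused. */
static void discard_lazy_fp_state(void)
{
	preempt_disable();
	if (last_task_used_fp == current_task)
		last_task_used_fp = NULL;
	preempt_enable();
}

int main(void)
{
	struct task a = { "a" };

	current_task = &a;
	last_task_used_fp = &a;   /* CPU lazily holds a's FP registers */

	discard_lazy_fp_state();  /* e.g. on exit, flush or exec of `a` */
	printf("lazy owner after discard: %s\n",
	       last_task_used_fp ? last_task_used_fp->name : "(none)");
	return 0;
}

In the kernel, the same check-and-clear is repeated for Altivec and SPE under their config options, and the whole body is compiled out on SMP, where lazy switching of this kind is not used.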