about summary refs log tree commit diff stats
path: root/arch/sh/kernel/process_32.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/sh/kernel/process_32.c')
-rw-r--r--  arch/sh/kernel/process_32.c  16
1 file changed, 16 insertions, 0 deletions
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 0673c4746be3..aff5fe02e393 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -288,8 +288,14 @@ static void ubc_set_tracing(int asid, unsigned long pc)
 __notrace_funcgraph struct task_struct *
 __switch_to(struct task_struct *prev, struct task_struct *next)
 {
+	struct thread_struct *next_t = &next->thread;
+
 #if defined(CONFIG_SH_FPU)
 	unlazy_fpu(prev, task_pt_regs(prev));
+
+	/* we're going to use this soon, after a few expensive things */
+	if (next->fpu_counter > 5)
+		prefetch(&next_t->fpu.hard);
 #endif
 
 #ifdef CONFIG_MMU
@@ -321,6 +327,16 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 #endif
 	}
 
+#if defined(CONFIG_SH_FPU)
+	/* If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	if (next->fpu_counter > 5) {
+		fpu_state_restore(task_pt_regs(next));
+	}
+#endif
+
 	return prev;
 }
 