path: root/arch/sh/kernel/process_32.c
author     Paul Mundt <lethal@linux-sh.org>  2010-01-12 22:51:40 -0500
committer  Paul Mundt <lethal@linux-sh.org>  2010-01-12 22:51:40 -0500
commit     0ea820cf9bf58f735ed40ec67947159c4f170012 (patch)
tree       77320006b4dded5804c678c1a869571be5c0b95f /arch/sh/kernel/process_32.c
parent     a3705799e2cc5fb69d88ad6a7f317a8f5597f18d (diff)
sh: Move over to dynamically allocated FPU context.
This follows the x86 xstate changes and implements a task_xstate slab cache that is dynamically sized to match one of hard FP/soft FP/FPU-less.

This also tidies up and consolidates some of the SH-2A/SH-4 FPU fragmentation. Now FPU state restorers are commonly defined, with the init_fpu()/fpu_init() mess reworked to follow the x86 convention. The fpu_init() register initialization has been replaced by xstate setup followed by writing out to hardware via the standard restore path.

As init_fpu() now performs a slab allocation, a secondary lighter-weight restorer is also introduced for the context switch. In the future the DSP state will be rolled in here, too.

More work remains for math emulation and the SH-5 FPU, which presently uses its own special (UP-only) interfaces.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
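As a rough illustration of the approach described above (not the body of this patch), a dynamically sized task_xstate slab cache could be shaped roughly as in the sketch below. The helper names init_task_xstate() and alloc_thread_xstate(), the cache pointer task_xstate_cachep, and the assumption that thread_struct carries an xstate pointer are placeholders for this example; only free_thread_xstate() is taken from the hunk further down.

/*
 * Illustrative sketch only.  Assumes thread_struct gains a pointer
 * member 'xstate' and that the required context size (hard FP, soft FP,
 * or 0 for FPU-less parts) is known at boot time.
 */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/errno.h>

static struct kmem_cache *task_xstate_cachep;	/* placeholder name */
static unsigned int xstate_size;		/* 0 on FPU-less parts */

/* Create the slab cache once the required context size is known. */
void __init init_task_xstate(unsigned int size)
{
	xstate_size = size;
	if (!size)
		return;		/* nothing to allocate without an FPU */

	task_xstate_cachep = kmem_cache_create("task_xstate", size,
					       __alignof__(unsigned long),
					       SLAB_PANIC | SLAB_HWCACHE_ALIGN,
					       NULL);
}

/* Lazily allocate the FPU context the first time a task needs it. */
int alloc_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate)
		return 0;

	tsk->thread.xstate = kmem_cache_zalloc(task_xstate_cachep, GFP_KERNEL);
	return tsk->thread.xstate ? 0 : -ENOMEM;
}

/* Release the context again, e.g. from start_thread() as the hunk below does. */
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}

With an allocation scheme along these lines, a task that never touches the FPU never pays for the context area, and exec() can discard any previously allocated state, which is what the free_thread_xstate(current) call added to start_thread() below accomplishes.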
Diffstat (limited to 'arch/sh/kernel/process_32.c')
-rw-r--r--  arch/sh/kernel/process_32.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index c4361402ec5e..03de6573aa76 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -156,6 +156,8 @@ void start_thread(struct pt_regs *regs, unsigned long new_pc,
 	regs->sr = SR_FD;
 	regs->pc = new_pc;
 	regs->regs[15] = new_sp;
+
+	free_thread_xstate(current);
 }
 EXPORT_SYMBOL(start_thread);
 
@@ -316,7 +318,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next->fpu_counter > 5)
-		prefetch(&next_t->fpu.hard);
+		prefetch(next_t->xstate);
 
 #ifdef CONFIG_MMU
 	/*
@@ -353,7 +355,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	 * chances of needing FPU soon are obviously high now
 	 */
 	if (next->fpu_counter > 5)
-		fpu_state_restore(task_pt_regs(next));
+		__fpu_state_restore();
 
 	return prev;
 }