author    Paul Mundt <lethal@linux-sh.org>    2009-12-08 01:47:12 -0500
committer Paul Mundt <lethal@linux-sh.org>    2009-12-08 01:47:12 -0500
commit    6424db52e24e8cdf89917fa3c10395116440160e (patch)
tree      bd923d78f90addb080abc82f3bff6ca5c9bf10b4 /arch/sh/kernel/process_32.c
parent    09a072947791088b88ae15111cf68fc5aaaf758d (diff)
parent    6a5a0b9139b19dd1a107870269a35bc9cf18d2dc (diff)
Merge branch 'master' into sh/hw-breakpoints

Conflict between FPU thread flag migration and debug
thread flag addition.

Conflicts:
	arch/sh/include/asm/thread_info.h
	arch/sh/include/asm/ubc.h
	arch/sh/kernel/process_32.c
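
The substantive change this merge carries over from master is the fpu_counter-based context-switch heuristic visible in the diff below: __switch_to() now always flushes the previous task's FPU state via unlazy_fpu(), and when the incoming task's fpu_counter shows it used the FPU in each of its last five timeslices, its save area is prefetched and restored eagerly instead of waiting for the FPU-disabled trap. The following user-space model is a minimal sketch of that heuristic, not kernel code: struct task and the save/restore helpers are illustrative stand-ins, and only the fpu_counter > 5 threshold and the save-then-maybe-restore ordering come from the diff.

#include <stdbool.h>
#include <stdio.h>

struct task {
	const char *name;
	int fpu_counter;	/* consecutive timeslices that used the FPU */
	bool fpu_live;		/* models TS_USEDFPU: FPU holds this task's state */
};

/* Stand-in for unlazy_fpu(): flush live FPU registers to the save area. */
static void save_fpu_state(struct task *t)
{
	if (t->fpu_live) {
		printf("save FPU state of %s\n", t->name);
		t->fpu_live = false;
	}
}

/* Stand-in for fpu_state_restore(): reload the save area eagerly. */
static void restore_fpu_state(struct task *t)
{
	printf("eager FPU restore for %s\n", t->name);
	t->fpu_live = true;
}

static void switch_to(struct task *prev, struct task *next)
{
	/* Always get prev's state out of the FPU first (unlazy_fpu). */
	save_fpu_state(prev);

	/*
	 * Heavy FPU users skip the lazy path: if next used the FPU in
	 * each of its last 5 timeslices, restoring now is cheaper than
	 * taking the FPU-disabled trap on its first FPU instruction.
	 */
	if (next->fpu_counter > 5)
		restore_fpu_state(next);
}

int main(void)
{
	struct task crunch = { "number-cruncher", 6, false };
	struct task shell  = { "shell", 0, true };

	switch_to(&shell, &crunch);	/* saves shell, eagerly restores crunch */
	switch_to(&crunch, &shell);	/* saves crunch; shell stays lazy */
	return 0;
}

The threshold trades an occasionally wasted restore against the cost of a trap: five consecutive FPU-using timeslices is taken as strong evidence that the next one will use the FPU too.
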
Diffstat (limited to 'arch/sh/kernel/process_32.c')
 -rw-r--r--  arch/sh/kernel/process_32.c | 43 +++++++++++++++++++++++++++++++++----------
 1 file changed, 33 insertions(+), 10 deletions(-)
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 4a2c866f9773..7399d78fc8ed 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -133,7 +133,10 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 	regs.regs[5] = (unsigned long)fn;
 
 	regs.pc = (unsigned long)kernel_thread_helper;
-	regs.sr = (1 << 30);
+	regs.sr = SR_MD;
+#if defined(CONFIG_SH_FPU)
+	regs.sr |= SR_FD;
+#endif
 
 	/* Ok, create the new process.. */
 	pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
@@ -141,6 +144,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 
 	return pid;
 }
+EXPORT_SYMBOL(kernel_thread);
 
 /*
  * Free current thread data structures etc..
@@ -184,6 +188,16 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 
 	return fpvalid;
 }
+EXPORT_SYMBOL(dump_fpu);
+
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+	unlazy_fpu(tsk, task_pt_regs(tsk));
+}
 
 asmlinkage void ret_from_fork(void);
 
@@ -193,15 +207,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 {
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs;
-	struct task_struct *tsk = current;
-
-#if defined(CONFIG_SH_FPU)
-	unlazy_fpu(tsk, regs);
-	p->thread.fpu = tsk->thread.fpu;
-	copy_to_stopped_child_used_math(p);
-#endif
 
 #if defined(CONFIG_SH_DSP)
+	struct task_struct *tsk = current;
+
 	if (is_dsp_enabled(tsk)) {
 		/* We can use the __save_dsp or just copy the struct:
 		 * __save_dsp(p);
@@ -220,6 +229,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	} else {
 		childregs->regs[15] = (unsigned long)childregs;
 		ti->addr_limit = KERNEL_DS;
+		ti->status &= ~TS_USEDFPU;
+		p->fpu_counter = 0;
 	}
 
 	if (clone_flags & CLONE_SETTLS)
@@ -242,9 +253,13 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 __notrace_funcgraph struct task_struct *
 __switch_to(struct task_struct *prev, struct task_struct *next)
 {
-#if defined(CONFIG_SH_FPU)
+	struct thread_struct *next_t = &next->thread;
+
 	unlazy_fpu(prev, task_pt_regs(prev));
-#endif
+
+	/* we're going to use this soon, after a few expensive things */
+	if (next->fpu_counter > 5)
+		prefetch(&next_t->fpu.hard);
 
 #ifdef CONFIG_MMU
 	/*
@@ -256,6 +271,14 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 		: "r" (task_thread_info(next)));
 #endif
 
+	/*
+	 * If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	if (next->fpu_counter > 5)
+		fpu_state_restore(task_pt_regs(next));
+
 	return prev;
 }
 
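
On the kernel_thread() hunk: the bare (1 << 30) becomes the symbolic SR_MD, and FPU-enabled configs additionally set SR_FD so new kernel threads start with the FPU disabled. A minimal sketch of that status-register setup follows; the bit positions are assumptions taken from the SH-4 architecture (MD at SR bit 30, matching the old literal, FD at bit 15), since the diff itself only confirms the symbolic names.

#include <stdio.h>

#define SR_MD (1UL << 30)	/* privileged (kernel) mode -- assumed bit position */
#define SR_FD (1UL << 15)	/* FPU disable -- assumed bit position */

int main(void)
{
	unsigned long sr = SR_MD;	/* kernel threads run in privileged mode */
#ifdef CONFIG_SH_FPU
	/*
	 * Leave the FPU disabled so the thread's first FPU instruction
	 * traps and the lazy-FPU code can set up state on demand.
	 */
	sr |= SR_FD;
#endif
	printf("initial regs.sr = %#lx\n", sr);
	return 0;
}

Compiled as-is this prints 0x40000000; building with -DCONFIG_SH_FPU models an FPU-enabled config and sets the FD bit as well, mirroring the #if defined(CONFIG_SH_FPU) guard in the hunk above.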