Diffstat (limited to 'arch/sparc/kernel/process_32.c')
-rw-r--r--   arch/sparc/kernel/process_32.c   709
1 file changed, 709 insertions, 0 deletions
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
new file mode 100644
index 000000000000..e8c43ffe317e
--- /dev/null
+++ b/arch/sparc/kernel/process_32.c
@@ -0,0 +1,709 @@
/* linux/arch/sparc/kernel/process.c
 *
 * Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/init.h>

#include <asm/auxio.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/delay.h>
#include <asm/processor.h>
#include <asm/psr.h>
#include <asm/elf.h>
#include <asm/prom.h>
#include <asm/unistd.h>

/*
 * Power management idle function
 * Set in pm platform drivers (apc.c and pmc.c)
 */
void (*pm_idle)(void);

/*
 * Power-off handler instantiation for pm.h compliance
 * This is done via auxio, but could be used as a fallback
 * handler when auxio is not present-- unused for now...
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);

/*
 * sysctl - toggle power-off restriction for serial console
 * systems in machine_power_off()
 */
int scons_pwroff = 1;

extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);

struct task_struct *last_task_used_math = NULL;
struct thread_info *current_set[NR_CPUS];

#ifndef CONFIG_SMP

#define SUN4C_FAULT_HIGH 100

/*
 * the idle loop on a Sparc... ;)
 */
void cpu_idle(void)
{
        /* endless idle loop with no priority at all */
        for (;;) {
                if (ARCH_SUN4C) {
                        static int count = HZ;
                        static unsigned long last_jiffies;
                        static unsigned long last_faults;
                        static unsigned long fps;
                        unsigned long now;
                        unsigned long faults;

                        extern unsigned long sun4c_kernel_faults;
                        extern void sun4c_grow_kernel_ring(void);

                        local_irq_disable();
                        now = jiffies;
                        count -= (now - last_jiffies);
                        last_jiffies = now;
                        if (count < 0) {
                                count += HZ;
                                faults = sun4c_kernel_faults;
                                fps = (fps + (faults - last_faults)) >> 1;
                                last_faults = faults;
#if 0
                                printk("kernel faults / second = %ld\n", fps);
#endif
                                if (fps >= SUN4C_FAULT_HIGH) {
                                        sun4c_grow_kernel_ring();
                                }
                        }
                        local_irq_enable();
                }

                if (pm_idle) {
                        while (!need_resched())
                                (*pm_idle)();
                } else {
                        while (!need_resched())
                                cpu_relax();
                }
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
                check_pgt_cache();
        }
}

#else

/* This is being executed in task 0 'user space'. */
void cpu_idle(void)
{
        set_thread_flag(TIF_POLLING_NRFLAG);
        /* endless idle loop with no priority at all */
        while(1) {
                while (!need_resched())
                        cpu_relax();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
                check_pgt_cache();
        }
}

#endif

/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
void machine_halt(void)
{
        local_irq_enable();
        mdelay(8);
        local_irq_disable();
        prom_halt();
        panic("Halt failed!");
}

void machine_restart(char * cmd)
{
        char *p;

        local_irq_enable();
        mdelay(8);
        local_irq_disable();

        p = strchr (reboot_command, '\n');
        if (p) *p = 0;
        if (cmd)
                prom_reboot(cmd);
        if (*reboot_command)
                prom_reboot(reboot_command);
        prom_feval ("reset");
        panic("Reboot failed!");
}

void machine_power_off(void)
{
#ifdef CONFIG_SUN_AUXIO
        if (auxio_power_register &&
            (strcmp(of_console_device->type, "serial") || scons_pwroff))
                *auxio_power_register |= AUXIO_POWER_OFF;
#endif
        machine_halt();
}

#if 0

static DEFINE_SPINLOCK(sparc_backtrace_lock);

void __show_backtrace(unsigned long fp)
{
        struct reg_window *rw;
        unsigned long flags;
        int cpu = smp_processor_id();

        spin_lock_irqsave(&sparc_backtrace_lock, flags);

        rw = (struct reg_window *)fp;
        while(rw && (((unsigned long) rw) >= PAGE_OFFSET) &&
              !(((unsigned long) rw) & 0x7)) {
                printk("CPU[%d]: ARGS[%08lx,%08lx,%08lx,%08lx,%08lx,%08lx] "
                       "FP[%08lx] CALLER[%08lx]: ", cpu,
                       rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
                       rw->ins[4], rw->ins[5],
                       rw->ins[6],
                       rw->ins[7]);
                printk("%pS\n", (void *) rw->ins[7]);
                rw = (struct reg_window *) rw->ins[6];
        }
        spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
}

#define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
#define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
#define __GET_FP(fp) __asm__ __volatile__("mov %%i6, %0" : "=r" (fp))

void show_backtrace(void)
{
        unsigned long fp;

        __SAVE; __SAVE; __SAVE; __SAVE;
        __SAVE; __SAVE; __SAVE; __SAVE;
        __RESTORE; __RESTORE; __RESTORE; __RESTORE;
        __RESTORE; __RESTORE; __RESTORE; __RESTORE;

        __GET_FP(fp);

        __show_backtrace(fp);
}

#ifdef CONFIG_SMP
void smp_show_backtrace_all_cpus(void)
{
        xc0((smpfunc_t) show_backtrace);
        show_backtrace();
}
#endif

void show_stackframe(struct sparc_stackf *sf)
{
        unsigned long size;
        unsigned long *stk;
        int i;

        printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx "
               "l4: %08lx l5: %08lx l6: %08lx l7: %08lx\n",
               sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
               sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
        printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx "
               "i4: %08lx i5: %08lx fp: %08lx i7: %08lx\n",
               sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
               sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
        printk("sp: %08lx x0: %08lx x1: %08lx x2: %08lx "
               "x3: %08lx x4: %08lx x5: %08lx xx: %08lx\n",
               (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
               sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
               sf->xxargs[0]);
        size = ((unsigned long)sf->fp) - ((unsigned long)sf);
        size -= STACKFRAME_SZ;
        stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
        i = 0;
        do {
                printk("s%d: %08lx\n", i++, *stk++);
        } while ((size -= sizeof(unsigned long)));
}
#endif

void show_regs(struct pt_regs *r)
{
        struct reg_window *rw = (struct reg_window *) r->u_regs[14];

        printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
               r->psr, r->pc, r->npc, r->y, print_tainted());
        printk("PC: <%pS>\n", (void *) r->pc);
        printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
               r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
               r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
        printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
               r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
               r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
        printk("RPC: <%pS>\n", (void *) r->u_regs[15]);

        printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
               rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
               rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
        printk("%%I: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
               rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
               rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
}

/*
 * The show_stack is an external API which we do not use ourselves.
 * The oops is printed in die_if_kernel.
 */
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
        unsigned long pc, fp;
        unsigned long task_base;
        struct reg_window *rw;
        int count = 0;

        if (tsk != NULL)
                task_base = (unsigned long) task_stack_page(tsk);
        else
                task_base = (unsigned long) current_thread_info();

        fp = (unsigned long) _ksp;
        do {
                /* Bogus frame pointer? */
                if (fp < (task_base + sizeof(struct thread_info)) ||
                    fp >= (task_base + (PAGE_SIZE << 1)))
                        break;
                rw = (struct reg_window *) fp;
                pc = rw->ins[7];
                printk("[%08lx : ", pc);
                printk("%pS ] ", (void *) pc);
                fp = rw->ins[6];
        } while (++count < 16);
        printk("\n");
}

void dump_stack(void)
{
        unsigned long *ksp;

        __asm__ __volatile__("mov %%fp, %0"
                             : "=r" (ksp));
        show_stack(current, ksp);
}

EXPORT_SYMBOL(dump_stack);

/*
 * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        return task_thread_info(tsk)->kpc;
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
#ifndef CONFIG_SMP
        if(last_task_used_math == current) {
#else
        if (test_thread_flag(TIF_USEDFPU)) {
#endif
                /* Keep process from leaving FPU in a bogon state. */
                put_psr(get_psr() | PSR_EF);
                fpsave(&current->thread.float_regs[0], &current->thread.fsr,
                       &current->thread.fpqueue[0], &current->thread.fpqdepth);
#ifndef CONFIG_SMP
                last_task_used_math = NULL;
#else
                clear_thread_flag(TIF_USEDFPU);
#endif
        }
}

void flush_thread(void)
{
        current_thread_info()->w_saved = 0;

#ifndef CONFIG_SMP
        if(last_task_used_math == current) {
#else
        if (test_thread_flag(TIF_USEDFPU)) {
#endif
                /* Clean the fpu. */
                put_psr(get_psr() | PSR_EF);
                fpsave(&current->thread.float_regs[0], &current->thread.fsr,
                       &current->thread.fpqueue[0], &current->thread.fpqdepth);
#ifndef CONFIG_SMP
                last_task_used_math = NULL;
#else
                clear_thread_flag(TIF_USEDFPU);
#endif
        }

        /* Now, this task is no longer a kernel thread. */
        current->thread.current_ds = USER_DS;
        if (current->thread.flags & SPARC_FLAG_KTHREAD) {
                current->thread.flags &= ~SPARC_FLAG_KTHREAD;

                /* We must fixup kregs as well. */
                /* XXX This was not fixed for ti for a while, worked. Unused? */
                current->thread.kregs = (struct pt_regs *)
                    (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ));
        }
}

static inline struct sparc_stackf __user *
clone_stackframe(struct sparc_stackf __user *dst,
                 struct sparc_stackf __user *src)
{
        unsigned long size, fp;
        struct sparc_stackf *tmp;
        struct sparc_stackf __user *sp;

        if (get_user(tmp, &src->fp))
                return NULL;

        fp = (unsigned long) tmp;
        size = (fp - ((unsigned long) src));
        fp = (unsigned long) dst;
        sp = (struct sparc_stackf __user *)(fp - size);

        /* do_fork() grabs the parent semaphore, we must release it
         * temporarily so we can build the child clone stack frame
         * without deadlocking.
         */
        if (__copy_user(sp, src, size))
                sp = NULL;
        else if (put_user(fp, &sp->fp))
                sp = NULL;

        return sp;
}

asmlinkage int sparc_do_fork(unsigned long clone_flags,
                             unsigned long stack_start,
                             struct pt_regs *regs,
                             unsigned long stack_size)
{
        unsigned long parent_tid_ptr, child_tid_ptr;
        unsigned long orig_i1 = regs->u_regs[UREG_I1];
        long ret;

        parent_tid_ptr = regs->u_regs[UREG_I2];
        child_tid_ptr = regs->u_regs[UREG_I4];

        ret = do_fork(clone_flags, stack_start,
                      regs, stack_size,
                      (int __user *) parent_tid_ptr,
                      (int __user *) child_tid_ptr);

        /* If we get an error and potentially restart the system
         * call, we're screwed because copy_thread() clobbered
         * the parent's %o1. So detect that case and restore it
         * here.
         */
        if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
                regs->u_regs[UREG_I1] = orig_i1;

        return ret;
}

/* Copy a Sparc thread. The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 * Parent --> %o0 == childs pid, %o1 == 0
 * Child  --> %o0 == parents pid, %o1 == 1
 *
 * NOTE: We have a separate fork kpsr/kwim because
 *       the parent could change these values between
 *       sys_fork invocation and when we reach here
 *       if the parent should sleep while trying to
 *       allocate the task_struct and kernel stack in
 *       do_fork().
 * XXX See comment above sys_vfork in sparc64. todo.
 */
extern void ret_from_fork(void);

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                unsigned long unused,
                struct task_struct *p, struct pt_regs *regs)
{
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs;
        char *new_stack;

#ifndef CONFIG_SMP
        if(last_task_used_math == current) {
#else
        if (test_thread_flag(TIF_USEDFPU)) {
#endif
                put_psr(get_psr() | PSR_EF);
                fpsave(&p->thread.float_regs[0], &p->thread.fsr,
                       &p->thread.fpqueue[0], &p->thread.fpqdepth);
#ifdef CONFIG_SMP
                clear_thread_flag(TIF_USEDFPU);
#endif
        }

        /*
         *  p->thread_info         new_stack   childregs
         *  !                      !           !             {if(PSR_PS) }
         *  V                      V (stk.fr.) V  (pt_regs)  { (stk.fr.) }
         *  +----- - - - - - ------+===========+============={+==========}+
         */
        new_stack = task_stack_page(p) + THREAD_SIZE;
        if (regs->psr & PSR_PS)
                new_stack -= STACKFRAME_SZ;
        new_stack -= STACKFRAME_SZ + TRACEREG_SZ;
        memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ);
        childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ);

        /*
         * A new process must start with interrupts closed in 2.5,
         * because this is how Mingo's scheduler works (see schedule_tail
         * and finish_arch_switch). If we do not do it, a timer interrupt hits
         * before we unlock, attempts to re-take the rq->lock, and then we die.
         * Thus, kpsr|=PSR_PIL.
         */
        ti->ksp = (unsigned long) new_stack;
        ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
        ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
        ti->kwim = current->thread.fork_kwim;

        if(regs->psr & PSR_PS) {
                extern struct pt_regs fake_swapper_regs;

                p->thread.kregs = &fake_swapper_regs;
                new_stack += STACKFRAME_SZ + TRACEREG_SZ;
                childregs->u_regs[UREG_FP] = (unsigned long) new_stack;
                p->thread.flags |= SPARC_FLAG_KTHREAD;
                p->thread.current_ds = KERNEL_DS;
                memcpy(new_stack, (void *)regs->u_regs[UREG_FP], STACKFRAME_SZ);
                childregs->u_regs[UREG_G6] = (unsigned long) ti;
        } else {
                p->thread.kregs = childregs;
                childregs->u_regs[UREG_FP] = sp;
                p->thread.flags &= ~SPARC_FLAG_KTHREAD;
                p->thread.current_ds = USER_DS;

                if (sp != regs->u_regs[UREG_FP]) {
                        struct sparc_stackf __user *childstack;
                        struct sparc_stackf __user *parentstack;

                        /*
                         * This is a clone() call with supplied user stack.
                         * Set some valid stack frames to give to the child.
                         */
                        childstack = (struct sparc_stackf __user *)
                            (sp & ~0x7UL);
                        parentstack = (struct sparc_stackf __user *)
                            regs->u_regs[UREG_FP];

#if 0
                        printk("clone: parent stack:\n");
                        show_stackframe(parentstack);
#endif

                        childstack = clone_stackframe(childstack, parentstack);
                        if (!childstack)
                                return -EFAULT;

#if 0
                        printk("clone: child stack:\n");
                        show_stackframe(childstack);
#endif

                        childregs->u_regs[UREG_FP] = (unsigned long)childstack;
                }
        }

#ifdef CONFIG_SMP
        /* FPU must be disabled on SMP. */
        childregs->psr &= ~PSR_EF;
#endif

        /* Set the return value for the child. */
        childregs->u_regs[UREG_I0] = current->pid;
        childregs->u_regs[UREG_I1] = 1;

        /* Set the return value for the parent. */
        regs->u_regs[UREG_I1] = 0;

        if (clone_flags & CLONE_SETTLS)
                childregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

        return 0;
}

/*
 * fill in the fpu structure for a core dump.
 */
int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
        if (used_math()) {
                memset(fpregs, 0, sizeof(*fpregs));
                fpregs->pr_q_entrysize = 8;
                return 1;
        }
#ifdef CONFIG_SMP
        if (test_thread_flag(TIF_USEDFPU)) {
                put_psr(get_psr() | PSR_EF);
                fpsave(&current->thread.float_regs[0], &current->thread.fsr,
                       &current->thread.fpqueue[0], &current->thread.fpqdepth);
                if (regs != NULL) {
                        regs->psr &= ~(PSR_EF);
                        clear_thread_flag(TIF_USEDFPU);
                }
        }
#else
        if (current == last_task_used_math) {
                put_psr(get_psr() | PSR_EF);
                fpsave(&current->thread.float_regs[0], &current->thread.fsr,
                       &current->thread.fpqueue[0], &current->thread.fpqdepth);
                if (regs != NULL) {
                        regs->psr &= ~(PSR_EF);
                        last_task_used_math = NULL;
                }
        }
#endif
        memcpy(&fpregs->pr_fr.pr_regs[0],
               &current->thread.float_regs[0],
               (sizeof(unsigned long) * 32));
        fpregs->pr_fsr = current->thread.fsr;
        fpregs->pr_qcnt = current->thread.fpqdepth;
        fpregs->pr_q_entrysize = 8;
        fpregs->pr_en = 1;
        if(fpregs->pr_qcnt != 0) {
                memcpy(&fpregs->pr_q[0],
                       &current->thread.fpqueue[0],
                       sizeof(struct fpq) * fpregs->pr_qcnt);
        }
        /* Zero out the rest. */
        memset(&fpregs->pr_q[fpregs->pr_qcnt], 0,
               sizeof(struct fpq) * (32 - fpregs->pr_qcnt));
        return 1;
}

/*
 * sparc_execve() executes a new program after the asm stub has set
 * things up for us. This should basically do what I want it to.
 */
asmlinkage int sparc_execve(struct pt_regs *regs)
{
        int error, base = 0;
        char *filename;

        /* Check for indirect call. */
        if(regs->u_regs[UREG_G1] == 0)
                base = 1;

        filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
        error = PTR_ERR(filename);
        if(IS_ERR(filename))
                goto out;
        error = do_execve(filename,
                          (char __user * __user *)regs->u_regs[base + UREG_I1],
                          (char __user * __user *)regs->u_regs[base + UREG_I2],
                          regs);
        putname(filename);
out:
        return error;
}

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
 * who haven't done an "execve()") should use this: it will work within
 * a system call from a "real" process, but the process memory space will
 * not be freed until both the parent and the child have exited.
 */
pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
        long retval;

        __asm__ __volatile__("mov %4, %%g2\n\t"    /* Set aside fn ptr... */
                             "mov %5, %%g3\n\t"    /* and arg. */
                             "mov %1, %%g1\n\t"
                             "mov %2, %%o0\n\t"    /* Clone flags. */
                             "mov 0, %%o1\n\t"     /* usp arg == 0 */
                             "t 0x10\n\t"          /* Linux/Sparc clone(). */
                             "cmp %%o1, 0\n\t"
                             "be 1f\n\t"           /* The parent, just return. */
                             " nop\n\t"            /* Delay slot. */
                             "jmpl %%g2, %%o7\n\t" /* Call the function. */
                             " mov %%g3, %%o0\n\t" /* Get back the arg in delay. */
                             "mov %3, %%g1\n\t"
                             "t 0x10\n\t"          /* Linux/Sparc exit(). */
                             /* Notreached by child. */
                             "1: mov %%o0, %0\n\t" :
                             "=r" (retval) :
                             "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
                             "i" (__NR_exit), "r" (fn), "r" (arg) :
                             "g1", "g2", "g3", "o0", "o1", "memory", "cc");
        return retval;
}

unsigned long get_wchan(struct task_struct *task)
{
        unsigned long pc, fp, bias = 0;
        unsigned long task_base = (unsigned long) task;
        unsigned long ret = 0;
        struct reg_window *rw;
        int count = 0;

        if (!task || task == current ||
            task->state == TASK_RUNNING)
                goto out;

        fp = task_thread_info(task)->ksp + bias;
        do {
                /* Bogus frame pointer? */
                if (fp < (task_base + sizeof(struct thread_info)) ||
                    fp >= (task_base + (2 * PAGE_SIZE)))
                        break;
                rw = (struct reg_window *) fp;
                pc = rw->ins[7];
                if (!in_sched_functions(pc)) {
                        ret = pc;
                        goto out;
                }
                fp = rw->ins[6] + bias;
        } while (++count < 16);

out:
        return ret;
}