author		Sam Ravnborg <sam@ravnborg.org>		2008-12-03 06:11:52 -0500
committer	David S. Miller <davem@davemloft.net>	2008-12-04 12:17:21 -0500
commit		a88b5ba8bd8ac18aad65ee6c6a254e2e74876db3 (patch)
tree		eb3d0ffaf53c3f7ec6083752c2097cecd1cb892a /arch/sparc64/kernel/process.c
parent		d670bd4f803c8b646acd20f3ba21e65458293faf (diff)
sparc,sparc64: unify kernel/
o Move all files from sparc64/kernel/ to sparc/kernel
  - rename as appropriate
o Update sparc/Makefile to the changes
o Update sparc/kernel/Makefile to include the sparc64 files

NOTE: This commit changes link order on sparc64! Link order had to
change for either of sparc32 and sparc64, and since sparc64 sees more
testing than sparc32, the link order was changed on sparc64, where
issues will be caught faster.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
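The link-order note is worth unpacking: initcalls registered at the same
level run in the order their objects are linked, so reshuffling a Makefile
can reorder boot-time initialization even though no code changed. Below is
a minimal sketch of the effect, not part of this commit; the function names
are invented, and you should imagine each initcall living in a separately
linked object file.

#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical example: with both initcalls at the same level
 * (core_initcall), the kernel invokes them in link order, i.e. the
 * order their objects appear in obj-y in the Makefile.
 */
static int __init linked_first_init(void)
{
        pr_info("runs first only while its object is linked first\n");
        return 0;
}
core_initcall(linked_first_init);

static int __init linked_second_init(void)
{
        pr_info("reordering the Makefile would run this one first\n");
        return 0;
}
core_initcall(linked_second_init);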
Diffstat (limited to 'arch/sparc64/kernel/process.c')
-rw-r--r--	arch/sparc64/kernel/process.c	812
1 file changed, 0 insertions, 812 deletions
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
deleted file mode 100644
index d5e2acef9877..000000000000
--- a/arch/sparc64/kernel/process.c
+++ /dev/null
@@ -1,812 +0,0 @@
/* arch/sparc64/kernel/process.c
 *
 * Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/sysrq.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
#include <asm/syscalls.h>
#include <asm/irq_regs.h>
#include <asm/smp.h>

#include "kstack.h"

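/* Idle yield on sun4v: clear TIF_POLLING_NRFLAG so the scheduler knows
 * it must send a resched IPI rather than rely on us polling the flag,
 * then re-check need_resched() with interrupts disabled before handing
 * the virtual CPU back to the hypervisor.
 */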
static void sparc64_yield(int cpu)
{
        if (tlb_type != hypervisor)
                return;

        clear_thread_flag(TIF_POLLING_NRFLAG);
        smp_mb__after_clear_bit();

        while (!need_resched() && !cpu_is_offline(cpu)) {
                unsigned long pstate;

                /* Disable interrupts. */
                __asm__ __volatile__(
                        "rdpr %%pstate, %0\n\t"
                        "andn %0, %1, %0\n\t"
                        "wrpr %0, %%g0, %%pstate"
                        : "=&r" (pstate)
                        : "i" (PSTATE_IE));

                if (!need_resched() && !cpu_is_offline(cpu))
                        sun4v_cpu_yield();

                /* Re-enable interrupts. */
                __asm__ __volatile__(
                        "rdpr %%pstate, %0\n\t"
                        "or %0, %1, %0\n\t"
                        "wrpr %0, %%g0, %%pstate"
                        : "=&r" (pstate)
                        : "i" (PSTATE_IE));
        }

        set_thread_flag(TIF_POLLING_NRFLAG);
}

/* The idle loop on sparc64. */
void cpu_idle(void)
{
        int cpu = smp_processor_id();

        set_thread_flag(TIF_POLLING_NRFLAG);

        while (1) {
                tick_nohz_stop_sched_tick(1);

                while (!need_resched() && !cpu_is_offline(cpu))
                        sparc64_yield(cpu);

                tick_nohz_restart_sched_tick();

                preempt_enable_no_resched();

#ifdef CONFIG_HOTPLUG_CPU
                if (cpu_is_offline(cpu))
                        cpu_play_dead();
#endif

                schedule();
                preempt_disable();
        }
}

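/* Dump the current user register window of a 32-bit (compat) task.
 * The window sits at the task's stack pointer (%o6 == u_regs[14]), so
 * it is fetched with copy_from_user() after "flushw" has spilled the
 * task's live register windows to its stack.
 */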
#ifdef CONFIG_COMPAT
static void show_regwindow32(struct pt_regs *regs)
{
        struct reg_window32 __user *rw;
        struct reg_window32 r_w;
        mm_segment_t old_fs;

        __asm__ __volatile__ ("flushw");
        rw = compat_ptr((unsigned)regs->u_regs[14]);
        old_fs = get_fs();
        set_fs (USER_DS);
        if (copy_from_user (&r_w, rw, sizeof(r_w))) {
                set_fs (old_fs);
                return;
        }

        set_fs (old_fs);
        printk("l0: %08x l1: %08x l2: %08x l3: %08x "
               "l4: %08x l5: %08x l6: %08x l7: %08x\n",
               r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
               r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
        printk("i0: %08x i1: %08x i2: %08x i3: %08x "
               "i4: %08x i5: %08x i6: %08x i7: %08x\n",
               r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
               r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
#else
#define show_regwindow32(regs) do { } while (0)
#endif

static void show_regwindow(struct pt_regs *regs)
{
        struct reg_window __user *rw;
        struct reg_window *rwk;
        struct reg_window r_w;
        mm_segment_t old_fs;

        if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
                __asm__ __volatile__ ("flushw");
                rw = (struct reg_window __user *)
                        (regs->u_regs[14] + STACK_BIAS);
                rwk = (struct reg_window *)
                        (regs->u_regs[14] + STACK_BIAS);
                if (!(regs->tstate & TSTATE_PRIV)) {
                        old_fs = get_fs();
                        set_fs (USER_DS);
                        if (copy_from_user (&r_w, rw, sizeof(r_w))) {
                                set_fs (old_fs);
                                return;
                        }
                        rwk = &r_w;
                        set_fs (old_fs);
                }
        } else {
                show_regwindow32(regs);
                return;
        }
        printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
               rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
        printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
               rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
        printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
               rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
        printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
               rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
        if (regs->tstate & TSTATE_PRIV)
                printk("I7: <%pS>\n", (void *) rwk->ins[7]);
}

void show_regs(struct pt_regs *regs)
{
        printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
               regs->tpc, regs->tnpc, regs->y, print_tainted());
        printk("TPC: <%pS>\n", (void *) regs->tpc);
        printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
               regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
               regs->u_regs[3]);
        printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
               regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
               regs->u_regs[7]);
        printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
               regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
               regs->u_regs[11]);
        printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
               regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
               regs->u_regs[15]);
        printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
        show_regwindow(regs);
}

struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
static DEFINE_SPINLOCK(global_reg_snapshot_lock);

static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
                              int this_cpu)
{
        flushw_all();

        global_reg_snapshot[this_cpu].tstate = regs->tstate;
        global_reg_snapshot[this_cpu].tpc = regs->tpc;
        global_reg_snapshot[this_cpu].tnpc = regs->tnpc;
        global_reg_snapshot[this_cpu].o7 = regs->u_regs[UREG_I7];

        if (regs->tstate & TSTATE_PRIV) {
                struct reg_window *rw;

                rw = (struct reg_window *)
                        (regs->u_regs[UREG_FP] + STACK_BIAS);
                if (kstack_valid(tp, (unsigned long) rw)) {
                        global_reg_snapshot[this_cpu].i7 = rw->ins[7];
                        rw = (struct reg_window *)
                                (rw->ins[6] + STACK_BIAS);
                        if (kstack_valid(tp, (unsigned long) rw))
                                global_reg_snapshot[this_cpu].rpc = rw->ins[7];
                }
        } else {
                global_reg_snapshot[this_cpu].i7 = 0;
                global_reg_snapshot[this_cpu].rpc = 0;
        }
        global_reg_snapshot[this_cpu].thread = tp;
}

/* In order to avoid hangs we do not try to synchronize with the
 * global register dump client cpus.  The last store they make is to
 * the thread pointer, so do a short poll waiting for that to become
 * non-NULL.
 */
static void __global_reg_poll(struct global_reg_snapshot *gp)
{
        int limit = 0;

        while (!gp->thread && ++limit < 100) {
                barrier();
                udelay(1);
        }
}

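/* Snapshot the registers of every online cpu: take the snapshot lock,
 * record this cpu's state directly, kick the other cpus with
 * smp_fetch_global_regs(), then poll each slot until its owner has
 * filled it in (or the poll times out) and print the result.
 */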
void __trigger_all_cpu_backtrace(void)
{
        struct thread_info *tp = current_thread_info();
        struct pt_regs *regs = get_irq_regs();
        unsigned long flags;
        int this_cpu, cpu;

        if (!regs)
                regs = tp->kregs;

        spin_lock_irqsave(&global_reg_snapshot_lock, flags);

        memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot));

        this_cpu = raw_smp_processor_id();

        __global_reg_self(tp, regs, this_cpu);

        smp_fetch_global_regs();

        for_each_online_cpu(cpu) {
                struct global_reg_snapshot *gp = &global_reg_snapshot[cpu];

                __global_reg_poll(gp);

                tp = gp->thread;
                printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
                       (cpu == this_cpu ? '*' : ' '), cpu,
                       gp->tstate, gp->tpc, gp->tnpc,
                       ((tp && tp->task) ? tp->task->comm : "NULL"),
                       ((tp && tp->task) ? tp->task->pid : -1));

                if (gp->tstate & TSTATE_PRIV) {
                        printk("             TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
                               (void *) gp->tpc,
                               (void *) gp->o7,
                               (void *) gp->i7,
                               (void *) gp->rpc);
                } else {
                        printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
                               gp->tpc, gp->o7, gp->i7, gp->rpc);
                }
        }

        memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot));

        spin_unlock_irqrestore(&global_reg_snapshot_lock, flags);
}

#ifdef CONFIG_MAGIC_SYSRQ

static void sysrq_handle_globreg(int key, struct tty_struct *tty)
{
        __trigger_all_cpu_backtrace();
}

static struct sysrq_key_op sparc_globalreg_op = {
        .handler        = sysrq_handle_globreg,
        .help_msg       = "Globalregs",
        .action_msg     = "Show Global CPU Regs",
};

static int __init sparc_globreg_init(void)
{
        return register_sysrq_key('y', &sparc_globalreg_op);
}

core_initcall(sparc_globreg_init);

#endif

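/* Report where a sleeping task will resume: follow the saved kernel
 * stack pointer to the caller's frame and pull out its return address
 * (%i7).  0xdeadbeef is returned as a recognizable sentinel when the
 * stack cannot be walked.
 */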
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct thread_info *ti = task_thread_info(tsk);
        unsigned long ret = 0xdeadbeefUL;

        if (ti && ti->ksp) {
                unsigned long *sp;
                sp = (unsigned long *)(ti->ksp + STACK_BIAS);
                if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
                    sp[14]) {
                        unsigned long *fp;
                        fp = (unsigned long *)(sp[14] + STACK_BIAS);
                        if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
                                ret = fp[15];
                }
        }
        return ret;
}

/* Free current thread data structures etc. */
void exit_thread(void)
{
        struct thread_info *t = current_thread_info();

        if (t->utraps) {
                if (t->utraps[0] < 2)
                        kfree (t->utraps);
                else
                        t->utraps[0]--;
        }

        if (test_and_clear_thread_flag(TIF_PERFCTR)) {
                t->user_cntd0 = t->user_cntd1 = NULL;
                t->pcr_reg = 0;
                write_pcr(0);
        }
}

void flush_thread(void)
{
        struct thread_info *t = current_thread_info();
        struct mm_struct *mm;

        if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
                clear_ti_thread_flag(t, TIF_ABI_PENDING);
                if (test_ti_thread_flag(t, TIF_32BIT))
                        clear_ti_thread_flag(t, TIF_32BIT);
                else
                        set_ti_thread_flag(t, TIF_32BIT);
        }

        mm = t->task->mm;
        if (mm)
                tsb_context_switch(mm);

        set_thread_wsaved(0);

        /* Turn off performance counters if on. */
        if (test_and_clear_thread_flag(TIF_PERFCTR)) {
                t->user_cntd0 = t->user_cntd1 = NULL;
                t->pcr_reg = 0;
                write_pcr(0);
        }

        /* Clear FPU register state. */
        t->fpsaved[0] = 0;

        if (get_thread_current_ds() != ASI_AIUS)
                set_fs(USER_DS);
}

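/* Set up the user stack for a child created with a new stack pointer
 * (clone() with a non-NULL child stack): copy the parent's topmost
 * frame, from its stack pointer up to its saved frame pointer, onto
 * the child stack and relink the copied window's saved %fp so the
 * child's register window can be restored there.
 */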
/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
        unsigned long fp, distance, rval;

        if (!(test_thread_flag(TIF_32BIT))) {
                csp += STACK_BIAS;
                psp += STACK_BIAS;
                __get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
                fp += STACK_BIAS;
        } else
                __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

        /* Now 8-byte align the stack as this is mandatory in the
         * Sparc ABI due to how register windows work.  This hides
         * the restriction from thread libraries etc.  -DaveM
         */
        csp &= ~7UL;

        distance = fp - psp;
        rval = (csp - distance);
        if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
                rval = 0;
        else if (test_thread_flag(TIF_32BIT)) {
                if (put_user(((u32)csp),
                             &(((struct reg_window32 __user *)rval)->ins[6])))
                        rval = 0;
        } else {
                if (put_user(((u64)csp - STACK_BIAS),
                             &(((struct reg_window __user *)rval)->ins[6])))
                        rval = 0;
                else
                        rval = rval - STACK_BIAS;
        }

        return rval;
}

/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
                                       struct thread_info *t)
{
        int i;

        for (i = first_win; i < last_win; i++) {
                t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
                memcpy(&t->reg_window[i], &t->reg_window[i+1],
                       sizeof(struct reg_window));
        }
}

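/* Try to push any user register windows still buffered in thread_info
 * (rwbuf_stkptrs/reg_window) out to the user stack.  Windows whose
 * stack pages cannot be written stay buffered; fault_in_user_windows()
 * below is the variant that must succeed or kill the task.
 */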
void synchronize_user_stack(void)
{
        struct thread_info *t = current_thread_info();
        unsigned long window;

        flush_user_windows();
        if ((window = get_thread_wsaved()) != 0) {
                int winsize = sizeof(struct reg_window);
                int bias = 0;

                if (test_thread_flag(TIF_32BIT))
                        winsize = sizeof(struct reg_window32);
                else
                        bias = STACK_BIAS;

                window -= 1;
                do {
                        unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
                        struct reg_window *rwin = &t->reg_window[window];

                        if (!copy_to_user((char __user *)sp, rwin, winsize)) {
                                shift_window_buffer(window, get_thread_wsaved() - 1, t);
                                set_thread_wsaved(get_thread_wsaved() - 1);
                        }
                } while (window--);
        }
}

static void stack_unaligned(unsigned long sp)
{
        siginfo_t info;

        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRALN;
        info.si_addr = (void __user *) sp;
        info.si_trapno = 0;
        force_sig_info(SIGBUS, &info, current);
}

void fault_in_user_windows(void)
{
        struct thread_info *t = current_thread_info();
        unsigned long window;
        int winsize = sizeof(struct reg_window);
        int bias = 0;

        if (test_thread_flag(TIF_32BIT))
                winsize = sizeof(struct reg_window32);
        else
                bias = STACK_BIAS;

        flush_user_windows();
        window = get_thread_wsaved();

        if (likely(window != 0)) {
                window -= 1;
                do {
                        unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
                        struct reg_window *rwin = &t->reg_window[window];

                        if (unlikely(sp & 0x7UL))
                                stack_unaligned(sp);

                        if (unlikely(copy_to_user((char __user *)sp,
                                                  rwin, winsize)))
                                goto barf;
                } while (window--);
        }
        set_thread_wsaved(0);
        return;

barf:
        set_thread_wsaved(window + 1);
        do_exit(SIGILL);
}

asmlinkage long sparc_do_fork(unsigned long clone_flags,
                              unsigned long stack_start,
                              struct pt_regs *regs,
                              unsigned long stack_size)
{
        int __user *parent_tid_ptr, *child_tid_ptr;
        unsigned long orig_i1 = regs->u_regs[UREG_I1];
        long ret;

#ifdef CONFIG_COMPAT
        if (test_thread_flag(TIF_32BIT)) {
                parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
                child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
        } else
#endif
        {
                parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
                child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
        }

        ret = do_fork(clone_flags, stack_start,
                      regs, stack_size,
                      parent_tid_ptr, child_tid_ptr);

        /* If we get an error and potentially restart the system
         * call, we're screwed because copy_thread() clobbered
         * the parent's %o1.  So detect that case and restore it
         * here.
         */
        if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
                regs->u_regs[UREG_I1] = orig_i1;

        return ret;
}

/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 * Parent -->  %o0 == child's pid,  %o1 == 0
 * Child  -->  %o0 == parent's pid, %o1 == 1
 */
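/* The child's trap frame is laid out at the very top of its kernel
 * stack: a stack frame plus a pt_regs copied from the parent, with one
 * extra zero-terminated stack frame below them for kernel threads so
 * backtraces stop cleanly.
 */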
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                unsigned long unused,
                struct task_struct *p, struct pt_regs *regs)
{
        struct thread_info *t = task_thread_info(p);
        struct sparc_stackf *parent_sf;
        unsigned long child_stack_sz;
        char *child_trap_frame;
        int kernel_thread;

        kernel_thread = (regs->tstate & TSTATE_PRIV) ? 1 : 0;
        parent_sf = ((struct sparc_stackf *) regs) - 1;

        /* Calculate offset to stack_frame & pt_regs */
        child_stack_sz = ((STACKFRAME_SZ + TRACEREG_SZ) +
                          (kernel_thread ? STACKFRAME_SZ : 0));
        child_trap_frame = (task_stack_page(p) +
                            (THREAD_SIZE - child_stack_sz));
        memcpy(child_trap_frame, parent_sf, child_stack_sz);

        t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) |
                                 (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
                (((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
        t->new_child = 1;
        t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
        t->kregs = (struct pt_regs *) (child_trap_frame +
                                       sizeof(struct sparc_stackf));
        t->fpsaved[0] = 0;

        if (kernel_thread) {
                struct sparc_stackf *child_sf = (struct sparc_stackf *)
                        (child_trap_frame + (STACKFRAME_SZ + TRACEREG_SZ));

                /* Zero terminate the stack backtrace. */
                child_sf->fp = NULL;
                t->kregs->u_regs[UREG_FP] =
                        ((unsigned long) child_sf) - STACK_BIAS;

                /* Special case, if we are spawning a kernel thread from
                 * a userspace task (usermode helper, NFS or similar), we
                 * must disable performance counters in the child because
                 * the address space and protection realm are changing.
                 */
                if (t->flags & _TIF_PERFCTR) {
                        t->user_cntd0 = t->user_cntd1 = NULL;
                        t->pcr_reg = 0;
                        t->flags &= ~_TIF_PERFCTR;
                }
                t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
                t->kregs->u_regs[UREG_G6] = (unsigned long) t;
                t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
        } else {
                if (t->flags & _TIF_32BIT) {
                        sp &= 0x00000000ffffffffUL;
                        regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
                }
                t->kregs->u_regs[UREG_FP] = sp;
                t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT);
                if (sp != regs->u_regs[UREG_FP]) {
                        unsigned long csp;

                        csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
                        if (!csp)
                                return -EFAULT;
                        t->kregs->u_regs[UREG_FP] = csp;
                }
                if (t->utraps)
                        t->utraps[0]++;
        }

        /* Set the return value for the child. */
        t->kregs->u_regs[UREG_I0] = current->pid;
        t->kregs->u_regs[UREG_I1] = 1;

        /* Set the second return value for the parent. */
        regs->u_regs[UREG_I1] = 0;

        if (clone_flags & CLONE_SETTLS)
                t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

        return 0;
}

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process (i.e. the swapper or direct
 * descendants who haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be freed until both the parent and
 * the child have exited.
 */
pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
        long retval;

        /* If the parent runs before fn(arg) is called by the child,
         * the input registers of this function can be clobbered.
         * So we stash 'fn' and 'arg' into global registers which
         * will not be modified by the parent.
         */
        __asm__ __volatile__("mov %4, %%g2\n\t"      /* Save FN into global */
                             "mov %5, %%g3\n\t"      /* Save ARG into global */
                             "mov %1, %%g1\n\t"      /* Clone syscall nr. */
                             "mov %2, %%o0\n\t"      /* Clone flags. */
                             "mov 0, %%o1\n\t"       /* usp arg == 0 */
                             "t 0x6d\n\t"            /* Linux/Sparc clone(). */
                             "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */
                             " mov %%o0, %0\n\t"
                             "jmpl %%g2, %%o7\n\t"   /* Call the function. */
                             " mov %%g3, %%o0\n\t"   /* Set arg in delay. */
                             "mov %3, %%g1\n\t"
                             "t 0x6d\n\t"            /* Linux/Sparc exit(). */
                             /* Notreached by child. */
                             "1:" :
                             "=r" (retval) :
                             "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
                             "i" (__NR_exit),  "r" (fn), "r" (arg) :
                             "g1", "g2", "g3", "o0", "o1", "memory", "cc");
        return retval;
}

typedef struct {
        union {
                unsigned int    pr_regs[32];
                unsigned long   pr_dregs[16];
        } pr_fr;
        unsigned int __unused;
        unsigned int    pr_fsr;
        unsigned char   pr_qcnt;
        unsigned char   pr_q_entrysize;
        unsigned char   pr_en;
        unsigned int    pr_q[64];
} elf_fpregset_t32;

/*
 * fill in the fpu structure for a core dump.
 */
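/* FPRS_DL/FPRS_DU track whether the lower/upper half of the fp
 * register file holds live state, and FPRS_FEF whether the FPU was
 * enabled at all; halves that were never dirtied are dumped as zeros.
 */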
int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
        unsigned long *kfpregs = current_thread_info()->fpregs;
        unsigned long fprs = current_thread_info()->fpsaved[0];

        if (test_thread_flag(TIF_32BIT)) {
                elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;

                if (fprs & FPRS_DL)
                        memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
                               sizeof(unsigned int) * 32);
                else
                        memset(&fpregs32->pr_fr.pr_regs[0], 0,
                               sizeof(unsigned int) * 32);
                fpregs32->pr_qcnt = 0;
                fpregs32->pr_q_entrysize = 8;
                memset(&fpregs32->pr_q[0], 0,
                       (sizeof(unsigned int) * 64));
                if (fprs & FPRS_FEF) {
                        fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
                        fpregs32->pr_en = 1;
                } else {
                        fpregs32->pr_fsr = 0;
                        fpregs32->pr_en = 0;
                }
        } else {
                if (fprs & FPRS_DL)
                        memcpy(&fpregs->pr_regs[0], kfpregs,
                               sizeof(unsigned int) * 32);
                else
                        memset(&fpregs->pr_regs[0], 0,
                               sizeof(unsigned int) * 32);
                if (fprs & FPRS_DU)
                        memcpy(&fpregs->pr_regs[16], kfpregs+16,
                               sizeof(unsigned int) * 32);
                else
                        memset(&fpregs->pr_regs[16], 0,
                               sizeof(unsigned int) * 32);
                if (fprs & FPRS_FEF) {
                        fpregs->pr_fsr = current_thread_info()->xfsr[0];
                        fpregs->pr_gsr = current_thread_info()->gsr[0];
                } else {
                        fpregs->pr_fsr = fpregs->pr_gsr = 0;
                }
                fpregs->pr_fprs = fprs;
        }
        return 1;
}

/*
 * sparc_execve() executes a new program after the asm stub has set
 * things up for us.  This should basically do what I want it to.
 */
asmlinkage int sparc_execve(struct pt_regs *regs)
{
        int error, base = 0;
        char *filename;

        /* User register window flush is done by entry.S */

        /* Check for indirect call. */
        if (regs->u_regs[UREG_G1] == 0)
                base = 1;

        filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename,
                          (char __user * __user *)
                          regs->u_regs[base + UREG_I1],
                          (char __user * __user *)
                          regs->u_regs[base + UREG_I2], regs);
        putname(filename);
        if (!error) {
                fprs_write(0);
                current_thread_info()->xfsr[0] = 0;
                current_thread_info()->fpsaved[0] = 0;
                regs->tstate &= ~TSTATE_PEF;
        }
out:
        return error;
}

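/* Find the "wait channel" of a sleeping task: walk at most 16 saved
 * register windows up its kernel stack and return the first return
 * address that is not itself inside the scheduler.
 */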
unsigned long get_wchan(struct task_struct *task)
{
        unsigned long pc, fp, bias = 0;
        struct thread_info *tp;
        struct reg_window *rw;
        unsigned long ret = 0;
        int count = 0;

        if (!task || task == current ||
            task->state == TASK_RUNNING)
                goto out;

        tp = task_thread_info(task);
        bias = STACK_BIAS;
        fp = task_thread_info(task)->ksp + bias;

        do {
                if (!kstack_valid(tp, fp))
                        break;
                rw = (struct reg_window *) fp;
                pc = rw->ins[7];
                if (!in_sched_functions(pc)) {
                        ret = pc;
                        goto out;
                }
                fp = rw->ins[6] + bias;
        } while (++count < 16);

out:
        return ret;
}