Diffstat (limited to 'arch/sparc64/kernel/process.c')
-rw-r--r--   arch/sparc64/kernel/process.c | 153
1 file changed, 141 insertions(+), 12 deletions(-)
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 4129c0449856..2084f81a76e1 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -1,6 +1,6 @@
 /* arch/sparc64/kernel/process.c
  *
- * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
  * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
@@ -30,6 +30,7 @@
 #include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/elfcore.h>
+#include <linux/sysrq.h>
 
 #include <asm/oplib.h>
 #include <asm/uaccess.h>
@@ -49,6 +50,8 @@
 #include <asm/sstate.h>
 #include <asm/reboot.h>
 #include <asm/syscalls.h>
+#include <asm/irq_regs.h>
+#include <asm/smp.h>
 
 /* #define VERBOSE_SHOWREGS */
 
@@ -298,6 +301,118 @@ void show_regs(struct pt_regs *regs)
 #endif
 }
 
+#ifdef CONFIG_MAGIC_SYSRQ
+struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
+static DEFINE_SPINLOCK(global_reg_snapshot_lock);
+
+static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
+                              int this_cpu)
+{
+        flushw_all();
+
+        global_reg_snapshot[this_cpu].tstate = regs->tstate;
+        global_reg_snapshot[this_cpu].tpc = regs->tpc;
+        global_reg_snapshot[this_cpu].tnpc = regs->tnpc;
+        global_reg_snapshot[this_cpu].o7 = regs->u_regs[UREG_I7];
+
+        if (regs->tstate & TSTATE_PRIV) {
+                struct reg_window *rw;
+
+                rw = (struct reg_window *)
+                        (regs->u_regs[UREG_FP] + STACK_BIAS);
+                global_reg_snapshot[this_cpu].i7 = rw->ins[6];
+        } else
+                global_reg_snapshot[this_cpu].i7 = 0;
+
+        global_reg_snapshot[this_cpu].thread = tp;
+}
+
+/* In order to avoid hangs we do not try to synchronize with the
+ * global register dump client cpus.  The last store they make is to
+ * the thread pointer, so do a short poll waiting for that to become
+ * non-NULL.
+ */
+static void __global_reg_poll(struct global_reg_snapshot *gp)
+{
+        int limit = 0;
+
+        while (!gp->thread && ++limit < 100) {
+                barrier();
+                udelay(1);
+        }
+}
+
+static void sysrq_handle_globreg(int key, struct tty_struct *tty)
+{
+        struct thread_info *tp = current_thread_info();
+        struct pt_regs *regs = get_irq_regs();
+#ifdef CONFIG_KALLSYMS
+        char buffer[KSYM_SYMBOL_LEN];
+#endif
+        unsigned long flags;
+        int this_cpu, cpu;
+
+        if (!regs)
+                regs = tp->kregs;
+
+        spin_lock_irqsave(&global_reg_snapshot_lock, flags);
+
+        memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot));
+
+        this_cpu = raw_smp_processor_id();
+
+        __global_reg_self(tp, regs, this_cpu);
+
+        smp_fetch_global_regs();
+
+        for_each_online_cpu(cpu) {
+                struct global_reg_snapshot *gp = &global_reg_snapshot[cpu];
+                struct thread_info *tp;
+
+                __global_reg_poll(gp);
+
+                tp = gp->thread;
+                printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
+                       (cpu == this_cpu ? '*' : ' '), cpu,
+                       gp->tstate, gp->tpc, gp->tnpc,
+                       ((tp && tp->task) ? tp->task->comm : "NULL"),
+                       ((tp && tp->task) ? tp->task->pid : -1));
+#ifdef CONFIG_KALLSYMS
+                if (gp->tstate & TSTATE_PRIV) {
+                        sprint_symbol(buffer, gp->tpc);
+                        printk(" TPC[%s] ", buffer);
+                        sprint_symbol(buffer, gp->o7);
+                        printk("O7[%s] ", buffer);
+                        sprint_symbol(buffer, gp->i7);
+                        printk("I7[%s]\n", buffer);
+                } else
+#endif
+                {
+                        printk(" TPC[%lx] O7[%lx] I7[%lx]\n",
+                               gp->tpc, gp->o7, gp->i7);
+                }
+        }
+
+        memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot));
+
+        spin_unlock_irqrestore(&global_reg_snapshot_lock, flags);
+}
+
+static struct sysrq_key_op sparc_globalreg_op = {
+        .handler        = sysrq_handle_globreg,
+        .help_msg       = "Globalregs",
+        .action_msg     = "Show Global CPU Regs",
+};
+
+static int __init sparc_globreg_init(void)
+{
+        return register_sysrq_key('y', &sparc_globalreg_op);
+}
+
+core_initcall(sparc_globreg_init);
+
+#endif
+
 unsigned long thread_saved_pc(struct task_struct *tsk)
 {
         struct thread_info *ti = task_thread_info(tsk);
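
The comment above __global_reg_poll() in the hunk above captures the key design point: the initiating CPU never blocks on the other CPUs. Each remote CPU fills in its snapshot slot and writes the thread pointer last, and the initiator only does a short, bounded poll for that final store, so a wedged CPU costs about 100 microseconds and an empty line in the dump rather than a hang. A minimal userspace sketch of that same pattern follows; pthreads stand in for CPUs, every identifier in it is invented for the illustration, and none of it is kernel code.

/* Sketch only: bounded poll for a peer's final "done" store, in the
 * spirit of __global_reg_poll().  Build with: cc -pthread poll_sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define NPEERS 4

struct snapshot {
        unsigned long pc;          /* payload, written first by the peer */
        void * volatile thread;    /* written last; non-NULL means "done" */
};

static struct snapshot slots[NPEERS];

static void *peer(void *arg)
{
        long id = (long) arg;

        slots[id].pc = 0x1000 + id;              /* fill in the payload...   */
        __sync_synchronize();                    /* ...order it before...    */
        slots[id].thread = (void *) (id + 1);    /* ...the final "done" store */
        return NULL;
}

int main(void)
{
        pthread_t tids[NPEERS];
        long i;

        memset(slots, 0, sizeof(slots));         /* initiator clears every slot */
        for (i = 0; i < NPEERS; i++)
                pthread_create(&tids[i], NULL, peer, (void *) i);

        for (i = 0; i < NPEERS; i++) {
                int limit = 0;

                /* Bounded poll: give up after ~100us instead of hanging
                 * forever if a peer never responds. */
                while (!slots[i].thread && ++limit < 100)
                        usleep(1);
                printf("peer %ld: %s, pc=%#lx\n", i,
                       slots[i].thread ? "done" : "no response", slots[i].pc);
        }
        for (i = 0; i < NPEERS; i++)
                pthread_join(tids[i], NULL);
        return 0;
}

The same trade-off applies in the patch: a non-responding CPU simply leaves its zeroed slot in place, which is exactly what makes the dump usable on a machine that is already partially hung.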
@@ -542,20 +657,39 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                 struct task_struct *p, struct pt_regs *regs)
 {
         struct thread_info *t = task_thread_info(p);
+        struct sparc_stackf *parent_sf;
+        unsigned long child_stack_sz;
         char *child_trap_frame;
+        int kernel_thread;
 
-        /* Calculate offset to stack_frame & pt_regs */
-        child_trap_frame = task_stack_page(p) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
-        memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
+        kernel_thread = (regs->tstate & TSTATE_PRIV) ? 1 : 0;
+        parent_sf = ((struct sparc_stackf *) regs) - 1;
 
-        t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
+        /* Calculate offset to stack_frame & pt_regs */
+        child_stack_sz = ((STACKFRAME_SZ + TRACEREG_SZ) +
+                          (kernel_thread ? STACKFRAME_SZ : 0));
+        child_trap_frame = (task_stack_page(p) +
+                            (THREAD_SIZE - child_stack_sz));
+        memcpy(child_trap_frame, parent_sf, child_stack_sz);
+
+        t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) |
+                                 (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
                 (((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
         t->new_child = 1;
         t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
-        t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf));
+        t->kregs = (struct pt_regs *) (child_trap_frame +
+                                       sizeof(struct sparc_stackf));
         t->fpsaved[0] = 0;
 
-        if (regs->tstate & TSTATE_PRIV) {
+        if (kernel_thread) {
+                struct sparc_stackf *child_sf = (struct sparc_stackf *)
+                        (child_trap_frame + (STACKFRAME_SZ + TRACEREG_SZ));
+
+                /* Zero terminate the stack backtrace. */
+                child_sf->fp = NULL;
+                t->kregs->u_regs[UREG_FP] =
+                        ((unsigned long) child_sf) - STACK_BIAS;
+
                 /* Special case, if we are spawning a kernel thread from
                  * a userspace task (via KMOD, NFS, or similar) we must
                  * disable performance counters in the child because the
@@ -566,12 +700,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                         t->pcr_reg = 0;
                         t->flags &= ~_TIF_PERFCTR;
                 }
-                t->kregs->u_regs[UREG_FP] = t->ksp;
                 t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
-                flush_register_windows();
-                memcpy((void *)(t->ksp + STACK_BIAS),
-                       (void *)(regs->u_regs[UREG_FP] + STACK_BIAS),
-                       sizeof(struct sparc_stackf));
                 t->kregs->u_regs[UREG_G6] = (unsigned long) t;
                 t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
         } else {
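
For reference, once this patch is applied on a kernel built with CONFIG_MAGIC_SYSRQ=y, the new handler can be exercised either with the keyboard SysRq chord or, as root, by writing the key character to /proc/sysrq-trigger; the per-cpu register dump then appears in the kernel log (dmesg). A trivial helper along those lines is sketched below; the file name and program itself are made up for illustration and are not part of the patch.

/* trigger_globreg.c: write 'y' to /proc/sysrq-trigger to invoke the
 * handler registered above via register_sysrq_key('y', ...).
 * Requires root and CONFIG_MAGIC_SYSRQ=y. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/sysrq-trigger", O_WRONLY);

        if (fd < 0) {
                perror("open /proc/sysrq-trigger");
                return 1;
        }
        if (write(fd, "y", 1) != 1) {
                perror("write");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}

Each online CPU then contributes one line in the format of the printk() in the patch, with '*' marking the CPU that initiated the dump.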
