author     David S. Miller <davem@davemloft.net>  2008-05-20 02:46:00 -0400
committer  David S. Miller <davem@davemloft.net>  2008-05-20 03:33:45 -0400
commit     93dae5b70e7c1c8e927d22e1c20a941ca376906a
tree       f255087706b9d176455b17e4384f4632f59f4cde /arch
parent     88278ca27a43ae503572b52ea2c171fbf45db5a2
sparc64: Add global register dumping facility.
When a cpu really is stuck in the kernel, it can often be
impossible to figure out which cpu is stuck where.  The
worst case is when the stuck cpu has interrupts disabled.
Therefore, implement a global cpu state capture that uses
SMP message interrupts which are not disabled by the
normal IRQ enable/disable APIs of the kernel.
As long as we can get a sysrq 'y' to the kernel, we can
get a dump. Even if the console interrupt cpu is wedged,
we can trigger it from userspace using /proc/sysrq-trigger.
The output is kept compact so that this facility stays
usable on high cpu count systems, which is where it will
likely find itself the most useful :)
Signed-off-by: David S. Miller <davem@davemloft.net>
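
For illustration only (not part of this commit): a minimal userspace sketch of
the /proc/sysrq-trigger path mentioned in the changelog above. It assumes
CONFIG_MAGIC_SYSRQ is enabled and root privileges; on a console the same dump
can be requested directly with the sysrq 'y' key.

/* Hypothetical helper, not in the patch: ask the kernel for a global
 * register dump by writing 'y' to /proc/sysrq-trigger.  Requires root
 * and CONFIG_MAGIC_SYSRQ; the dump appears on the console / in dmesg.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sysrq-trigger", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/sysrq-trigger");
		return 1;
	}
	if (write(fd, "y", 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
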
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc64/kernel/process.c | 117
-rw-r--r--  arch/sparc64/kernel/smp.c     |  10
-rw-r--r--  arch/sparc64/mm/ultra.S       |  29
3 files changed, 154 insertions, 2 deletions
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 4129c0449856..0a0c05fc3a33 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -1,6 +1,6 @@
 /* arch/sparc64/kernel/process.c
  *
- * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
  * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
@@ -30,6 +30,7 @@
 #include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/elfcore.h>
+#include <linux/sysrq.h>
 
 #include <asm/oplib.h>
 #include <asm/uaccess.h>
@@ -49,6 +50,8 @@
 #include <asm/sstate.h>
 #include <asm/reboot.h>
 #include <asm/syscalls.h>
+#include <asm/irq_regs.h>
+#include <asm/smp.h>
 
 /* #define VERBOSE_SHOWREGS */
 
@@ -298,6 +301,118 @@ void show_regs(struct pt_regs *regs)
 #endif
 }
 
+#ifdef CONFIG_MAGIC_SYSRQ
+struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
+static DEFINE_SPINLOCK(global_reg_snapshot_lock);
+
+static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
+			      int this_cpu)
+{
+	flushw_all();
+
+	global_reg_snapshot[this_cpu].tstate = regs->tstate;
+	global_reg_snapshot[this_cpu].tpc = regs->tpc;
+	global_reg_snapshot[this_cpu].tnpc = regs->tnpc;
+	global_reg_snapshot[this_cpu].o7 = regs->u_regs[UREG_I7];
+
+	if (regs->tstate & TSTATE_PRIV) {
+		struct reg_window *rw;
+
+		rw = (struct reg_window *)
+			(regs->u_regs[UREG_FP] + STACK_BIAS);
+		global_reg_snapshot[this_cpu].i7 = rw->ins[6];
+	} else
+		global_reg_snapshot[this_cpu].i7 = 0;
+
+	global_reg_snapshot[this_cpu].thread = tp;
+}
+
+/* In order to avoid hangs we do not try to synchronize with the
+ * global register dump client cpus.  The last store they make is to
+ * the thread pointer, so do a short poll waiting for that to become
+ * non-NULL.
+ */
+static void __global_reg_poll(struct global_reg_snapshot *gp)
+{
+	int limit = 0;
+
+	while (!gp->thread && ++limit < 100) {
+		barrier();
+		udelay(1);
+	}
+}
+
+static void sysrq_handle_globreg(int key, struct tty_struct *tty)
+{
+	struct thread_info *tp = current_thread_info();
+	struct pt_regs *regs = get_irq_regs();
+#ifdef CONFIG_KALLSYMS
+	char buffer[KSYM_SYMBOL_LEN];
+#endif
+	unsigned long flags;
+	int this_cpu, cpu;
+
+	if (!regs)
+		regs = tp->kregs;
+
+	spin_lock_irqsave(&global_reg_snapshot_lock, flags);
+
+	memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot));
+
+	this_cpu = raw_smp_processor_id();
+
+	__global_reg_self(tp, regs, this_cpu);
+
+	smp_fetch_global_regs();
+
+	for_each_online_cpu(cpu) {
+		struct global_reg_snapshot *gp = &global_reg_snapshot[cpu];
+		struct thread_info *tp;
+
+		__global_reg_poll(gp);
+
+		tp = gp->thread;
+		printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
+		       (cpu == this_cpu ? '*' : ' '), cpu,
+		       gp->tstate, gp->tpc, gp->tnpc,
+		       ((tp && tp->task) ? tp->task->comm : "NULL"),
+		       ((tp && tp->task) ? tp->task->pid : -1));
+#ifdef CONFIG_KALLSYMS
+		if (gp->tstate & TSTATE_PRIV) {
+			sprint_symbol(buffer, gp->tpc);
+			printk("             TPC[%s] ", buffer);
+			sprint_symbol(buffer, gp->o7);
+			printk("O7[%s] ", buffer);
+			sprint_symbol(buffer, gp->i7);
+			printk("I7[%s]\n", buffer);
+		} else
+#endif
+		{
+			printk("             TPC[%lx] O7[%lx] I7[%lx]\n",
+			       gp->tpc, gp->o7, gp->i7);
+		}
+	}
+
+	memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot));
+
+	spin_unlock_irqrestore(&global_reg_snapshot_lock, flags);
+}
+
+static struct sysrq_key_op sparc_globalreg_op = {
+	.handler	= sysrq_handle_globreg,
+	.help_msg	= "Globalregs",
+	.action_msg	= "Show Global CPU Regs",
+};
+
+static int __init sparc_globreg_init(void)
+{
+	return register_sysrq_key('y', &sparc_globalreg_op);
+}
+
+core_initcall(sparc_globreg_init);
+
+#endif
+
 unsigned long thread_saved_pc(struct task_struct *tsk)
 {
 	struct thread_info *ti = task_thread_info(tsk);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 0d6403a630ac..fa63c68a1819 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -900,6 +900,9 @@ extern unsigned long xcall_flush_tlb_mm;
 extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_report_regs;
+#ifdef CONFIG_MAGIC_SYSRQ
+extern unsigned long xcall_fetch_glob_regs;
+#endif
 extern unsigned long xcall_receive_signal;
 extern unsigned long xcall_new_mmu_context_version;
 #ifdef CONFIG_KGDB
@@ -1080,6 +1083,13 @@ void smp_report_regs(void)
 	smp_cross_call(&xcall_report_regs, 0, 0, 0);
 }
 
+#ifdef CONFIG_MAGIC_SYSRQ
+void smp_fetch_global_regs(void)
+{
+	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
+}
+#endif
+
 /* We know that the window frames of the user have been flushed
  * to the stack before we get here because all callers of us
  * are flush_tlb_*() routines, and these run after flush_cache_*()
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 15d124963f68..9bb2d90a9df6 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -1,7 +1,7 @@
 /*
  * ultra.S: Don't expand these all over the place...
  *
- * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
  */
 
 #include <asm/asi.h>
@@ -15,6 +15,7 @@
 #include <asm/thread_info.h>
 #include <asm/cacheflush.h>
 #include <asm/hypervisor.h>
+#include <asm/cpudata.h>
 
 /* Basically, most of the Spitfire vs. Cheetah madness
  * has to do with the fact that Cheetah does not support
@@ -514,6 +515,32 @@ xcall_report_regs:
 	 b		rtrap_xcall
 	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
 
+#ifdef CONFIG_MAGIC_SYSRQ
+	.globl		xcall_fetch_glob_regs
+xcall_fetch_glob_regs:
+	sethi		%hi(global_reg_snapshot), %g1
+	or		%g1, %lo(global_reg_snapshot), %g1
+	__GET_CPUID(%g2)
+	sllx		%g2, 6, %g3
+	add		%g1, %g3, %g1
+	rdpr		%tstate, %g7
+	stx		%g7, [%g1 + GR_SNAP_TSTATE]
+	rdpr		%tpc, %g7
+	stx		%g7, [%g1 + GR_SNAP_TPC]
+	rdpr		%tnpc, %g7
+	stx		%g7, [%g1 + GR_SNAP_TNPC]
+	stx		%o7, [%g1 + GR_SNAP_O7]
+	stx		%i7, [%g1 + GR_SNAP_I7]
+	sethi		%hi(trap_block), %g7
+	or		%g7, %lo(trap_block), %g7
+	sllx		%g2, TRAP_BLOCK_SZ_SHIFT, %g2
+	add		%g7, %g2, %g7
+	ldx		[%g7 + TRAP_PER_CPU_THREAD], %g3
+	membar		#StoreStore
+	stx		%g3, [%g1 + GR_SNAP_THREAD]
+	retry
+#endif /* CONFIG_MAGIC_SYSRQ */
+
 #ifdef DCACHE_ALIASING_POSSIBLE
 	.align		32
 	.globl		xcall_flush_dcache_page_cheetah
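
Note on the assembly above: xcall_fetch_glob_regs indexes global_reg_snapshot
with a 64-byte stride (the sllx %g2, 6) and stores through GR_SNAP_* assembler
offsets. The struct definition and those offsets live outside arch/ and are
not part of this diffstat-limited view; the following is only a sketch of a
per-cpu slot consistent with the stride and with the fields the C code in
process.c uses. The padding members are an assumption made purely to round
the slot up to 64 bytes.

/* Sketch only -- not the definition from this commit. */
struct global_reg_snapshot {
	unsigned long		tstate;		/* GR_SNAP_TSTATE */
	unsigned long		tpc;		/* GR_SNAP_TPC */
	unsigned long		tnpc;		/* GR_SNAP_TNPC */
	unsigned long		o7;		/* GR_SNAP_O7 */
	unsigned long		i7;		/* GR_SNAP_I7 */
	struct thread_info	*thread;	/* GR_SNAP_THREAD, written last */
	unsigned long		pad1;		/* assumed padding so that ...  */
	unsigned long		pad2;		/* ... sizeof() == 64 == 1 << 6 */
};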