path: root/arch/tile/kernel/stack.c
author		Chris Metcalf <cmetcalf@tilera.com>	2013-08-06 16:10:23 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2013-08-13 16:04:14 -0400
commit		3ef23111546df9e9dab2e2befb412a9563db0628 (patch)
tree		ec72fa0e8d5d22a1390a486e1bf9308c4cbffd34 /arch/tile/kernel/stack.c
parent		2f9ac29eec71a696cb0dcc5fb82c0f8d4dac28c9 (diff)
tile: avoid recursive backtrace faults
This change adds support for avoiding recursive backtracer crashes; we
haven't seen this in practice other than when things are seriously
corrupt, but it may help avoid losing the root cause of a crash.

Also, don't abort kernel backtraces for invalid userspace PCs. If we
do, we lose the ability to backtrace through a userspace call to a bad
address above PAGE_OFFSET, even though it can be perfectly reasonable
to continue the backtrace in such a case.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
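For context while reading the hunks below, the guard pattern is simple
enough to sketch in userspace C. This is only a sketch: the static flag
here stands in for the per-task current->thread.in_backtrace flag the
patch actually uses, and dump_stack_demo() is a made-up driver, not a
kernel function.

#include <stdbool.h>
#include <stdio.h>

static bool in_backtrace;	/* stand-in for current->thread.in_backtrace */

static bool start_backtrace(void)
{
	if (in_backtrace) {
		fprintf(stderr, "Backtrace requested while in backtrace!\n");
		return false;	/* caller bails out instead of recursing */
	}
	in_backtrace = true;
	return true;
}

static void end_backtrace(void)
{
	in_backtrace = false;
}

static void dump_stack_demo(int faulted)
{
	if (!start_backtrace())
		return;		/* re-entry is refused, so no infinite loop */
	printf("walking stack frames...\n");
	if (faulted)
		dump_stack_demo(0);	/* simulate a fault inside the backtracer */
	end_backtrace();
}

int main(void)
{
	dump_stack_demo(1);
	return 0;
}

Because the real flag is per task, one task's backtrace never blocks
another's; only genuine re-entry on the same task trips the guard.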
Diffstat (limited to 'arch/tile/kernel/stack.c')
-rw-r--r--	arch/tile/kernel/stack.c	30
1 file changed, 28 insertions, 2 deletions
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index af8dfc9665f6..c972689231ef 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -103,8 +103,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 		if (kbt->verbose)
 			pr_err("  <%s while in kernel mode>\n", fault);
 	} else if (EX1_PL(p->ex1) == USER_PL &&
-		   p->pc < PAGE_OFFSET &&
-		   p->sp < PAGE_OFFSET) {
+		   p->sp < PAGE_OFFSET && p->sp != 0) {
 		if (kbt->verbose)
 			pr_err("  <%s while in user mode>\n", fault);
 	} else if (kbt->verbose) {
@@ -352,6 +351,26 @@ static void describe_addr(struct KBacktraceIterator *kbt,
 }
 
 /*
+ * Avoid possible crash recursion during backtrace.  If it happens, it
+ * makes it easy to lose the actual root cause of the failure, so we
+ * put a simple guard on all the backtrace loops.
+ */
+static bool start_backtrace(void)
+{
+	if (current->thread.in_backtrace) {
+		pr_err("Backtrace requested while in backtrace!\n");
+		return false;
+	}
+	current->thread.in_backtrace = true;
+	return true;
+}
+
+static void end_backtrace(void)
+{
+	current->thread.in_backtrace = false;
+}
+
+/*
  * This method wraps the backtracer's more generic support.
  * It is only invoked from the architecture-specific code; show_stack()
  * and dump_stack() (in entry.S) are architecture-independent entry points.
@@ -361,6 +380,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 	int i;
 	int have_mmap_sem = 0;
 
+	if (!start_backtrace())
+		return;
 	if (headers) {
 		/*
 		 * Add a blank line since if we are called from panic(),
@@ -402,6 +423,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 	pr_err("Stack dump complete\n");
 	if (have_mmap_sem)
 		up_read(&kbt->task->mm->mmap_sem);
+	end_backtrace();
 }
 EXPORT_SYMBOL(tile_show_stack);
 
@@ -463,6 +485,8 @@ void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
 	int skip = trace->skip;
 	int i = 0;
 
+	if (!start_backtrace())
+		goto done;
 	if (task == NULL || task == current)
 		KBacktraceIterator_init_current(&kbt);
 	else
@@ -476,6 +500,8 @@ void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
 			break;
 		trace->entries[i++] = kbt.it.pc;
 	}
+	end_backtrace();
+done:
 	trace->nr_entries = i;
 }
 EXPORT_SYMBOL(save_stack_trace_tsk);
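Two details of the hunks above are worth spelling out. In
save_stack_trace_tsk(), the guard bails to the new done: label rather
than returning outright, so trace->nr_entries is still assigned (as
zero frames recorded) even when a nested backtrace is refused. And
since this file only reads and writes current->thread.in_backtrace,
the flag itself must be declared elsewhere in the same commit (this
page's diffstat is limited to stack.c); in thread_struct the
declaration would look roughly like this sketch:

struct thread_struct {
	/* ... existing fields ... */
	/* Is this task currently doing a backtrace? */
	bool in_backtrace;
};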