aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/dumpstack_32.c
diff options
context:
space:
mode:
authorJosh Poimboeuf <jpoimboe@redhat.com>2016-09-08 17:49:20 -0400
committerIngo Molnar <mingo@kernel.org>2016-09-14 11:23:30 -0400
commitcfeeed279dc2fa83a00fbe4856ebd231d56201ab (patch)
tree7aa8cf1f77754ec22e9db937ec40db24b6550a96 /arch/x86/kernel/dumpstack_32.c
parent85063fac1f72419eec4349621fe829b07f9acb1e (diff)
x86/dumpstack: Allow preemption in show_stack_log_lvl() and dump_trace()
show_stack_log_lvl() and dump_trace() are already preemption safe: - If they're running in irq or exception context, preemption is already disabled and the percpu stack pointers can be trusted. - If they're running with preemption enabled, they must be running on the task stack anyway, so it doesn't matter if they're comparing the stack pointer against a percpu stack pointer from this CPU or another one: either way it won't match. Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Andy Lutomirski <luto@kernel.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Byungchul Park <byungchul.park@lge.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Kees Cook <keescook@chromium.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Nilay Vaish <nilayvaish@gmail.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/a0ca0b1044eca97d4f0ec7c1619cf80b3b65560d.1473371307.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel/dumpstack_32.c')
-rw-r--r--arch/x86/kernel/dumpstack_32.c14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index c533b8b5a373..da5cd62f93ab 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -24,16 +24,16 @@ static void *is_irq_stack(void *p, void *irq)
 }
 
 
-static void *is_hardirq_stack(unsigned long *stack, int cpu)
+static void *is_hardirq_stack(unsigned long *stack)
 {
-	void *irq = per_cpu(hardirq_stack, cpu);
+	void *irq = this_cpu_read(hardirq_stack);
 
 	return is_irq_stack(stack, irq);
 }
 
-static void *is_softirq_stack(unsigned long *stack, int cpu)
+static void *is_softirq_stack(unsigned long *stack)
 {
-	void *irq = per_cpu(softirq_stack, cpu);
+	void *irq = this_cpu_read(softirq_stack);
 
 	return is_irq_stack(stack, irq);
 }
@@ -42,7 +42,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		unsigned long *stack, unsigned long bp,
 		const struct stacktrace_ops *ops, void *data)
 {
-	const unsigned cpu = get_cpu();
 	int graph = 0;
 	u32 *prev_esp;
 
@@ -53,9 +52,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	for (;;) {
 		void *end_stack;
 
-		end_stack = is_hardirq_stack(stack, cpu);
+		end_stack = is_hardirq_stack(stack);
 		if (!end_stack)
-			end_stack = is_softirq_stack(stack, cpu);
+			end_stack = is_softirq_stack(stack);
 
 		bp = ops->walk_stack(task, stack, bp, ops, data,
 				     end_stack, &graph);
@@ -74,7 +73,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 			break;
 		touch_nmi_watchdog();
 	}
-	put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
 