author		Daniel Thompson <daniel.thompson@linaro.org>	2015-09-22 12:12:10 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2015-10-03 11:40:51 -0400
commit		0768330d46435f324a0b4860c889057524af17c2 (patch)
tree		1a7edb29ee41d640e92c67ea7a7ae910e902dd3e
parent		001bf455d20645190beb98ff4ee450dfea1b7eb2 (diff)
ARM: 8439/1: Fix backtrace generation when IPI is masked
Currently on ARM when <SysRq-L> is triggered from an interrupt handler
(e.g. a SysRq issued using UART or kbd) the main CPU will wedge for ten
seconds with interrupts masked before issuing a backtrace for every CPU
except itself.

The new backtrace code introduced by commit 96f0e00378d4 ("ARM: add
basic support for on-demand backtrace of other CPUs") does not work
correctly when run from an interrupt handler because IPI_CPU_BACKTRACE
is used to generate the backtrace on all CPUs but cannot preempt the
current calling context.

This can be fixed by detecting that the calling context cannot be
preempted and issuing the backtrace directly in this case. Issuing
directly leaves us without any pt_regs to pass to nmi_cpu_backtrace()
so we also modify the generic code to call dump_stack() when its
argument is NULL.

Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
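For orientation (not part of this patch): the hunks below touch only raise_nmi() and nmi_cpu_backtrace(). The arch hook that hands raise_nmi() to the generic helper was added by commit 96f0e00378d4 and is left untouched here; the sketch below assumes that v4.3-era wiring and is shown purely for context.

/* Sketch only (assumed from commit 96f0e00378d4, not changed by this patch). */
void arch_trigger_all_cpu_backtrace(bool include_self)
{
	/* raise_nmi() is the raise() callback patched in smp.c below */
	nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
}

When SysRq-L is handled from an interrupt handler, this path runs with interrupts masked, which is exactly the case the new check in raise_nmi() detects.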
-rw-r--r--	arch/arm/kernel/smp.c	9
-rw-r--r--	lib/nmi_backtrace.c	11
2 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 7c467c5b048e..b26361355dae 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -749,6 +749,15 @@ core_initcall(register_cpufreq_notifier);
 
 static void raise_nmi(cpumask_t *mask)
 {
+	/*
+	 * Generate the backtrace directly if we are running in a calling
+	 * context that is not preemptible by the backtrace IPI. Note
+	 * that nmi_cpu_backtrace() automatically removes the current cpu
+	 * from mask.
+	 */
+	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
+		nmi_cpu_backtrace(NULL);
+
 	smp_cross_call(mask, IPI_CPU_BACKTRACE);
 }
 
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 88d3d32e5923..6019c53c669e 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -43,6 +43,12 @@ static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
 	printk("%.*s", (end - start) + 1, buf);
 }
 
+/*
+ * When raise() is called it will be passed a pointer to the
+ * backtrace_mask. Architectures that call nmi_cpu_backtrace()
+ * directly from their raise() functions may rely on the mask
+ * they are passed being updated as a side effect of this call.
+ */
 void nmi_trigger_all_cpu_backtrace(bool include_self,
 				   void (*raise)(cpumask_t *mask))
 {
@@ -149,7 +155,10 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 		/* Replace printk to write into the NMI seq */
 		this_cpu_write(printk_func, nmi_vprintk);
 		pr_warn("NMI backtrace for cpu %d\n", cpu);
-		show_regs(regs);
+		if (regs)
+			show_regs(regs);
+		else
+			dump_stack();
 		this_cpu_write(printk_func, printk_func_save);
 
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
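With the dump_stack() fallback in place, nmi_cpu_backtrace() serves both callers: the IPI handler passes the interrupted pt_regs, while the direct call from raise_nmi() passes NULL. The sketch below is illustrative only; the helper name handle_backtrace_ipi() is hypothetical, standing in for the IPI_CPU_BACKTRACE dispatch in handle_IPI(), which is not shown in the hunks above.

/*
 * Hypothetical helper name; the real dispatch lives in handle_IPI()
 * and is not part of this patch.
 */
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	/* regs describe the interrupted context, so show_regs() is used */
	nmi_cpu_backtrace(regs);
}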