author     Russell King <rmk+kernel@arm.linux.org.uk>  2014-09-03 18:57:13 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2015-07-17 07:23:17 -0400
commit     b2c0b2cbb282f0cf42518ffacbe197e6f2884168 (patch)
tree       4b7fc7ce95c914e2a62bce31385711cdd8c29669
parent     d770e558e21961ad6cfdf0ff7df0eb5d7d4f0754 (diff)
nmi: create generic NMI backtrace implementation
x86's NMI backtrace implementation (for arch_trigger_all_cpu_backtrace()) is fairly generic in nature - the only architecture-specific bits are the act of raising the NMI to other CPUs and reporting the status of the NMI handler. These are fairly simple to factor out, producing a generic implementation which can be shared between ARM and x86.

Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
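[Editor's note: as a rough illustration of the split described above, an architecture would supply only the "raise an NMI" callback and call the generic helper from its NMI handler. The sketch below is not part of this patch; arch_send_backtrace_ipi() and arch_backtrace_nmi_handler() are hypothetical names used for illustration.]

#include <linux/cpumask.h>
#include <linux/nmi.h>

/* hypothetical arch hook: send an NMI (or NMI-like IPI) to every CPU in @mask */
static void arch_send_backtrace_ipi(cpumask_t *mask)
{
	/* arch-specific IPI/NMI delivery goes here */
}

/* the arch-specific entry point delegates everything else to the generic code */
void arch_trigger_all_cpu_backtrace(bool include_self)
{
	nmi_trigger_all_cpu_backtrace(include_self, arch_send_backtrace_ipi);
}

/* called from the architecture's NMI (or backtrace IPI) handler */
static int arch_backtrace_nmi_handler(struct pt_regs *regs)
{
	/* dumps this CPU's registers if a backtrace was requested for it */
	return nmi_cpu_backtrace(regs) ? 1 : 0;
}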
-rw-r--r--  include/linux/nmi.h  |   6
-rw-r--r--  lib/Makefile         |   2
-rw-r--r--  lib/nmi_backtrace.c  | 162
3 files changed, 169 insertions(+), 1 deletion(-)
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index f94da0e65dea..5791e3229068 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -49,6 +49,12 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
 	arch_trigger_all_cpu_backtrace(false);
 	return true;
 }
+
+/* generic implementation */
+void nmi_trigger_all_cpu_backtrace(bool include_self,
+				   void (*raise)(cpumask_t *mask));
+bool nmi_cpu_backtrace(struct pt_regs *regs);
+
 #else
 static inline bool trigger_all_cpu_backtrace(void)
 {
diff --git a/lib/Makefile b/lib/Makefile
index 6897b527581a..392169c5bc4e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 sha1.o md5.o irq_regs.o argv_split.o \
 	 proportions.o flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-	 earlycpio.o seq_buf.o
+	 earlycpio.o seq_buf.o nmi_backtrace.o
 
 obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 lib-$(CONFIG_MMU) += ioremap.o
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
new file mode 100644
index 000000000000..88d3d32e5923
--- /dev/null
+++ b/lib/nmi_backtrace.c
@@ -0,0 +1,162 @@
+/*
+ * NMI backtrace support
+ *
+ * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
+ * with the following header:
+ *
+ * HW NMI watchdog support
+ *
+ * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
+ *
+ * Arch specific calls to support NMI watchdog
+ *
+ * Bits copied from original nmi.c file
+ */
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/kprobes.h>
+#include <linux/nmi.h>
+#include <linux/seq_buf.h>
+
+#ifdef arch_trigger_all_cpu_backtrace
+/* For reliability, we're prepared to waste bits here. */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
+static cpumask_t printtrace_mask;
+
+#define NMI_BUF_SIZE		4096
+
+struct nmi_seq_buf {
+	unsigned char	buffer[NMI_BUF_SIZE];
+	struct seq_buf	seq;
+};
+
+/* Safe printing in NMI context */
+static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
+{
+	const char *buf = s->buffer + start;
+
+	printk("%.*s", (end - start) + 1, buf);
+}
+
+void nmi_trigger_all_cpu_backtrace(bool include_self,
+				   void (*raise)(cpumask_t *mask))
+{
+	struct nmi_seq_buf *s;
+	int i, cpu, this_cpu = get_cpu();
+
+	if (test_and_set_bit(0, &backtrace_flag)) {
+		/*
+		 * If there is already a trigger_all_cpu_backtrace() in progress
+		 * (backtrace_flag == 1), don't output double cpu dump infos.
+		 */
+		put_cpu();
+		return;
+	}
+
+	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
+	if (!include_self)
+		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
+
+	cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask));
+
+	/*
+	 * Set up per_cpu seq_buf buffers that the NMIs running on the other
+	 * CPUs will write to.
+	 */
+	for_each_cpu(cpu, to_cpumask(backtrace_mask)) {
+		s = &per_cpu(nmi_print_seq, cpu);
+		seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE);
+	}
+
+	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
+		pr_info("Sending NMI to %s CPUs:\n",
+			(include_self ? "all" : "other"));
+		raise(to_cpumask(backtrace_mask));
+	}
+
+	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
+	for (i = 0; i < 10 * 1000; i++) {
+		if (cpumask_empty(to_cpumask(backtrace_mask)))
+			break;
+		mdelay(1);
+		touch_softlockup_watchdog();
+	}
+
+	/*
+	 * Now that all the NMIs have triggered, we can dump out their
+	 * back traces safely to the console.
+	 */
+	for_each_cpu(cpu, &printtrace_mask) {
+		int len, last_i = 0;
+
+		s = &per_cpu(nmi_print_seq, cpu);
+		len = seq_buf_used(&s->seq);
+		if (!len)
+			continue;
+
+		/* Print line by line. */
+		for (i = 0; i < len; i++) {
+			if (s->buffer[i] == '\n') {
+				print_seq_line(s, last_i, i);
+				last_i = i + 1;
+			}
+		}
+		/* Check if there was a partial line. */
+		if (last_i < len) {
+			print_seq_line(s, last_i, len - 1);
+			pr_cont("\n");
+		}
+	}
+
+	clear_bit(0, &backtrace_flag);
+	smp_mb__after_atomic();
+	put_cpu();
+}
+
+/*
+ * It is not safe to call printk() directly from NMI handlers.
+ * It may be fine if the NMI detected a lock up and we have no choice
+ * but to do so, but doing a NMI on all other CPUs to get a back trace
+ * can be done with a sysrq-l. We don't want that to lock up, which
+ * can happen if the NMI interrupts a printk in progress.
+ *
+ * Instead, we redirect the vprintk() to this nmi_vprintk() that writes
+ * the content into a per cpu seq_buf buffer. Then when the NMIs are
+ * all done, we can safely dump the contents of the seq_buf to a printk()
+ * from a non NMI context.
+ */
+static int nmi_vprintk(const char *fmt, va_list args)
+{
+	struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
+	unsigned int len = seq_buf_used(&s->seq);
+
+	seq_buf_vprintf(&s->seq, fmt, args);
+	return seq_buf_used(&s->seq) - len;
+}
+
+bool nmi_cpu_backtrace(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+
+	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+		printk_func_t printk_func_save = this_cpu_read(printk_func);
+
+		/* Replace printk to write into the NMI seq */
+		this_cpu_write(printk_func, nmi_vprintk);
+		pr_warn("NMI backtrace for cpu %d\n", cpu);
+		show_regs(regs);
+		this_cpu_write(printk_func, printk_func_save);
+
+		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
+		return true;
+	}
+
+	return false;
+}
+NOKPROBE_SYMBOL(nmi_cpu_backtrace);
+#endif