author     Aaron Tomlin <atomlin@redhat.com>                2014-06-23 16:22:05 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-06-23 19:47:44 -0400
commit     f3aca3d09525f87731ba6b892c9b010570bc54b4
tree       80c80be186f7bac74cb548444562ae6890938d83
parent     88e15ce402c58f41037752da092683e90826742a
nmi: provide the option to issue an NMI back trace to every cpu but current
Sometimes it is preferable not to use the trigger_all_cpu_backtrace()
routine when one wants to avoid capturing a backtrace for current, for
instance when one was already captured recently.

This patch provides a new routine, trigger_allbutself_cpu_backtrace(),
which offers the flexibility to issue an NMI to every CPU but current
and capture a backtrace accordingly.

x86 and sparc are patched to support the new routine.
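For illustration, a hypothetical caller (not part of this patch) that has
just dumped the current CPU's stack could use the new helper to collect the
remaining CPUs without duplicating its own trace:

	/*
	 * Hypothetical caller, illustration only: the current CPU's stack
	 * was just dumped directly, so only the other CPUs are asked for
	 * an NMI backtrace.
	 */
	static void report_lockup(void)
	{
		dump_stack();				/* current CPU, captured directly */
		trigger_allbutself_cpu_backtrace();	/* NMI backtrace on the rest */
	}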
[dzickus@redhat.com: add stub in #else clause]
[dzickus@redhat.com: don't print message in single processor case, wrap with get/put_cpu based on Oleg's suggestion]
[sfr@canb.auug.org.au: undo C99ism]
Signed-off-by: Aaron Tomlin <atomlin@redhat.com>
Signed-off-by: Don Zickus <dzickus@redhat.com>
Acked-by: David S. Miller <davem@davemloft.net>
Cc: Mateusz Guzik <mguzik@redhat.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 arch/sparc/include/asm/irq_64.h |  2
 arch/sparc/kernel/process_64.c  | 18
 arch/x86/include/asm/irq.h      |  2
 arch/x86/kernel/apic/hw_nmi.c   | 18
 include/linux/nmi.h             | 11
 5 files changed, 38 insertions(+), 13 deletions(-)
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 375cffcf7dbd..91d219381306 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -89,7 +89,7 @@ static inline unsigned long get_softint(void)
 	return retval;
 }
 
-void arch_trigger_all_cpu_backtrace(void);
+void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index b2988f25e230..027e09986194 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -239,7 +239,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
 	}
 }
 
-void arch_trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(bool include_self)
 {
 	struct thread_info *tp = current_thread_info();
 	struct pt_regs *regs = get_irq_regs();
@@ -251,16 +251,22 @@ void arch_trigger_all_cpu_backtrace(void)
 
 	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
 
-	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
-
 	this_cpu = raw_smp_processor_id();
 
-	__global_reg_self(tp, regs, this_cpu);
+	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+
+	if (include_self)
+		__global_reg_self(tp, regs, this_cpu);
 
 	smp_fetch_global_regs();
 
 	for_each_online_cpu(cpu) {
-		struct global_reg_snapshot *gp = &global_cpu_snapshot[cpu].reg;
+		struct global_reg_snapshot *gp;
+
+		if (!include_self && cpu == this_cpu)
+			continue;
+
+		gp = &global_cpu_snapshot[cpu].reg;
 
 		__global_reg_poll(gp);
 
@@ -292,7 +298,7 @@ void arch_trigger_all_cpu_backtrace(void)
 
 static void sysrq_handle_globreg(int key)
 {
-	arch_trigger_all_cpu_backtrace();
+	arch_trigger_all_cpu_backtrace(true);
 }
 
 static struct sysrq_key_op sparc_globalreg_op = {
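A subtlety worth spelling out: when include_self is false,
__global_reg_self() is never called, so global_cpu_snapshot[this_cpu] stays
zeroed. The added continue appears to be what keeps __global_reg_poll() from
waiting on that never-filled slot until its internal retry limit gives up.
Reduced to a sketch (poll_slot() is a hypothetical stand-in for
__global_reg_poll()):

	for_each_online_cpu(cpu) {
		if (!include_self && cpu == this_cpu)
			continue;	/* no snapshot was requested for current */
		poll_slot(cpu);		/* hypothetical stand-in */
	}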
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index cb6cfcd034cf..a80cbb88ea91 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -43,7 +43,7 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
 extern void init_ISA_irqs(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
-void arch_trigger_all_cpu_backtrace(void);
+void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index c3fcb5de5083..6a1e71bde323 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -33,31 +33,41 @@ static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
 /* "in progress" flag of arch_trigger_all_cpu_backtrace */
 static unsigned long backtrace_flag;
 
-void arch_trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(bool include_self)
 {
 	int i;
+	int cpu = get_cpu();
 
-	if (test_and_set_bit(0, &backtrace_flag))
+	if (test_and_set_bit(0, &backtrace_flag)) {
 		/*
 		 * If there is already a trigger_all_cpu_backtrace() in progress
 		 * (backtrace_flag == 1), don't output double cpu dump infos.
 		 */
+		put_cpu();
 		return;
+	}
 
 	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
+	if (!include_self)
+		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 
-	printk(KERN_INFO "sending NMI to all CPUs:\n");
-	apic->send_IPI_all(NMI_VECTOR);
+	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
+		pr_info("sending NMI to %s CPUs:\n",
+			(include_self ? "all" : "other"));
+		apic->send_IPI_mask(to_cpumask(backtrace_mask), NMI_VECTOR);
+	}
 
 	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
 	for (i = 0; i < 10 * 1000; i++) {
 		if (cpumask_empty(to_cpumask(backtrace_mask)))
 			break;
 		mdelay(1);
+		touch_softlockup_watchdog();
 	}
 
 	clear_bit(0, &backtrace_flag);
 	smp_mb__after_atomic();
+	put_cpu();
 }
 
 static int
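Two details of this hunk are easy to miss. get_cpu() disables preemption, so
the cpu value used to clear the current CPU out of backtrace_mask cannot go
stale before send_IPI_mask() runs; that is why put_cpu() must also appear on
the early "already in progress" return path, not just at the end. And
touch_softlockup_watchdog() keeps the up-to-ten-second mdelay() polling loop
from itself being flagged as a soft lockup. A reduced sketch of the
get_cpu()/put_cpu() pairing (the predicate name is hypothetical):

	int cpu = get_cpu();		/* preemption off: "cpu" stays valid */

	if (backtrace_in_progress()) {	/* hypothetical stand-in for test_and_set_bit() */
		put_cpu();		/* unpin on the early exit path... */
		return;
	}
	/* ... build the mask from "cpu", send NMIs, poll for completion ... */
	put_cpu();			/* ...and on the normal path */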
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 6a45fb583ff1..a17ab6398d7c 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -32,15 +32,24 @@ static inline void touch_nmi_watchdog(void)
 #ifdef arch_trigger_all_cpu_backtrace
 static inline bool trigger_all_cpu_backtrace(void)
 {
-	arch_trigger_all_cpu_backtrace();
+	arch_trigger_all_cpu_backtrace(true);
 
 	return true;
 }
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+	arch_trigger_all_cpu_backtrace(false);
+	return true;
+}
 #else
 static inline bool trigger_all_cpu_backtrace(void)
 {
 	return false;
 }
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+	return false;
+}
 #endif
 
 #ifdef CONFIG_LOCKUP_DETECTOR
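Because the #else stubs return false, a caller can detect missing arch
support and degrade gracefully, for example (hypothetical caller,
illustration only):

	if (!trigger_allbutself_cpu_backtrace())
		pr_warn("arch lacks NMI backtrace support; skipping CPU dump\n");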