author     Rusty Russell <rusty@rustcorp.com.au>   2009-03-13 00:19:49 -0400
committer  Rusty Russell <rusty@rustcorp.com.au>   2009-03-13 00:19:49 -0400
commit     fcef8576d8a64fc603e719c97d423f9f6d4e0e8b (patch)
tree       08d33a5f3cba8ef8dad81dd261aba97212eb3f8a
parent     bc9b83dd1f66402b870301c3c7117b9c1484abb4 (diff)
cpumask: convert arch/x86/kernel/nmi.c's backtrace_mask to a cpumask_var_t
Impact: cleanup, reduce memory usage for CONFIG_CPUMASK_OFFSTACK=y

I *think* every path calls check_nmi_watchdog before using the watchdog,
so that's the right place for the initialization.

If that's wrong, we'll get a nice NULL-deref with CONFIG_CPUMASK_OFFSTACK=y,
and have uncovered another bug.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
-rw-r--r--  arch/x86/kernel/apic/nmi.c  11
1 file changed, 6 insertions(+), 5 deletions(-)
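For context, the allocate-before-use pattern the commit message describes (and that the hunks below implement) looks roughly like the following minimal sketch. The example_* names are hypothetical, invented for illustration; alloc_cpumask_var(), free_cpumask_var(), cpumask_copy(), cpumask_test_cpu(), cpumask_clear_cpu() and cpu_online_mask are the real kernel interfaces the patch switches to.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

/* With CONFIG_CPUMASK_OFFSTACK=y this is just a NULL pointer until
 * alloc_cpumask_var() runs -- hence the NULL-deref the changelog warns
 * about if any path touched the mask before check_nmi_watchdog(). */
static cpumask_var_t example_mask;

static int __init example_init(void)
{
        if (!alloc_cpumask_var(&example_mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_copy(example_mask, cpu_online_mask);    /* mark every online CPU */
        if (cpumask_test_cpu(0, example_mask))          /* query one CPU ...     */
                cpumask_clear_cpu(0, example_mask);     /* ... and clear it      */

        return 0;
}

static void __exit example_exit(void)
{
        free_cpumask_var(example_mask);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Note that the hunk in check_nmi_watchdog() below ignores the return value of alloc_cpumask_var(); with CONFIG_CPUMASK_OFFSTACK=n that is harmless, since the call does no allocation and always succeeds.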
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index bdfad80c3cf1..d6bd62407152 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -39,7 +39,7 @@
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
 
-static cpumask_t backtrace_mask = CPU_MASK_NONE;
+static cpumask_var_t backtrace_mask;
 
 /* nmi_active:
  * >0: the lapic NMI watchdog is active, but can be disabled
@@ -138,6 +138,7 @@ int __init check_nmi_watchdog(void)
         if (!prev_nmi_count)
                 goto error;
 
+        alloc_cpumask_var(&backtrace_mask, GFP_KERNEL);
         printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
@@ -413,14 +414,14 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                 touched = 1;
         }
 
-        if (cpu_isset(cpu, backtrace_mask)) {
+        if (cpumask_test_cpu(cpu, backtrace_mask)) {
                 static DEFINE_SPINLOCK(lock);   /* Serialise the printks */
 
                 spin_lock(&lock);
                 printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
                 dump_stack();
                 spin_unlock(&lock);
-                cpu_clear(cpu, backtrace_mask);
+                cpumask_clear_cpu(cpu, backtrace_mask);
         }
 
         /* Could check oops_in_progress here too, but it's safer not to */
@@ -554,10 +555,10 @@ void __trigger_all_cpu_backtrace(void)
 {
         int i;
 
-        backtrace_mask = cpu_online_map;
+        cpumask_copy(backtrace_mask, cpu_online_mask);
         /* Wait for up to 10 seconds for all CPUs to do the backtrace */
         for (i = 0; i < 10 * 1000; i++) {
-                if (cpus_empty(backtrace_mask))
+                if (cpumask_empty(backtrace_mask))
                         break;
                 mdelay(1);
         }
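The "reduce memory usage for CONFIG_CPUMASK_OFFSTACK=y" part of the changelog follows from how cpumask_var_t is declared in <linux/cpumask.h>. Roughly, as a simplified excerpt rather than the full header:

#ifdef CONFIG_CPUMASK_OFFSTACK
/* Off-stack build: the variable is only a pointer; the mask itself is
 * allocated at runtime by alloc_cpumask_var(), sized for the CPUs the
 * booted machine can actually have rather than the compile-time NR_CPUS
 * maximum. */
typedef struct cpumask *cpumask_var_t;
#else
/* On-stack build: a one-element array, so it behaves like the old
 * cpumask_t and alloc_cpumask_var()/free_cpumask_var() are no-ops. */
typedef struct cpumask cpumask_var_t[1];
#endif

So the old "static cpumask_t backtrace_mask" always reserved NR_CPUS bits of BSS (512 bytes with NR_CPUS=4096), while the converted code keeps only a pointer there and allocates the bits on demand in check_nmi_watchdog().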