author     Ingo Molnar <mingo@elte.hu>    2011-02-07 02:44:11 -0500
committer  Ingo Molnar <mingo@elte.hu>    2011-02-07 02:44:26 -0500
commit     c7f9a6f377fa64e5a74f8c128d4349765c28fab1
tree       e71eaf08c5f9179d6fd6c7e08d8539358dc76ad1 /kernel
parent     fe4b04fa31a6dcf4358aa84cf81e5a7fd079469b
parent     8dbdea8444d303a772bceb1ba963f0e3273bfc5e

Merge branch 'linus' into perf/core

Merge reason: Pick up perf fixes that are now upstream

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')

 kernel/irq/migration.c        | 14
 kernel/module.c               | 16
 kernel/perf_event.c           | 10
 kernel/sched_rt.c             |  2
 kernel/timer.c                |  6
 kernel/trace/trace_events.c   | 12
 kernel/trace/trace_export.c   |  6
 kernel/trace/trace_syscalls.c | 19
 kernel/tracepoint.c           | 31
 kernel/watchdog.c             | 43
 10 files changed, 84 insertions(+), 75 deletions(-)
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 1d2541940480..441fd629ff04 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -56,6 +56,7 @@ void move_masked_irq(int irq)
 void move_native_irq(int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	bool masked;
 
 	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
 		return;
@@ -63,8 +64,15 @@ void move_native_irq(int irq)
 	if (unlikely(desc->status & IRQ_DISABLED))
 		return;
 
-	desc->irq_data.chip->irq_mask(&desc->irq_data);
+	/*
+	 * Be careful vs. already masked interrupts. If this is a
+	 * threaded interrupt with ONESHOT set, we can end up with an
+	 * interrupt storm.
+	 */
+	masked = desc->status & IRQ_MASKED;
+	if (!masked)
+		desc->irq_data.chip->irq_mask(&desc->irq_data);
 	move_masked_irq(irq);
-	desc->irq_data.chip->irq_unmask(&desc->irq_data);
+	if (!masked)
+		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
-
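
The guard above is the whole fix: if the line was already masked when the migration ran (as with a threaded ONESHOT handler still in flight), the old code's unconditional unmask would reopen the line too early and could trigger an interrupt storm. A minimal userspace sketch of the same save-and-restore-the-mask pattern, with hypothetical types and flag names rather than the kernel's:

    #include <stdbool.h>

    struct irq_line {
            unsigned int status;                   /* hypothetical flag word */
            void (*mask)(struct irq_line *line);
            void (*unmask)(struct irq_line *line);
    };

    #define LINE_MASKED 0x1u

    static void move_line(struct irq_line *line,
                          void (*do_move)(struct irq_line *line))
    {
            /* Remember whether the line was masked before we touched it. */
            bool masked = line->status & LINE_MASKED;

            if (!masked)
                    line->mask(line);   /* quiesce the line for the move */

            do_move(line);

            /*
             * Undo only what we did: if the line was masked on entry
             * (say, a oneshot threaded handler still running), leave it
             * masked rather than unmasking it early.
             */
            if (!masked)
                    line->unmask(line);
    }
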
diff --git a/kernel/module.c b/kernel/module.c
index 34e00b708fad..efa290ea94bf 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2460,9 +2460,9 @@ static void find_module_sections(struct module *mod, struct load_info *info)
 #endif
 
 #ifdef CONFIG_TRACEPOINTS
-	mod->tracepoints = section_objs(info, "__tracepoints",
-					sizeof(*mod->tracepoints),
-					&mod->num_tracepoints);
+	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
+					     sizeof(*mod->tracepoints_ptrs),
+					     &mod->num_tracepoints);
 #endif
 #ifdef HAVE_JUMP_LABEL
 	mod->jump_entries = section_objs(info, "__jump_table",
@@ -3393,7 +3393,7 @@ void module_layout(struct module *mod,
 		   struct modversion_info *ver,
 		   struct kernel_param *kp,
 		   struct kernel_symbol *ks,
-		   struct tracepoint *tp)
+		   struct tracepoint * const *tp)
 {
 }
 EXPORT_SYMBOL(module_layout);
@@ -3407,8 +3407,8 @@ void module_update_tracepoints(void)
 	mutex_lock(&module_mutex);
 	list_for_each_entry(mod, &modules, list)
 		if (!mod->taints)
-			tracepoint_update_probe_range(mod->tracepoints,
-				mod->tracepoints + mod->num_tracepoints);
+			tracepoint_update_probe_range(mod->tracepoints_ptrs,
+				mod->tracepoints_ptrs + mod->num_tracepoints);
 	mutex_unlock(&module_mutex);
 }
 
@@ -3432,8 +3432,8 @@ int module_get_iter_tracepoints(struct tracepoint_iter *iter)
 		else if (iter_mod > iter->module)
 			iter->tracepoint = NULL;
 		found = tracepoint_get_iter_range(&iter->tracepoint,
-			iter_mod->tracepoints,
-			iter_mod->tracepoints
+			iter_mod->tracepoints_ptrs,
+			iter_mod->tracepoints_ptrs
 				+ iter_mod->num_tracepoints);
 		if (found) {
 			iter->module = iter_mod;
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 7d3faa25e136..a353a4d6d00d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1966,11 +1966,12 @@ static void __perf_event_read(void *info)
 		return;
 
 	raw_spin_lock(&ctx->lock);
-	update_context_time(ctx);
+	if (ctx->is_active)
+		update_context_time(ctx);
 	update_event_times(event);
+	if (event->state == PERF_EVENT_STATE_ACTIVE)
+		event->pmu->read(event);
 	raw_spin_unlock(&ctx->lock);
-
-	event->pmu->read(event);
 }
 
 static inline u64 perf_event_count(struct perf_event *event)
@@ -2064,8 +2065,7 @@ static int alloc_callchain_buffers(void)
 	 * accessed from NMI. Use a temporary manual per cpu allocation
 	 * until that gets sorted out.
 	 */
-	size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
-		num_possible_cpus();
+	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
 
 	entries = kzalloc(size, GFP_KERNEL);
 	if (!entries)
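
The second hunk replaces an open-coded "header plus N pointers" size with offsetof() over the flexible array member, which also folds in any padding between the header fields and the array. A standalone sketch of the idiom (the struct and its fields are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdlib.h>

    struct entries {
            size_t nr;
            void *cpu_entries[];    /* flexible array member */
    };

    static struct entries *alloc_entries(size_t nr_cpus)
    {
            /*
             * offsetof(struct entries, cpu_entries[n]) covers the header,
             * any padding before the array, and n array slots.
             */
            size_t size = offsetof(struct entries, cpu_entries[nr_cpus]);
            struct entries *e = calloc(1, size);

            if (e)
                    e->nr = nr_cpus;
            return e;
    }
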
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c914ec747ca6..ad6267714c84 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -625,7 +625,7 @@ static void update_curr_rt(struct rq *rq)
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	u64 delta_exec;
 
-	if (!task_has_rt_policy(curr))
+	if (curr->sched_class != &rt_sched_class)
 		return;
 
 	delta_exec = rq->clock_task - curr->se.exec_start;
diff --git a/kernel/timer.c b/kernel/timer.c
index 43ca9936f2d0..d53ce66daea0 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -969,10 +969,14 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
 int del_timer_sync(struct timer_list *timer)
 {
 #ifdef CONFIG_LOCKDEP
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
 	local_bh_disable();
 	lock_map_acquire(&timer->lockdep_map);
 	lock_map_release(&timer->lockdep_map);
-	local_bh_enable();
+	_local_bh_enable();
+	raw_local_irq_restore(flags);
 #endif
 	/*
 	 * don't use it in hardirq context, because it
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 35fde09b81de..5f499e0438a4 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1284,7 +1284,7 @@ trace_create_file_ops(struct module *mod)
 static void trace_module_add_events(struct module *mod)
 {
 	struct ftrace_module_file_ops *file_ops = NULL;
-	struct ftrace_event_call *call, *start, *end;
+	struct ftrace_event_call **call, **start, **end;
 
 	start = mod->trace_events;
 	end = mod->trace_events + mod->num_trace_events;
@@ -1297,7 +1297,7 @@ static void trace_module_add_events(struct module *mod)
 		return;
 
 	for_each_event(call, start, end) {
-		__trace_add_event_call(call, mod,
+		__trace_add_event_call(*call, mod,
 				       &file_ops->id, &file_ops->enable,
 				       &file_ops->filter, &file_ops->format);
 	}
@@ -1367,8 +1367,8 @@ static struct notifier_block trace_module_nb = {
 	.priority = 0,
 };
 
-extern struct ftrace_event_call __start_ftrace_events[];
-extern struct ftrace_event_call __stop_ftrace_events[];
+extern struct ftrace_event_call *__start_ftrace_events[];
+extern struct ftrace_event_call *__stop_ftrace_events[];
 
 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
 
@@ -1384,7 +1384,7 @@ __setup("trace_event=", setup_trace_event);
 
 static __init int event_trace_init(void)
 {
-	struct ftrace_event_call *call;
+	struct ftrace_event_call **call;
 	struct dentry *d_tracer;
 	struct dentry *entry;
 	struct dentry *d_events;
@@ -1430,7 +1430,7 @@ static __init int event_trace_init(void)
 		pr_warning("tracing: Failed to allocate common fields");
 
 	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
-		__trace_add_event_call(call, NULL, &ftrace_event_id_fops,
+		__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
 				       &ftrace_enable_fops,
 				       &ftrace_event_filter_fops,
 				       &ftrace_event_format_fops);
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 4b74d71705c0..bbeec31e0ae3 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -161,13 +161,13 @@ struct ftrace_event_class event_class_ftrace_##call = { \
 	.fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
 };								\
 								\
-struct ftrace_event_call __used					\
-__attribute__((__aligned__(4)))					\
-__attribute__((section("_ftrace_events"))) event_##call = {	\
+struct ftrace_event_call __used event_##call = {		\
 	.name = #call,						\
 	.event.type = etype,					\
 	.class = &event_class_ftrace_##call,			\
 	.print_fmt = print,					\
 };								\
+struct ftrace_event_call __used					\
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
 
 #include "trace_entries.h"
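
This hunk shows the mechanism behind the whole series: the event structure itself becomes an ordinary object, and only a pointer to it is emitted into the _ftrace_events section, so the section is a tightly packed pointer array regardless of how the compiler pads or aligns the structures themselves. A userspace sketch of the pattern (section and symbol names here are illustrative; the __start_/__stop_ bounds are synthesized by the GNU linker for any section whose name is a valid C identifier):

    #include <stdio.h>

    struct event {
            const char *name;
    };

    /* Define the object normally, then emit only a pointer to it
     * into the named section. */
    #define DEFINE_EVENT(ev)                                        \
            static struct event ev = { .name = #ev };               \
            static struct event * const __ptr_##ev                  \
            __attribute__((used, section("my_events"))) = &ev

    DEFINE_EVENT(alpha);
    DEFINE_EVENT(beta);

    /* Section bounds provided automatically by the GNU linker. */
    extern struct event * const __start_my_events[];
    extern struct event * const __stop_my_events[];

    int main(void)
    {
            struct event * const *p;

            /* Walk the pointer array, dereferencing once per entry. */
            for (p = __start_my_events; p < __stop_my_events; p++)
                    printf("%s\n", (*p)->name);
            return 0;
    }
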
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index b706529b4fc7..5c9fe08d2093 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -55,20 +55,21 @@ struct ftrace_event_class event_class_syscall_exit = {
 	.raw_init	= init_syscall_trace,
 };
 
-extern unsigned long __start_syscalls_metadata[];
-extern unsigned long __stop_syscalls_metadata[];
+extern struct syscall_metadata *__start_syscalls_metadata[];
+extern struct syscall_metadata *__stop_syscalls_metadata[];
 
 static struct syscall_metadata **syscalls_metadata;
 
-static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
+static __init struct syscall_metadata *
+find_syscall_meta(unsigned long syscall)
 {
-	struct syscall_metadata *start;
-	struct syscall_metadata *stop;
+	struct syscall_metadata **start;
+	struct syscall_metadata **stop;
 	char str[KSYM_SYMBOL_LEN];
 
 
-	start = (struct syscall_metadata *)__start_syscalls_metadata;
-	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+	start = __start_syscalls_metadata;
+	stop = __stop_syscalls_metadata;
 	kallsyms_lookup(syscall, NULL, NULL, NULL, str);
 
 	for ( ; start < stop; start++) {
@@ -78,8 +79,8 @@ static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
 		 * with "SyS" instead of "sys", leading to an unwanted
 		 * mismatch.
 		 */
-		if (start->name && !strcmp(start->name + 3, str + 3))
-			return start;
+		if ((*start)->name && !strcmp((*start)->name + 3, str + 3))
+			return *start;
 	}
 	return NULL;
 }
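
The "+ 3" in the final comparison is there because, as the in-diff comment notes, some architectures list syscall symbols as "SyS_..." rather than "sys_...", so both names are compared with their three-byte prefix skipped. A tiny standalone illustration of that comparison:

    #include <stdio.h>
    #include <string.h>

    /* Compare two syscall names, ignoring the "sys"/"SyS" prefix. */
    static int names_match(const char *meta_name, const char *ksym_name)
    {
            return strcmp(meta_name + 3, ksym_name + 3) == 0;
    }

    int main(void)
    {
            printf("%d\n", names_match("sys_read", "SyS_read"));   /* 1 */
            printf("%d\n", names_match("sys_read", "sys_write"));  /* 0 */
            return 0;
    }
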
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index e95ee7f31d43..68187af4889e 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -27,8 +27,8 @@
 #include <linux/sched.h>
 #include <linux/jump_label.h>
 
-extern struct tracepoint __start___tracepoints[];
-extern struct tracepoint __stop___tracepoints[];
+extern struct tracepoint * const __start___tracepoints_ptrs[];
+extern struct tracepoint * const __stop___tracepoints_ptrs[];
 
 /* Set to 1 to enable tracepoint debug output */
 static const int tracepoint_debug;
@@ -298,10 +298,10 @@ static void disable_tracepoint(struct tracepoint *elem)
  *
  * Updates the probe callback corresponding to a range of tracepoints.
  */
-void
-tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
+void tracepoint_update_probe_range(struct tracepoint * const *begin,
+				   struct tracepoint * const *end)
 {
-	struct tracepoint *iter;
+	struct tracepoint * const *iter;
 	struct tracepoint_entry *mark_entry;
 
 	if (!begin)
@@ -309,12 +309,12 @@ tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
 
 	mutex_lock(&tracepoints_mutex);
 	for (iter = begin; iter < end; iter++) {
-		mark_entry = get_tracepoint(iter->name);
+		mark_entry = get_tracepoint((*iter)->name);
 		if (mark_entry) {
-			set_tracepoint(&mark_entry, iter,
+			set_tracepoint(&mark_entry, *iter,
 					!!mark_entry->refcount);
 		} else {
-			disable_tracepoint(iter);
+			disable_tracepoint(*iter);
 		}
 	}
 	mutex_unlock(&tracepoints_mutex);
@@ -326,8 +326,8 @@ tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
 static void tracepoint_update_probes(void)
 {
 	/* Core kernel tracepoints */
-	tracepoint_update_probe_range(__start___tracepoints,
-		__stop___tracepoints);
+	tracepoint_update_probe_range(__start___tracepoints_ptrs,
+		__stop___tracepoints_ptrs);
 	/* tracepoints in modules. */
 	module_update_tracepoints();
 }
@@ -514,8 +514,8 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
 * Will return the first tracepoint in the range if the input tracepoint is
 * NULL.
 */
-int tracepoint_get_iter_range(struct tracepoint **tracepoint,
-	struct tracepoint *begin, struct tracepoint *end)
+int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
+	struct tracepoint * const *begin, struct tracepoint * const *end)
 {
 	if (!*tracepoint && begin != end) {
 		*tracepoint = begin;
@@ -534,7 +534,8 @@ static void tracepoint_get_iter(struct tracepoint_iter *iter)
 	/* Core kernel tracepoints */
 	if (!iter->module) {
 		found = tracepoint_get_iter_range(&iter->tracepoint,
-			__start___tracepoints, __stop___tracepoints);
+			__start___tracepoints_ptrs,
+			__stop___tracepoints_ptrs);
 		if (found)
 			goto end;
 	}
@@ -585,8 +586,8 @@ int tracepoint_module_notify(struct notifier_block *self,
 	switch (val) {
 	case MODULE_STATE_COMING:
 	case MODULE_STATE_GOING:
-		tracepoint_update_probe_range(mod->tracepoints,
-			mod->tracepoints + mod->num_tracepoints);
+		tracepoint_update_probe_range(mod->tracepoints_ptrs,
+			mod->tracepoints_ptrs + mod->num_tracepoints);
 		break;
 	}
 	return 0;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d7ebdf4cea98..f37f974aa81b 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -27,7 +27,7 @@
 #include <asm/irq_regs.h>
 #include <linux/perf_event.h>
 
-int watchdog_enabled;
+int watchdog_enabled = 1;
 int __read_mostly softlockup_thresh = 60;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -43,9 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int no_watchdog;
-
-
 /* boot commands */
 /*
  * Should we panic when a soft-lockup or hard-lockup occurs:
@@ -58,7 +55,7 @@ static int __init hardlockup_panic_setup(char *str)
 	if (!strncmp(str, "panic", 5))
 		hardlockup_panic = 1;
 	else if (!strncmp(str, "0", 1))
-		no_watchdog = 1;
+		watchdog_enabled = 0;
 	return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -77,7 +74,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);
 
 static int __init nowatchdog_setup(char *str)
 {
-	no_watchdog = 1;
+	watchdog_enabled = 0;
 	return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
@@ -85,7 +82,7 @@ __setup("nowatchdog", nowatchdog_setup);
 /* deprecated */
 static int __init nosoftlockup_setup(char *str)
 {
-	no_watchdog = 1;
+	watchdog_enabled = 0;
 	return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
@@ -432,9 +429,6 @@ static int watchdog_enable(int cpu)
 		wake_up_process(p);
 	}
 
-	/* if any cpu succeeds, watchdog is considered enabled for the system */
-	watchdog_enabled = 1;
-
 	return 0;
 }
 
@@ -462,12 +456,16 @@ static void watchdog_disable(int cpu)
 static void watchdog_enable_all_cpus(void)
 {
 	int cpu;
-	int result = 0;
+
+	watchdog_enabled = 0;
 
 	for_each_online_cpu(cpu)
-		result += watchdog_enable(cpu);
+		if (!watchdog_enable(cpu))
+			/* if any cpu succeeds, watchdog is considered
+			   enabled for the system */
+			watchdog_enabled = 1;
 
-	if (result)
+	if (!watchdog_enabled)
 		printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
 
 }
@@ -476,9 +474,6 @@ static void watchdog_disable_all_cpus(void)
 {
 	int cpu;
 
-	if (no_watchdog)
-		return;
-
 	for_each_online_cpu(cpu)
 		watchdog_disable(cpu);
 
@@ -498,10 +493,12 @@ int proc_dowatchdog_enabled(struct ctl_table *table, int write,
 {
 	proc_dointvec(table, write, buffer, length, ppos);
 
-	if (watchdog_enabled)
-		watchdog_enable_all_cpus();
-	else
-		watchdog_disable_all_cpus();
+	if (write) {
+		if (watchdog_enabled)
+			watchdog_enable_all_cpus();
+		else
+			watchdog_disable_all_cpus();
+	}
 	return 0;
 }
 
@@ -530,7 +527,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		err = watchdog_enable(hotcpu);
+		if (watchdog_enabled)
+			err = watchdog_enable(hotcpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
@@ -555,9 +553,6 @@ void __init lockup_detector_init(void)
 	void *cpu = (void *)(long)smp_processor_id();
 	int err;
 
-	if (no_watchdog)
-		return;
-
 	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
 	WARN_ON(notifier_to_errno(err));
 
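
The proc handler hunk is the subtle part of this cleanup: without the write check, merely reading the sysctl would re-run the enable or disable path as a side effect. A minimal sketch of the read-versus-write guard (the handler and helpers are hypothetical stand-ins, not kernel API):

    #include <stdio.h>

    static int watchdog_on = 1;

    static void enable_all(void)  { puts("watchdog: enabling on all cpus"); }
    static void disable_all(void) { puts("watchdog: disabling on all cpus"); }

    /* Act only when invoked for a write: a read must report the
     * current value without re-running the toggle paths. */
    static int handle_knob(int write, int value)
    {
            if (write) {
                    watchdog_on = value;
                    if (watchdog_on)
                            enable_all();
                    else
                            disable_all();
            }
            return watchdog_on;
    }

    int main(void)
    {
            handle_knob(0, 0);  /* read: no side effects */
            handle_knob(1, 0);  /* write 0: disables */
            return 0;
    }
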