author    Linus Torvalds <torvalds@linux-foundation.org>  2011-05-28 15:55:55 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-05-28 15:55:55 -0400
commit    c4a227d89f758e582fd167bb15245f2704de99ef (patch)
tree      f5b6e0091e6543c14d1cd7cf1f93e097a96bbd64 /kernel
parent    87367a0b71a5188e34a913c05673b5078f71a64d (diff)
parent    f506b3dc0ec454a16d40cab9ee5d75435b39dc50 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (25 commits)
perf: Fix SIGIO handling
perf top: Don't stop if no kernel symtab is found
perf top: Handle kptr_restrict
perf top: Remove unused macro
perf events: initialize fd array to -1 instead of 0
perf tools: Make sure kptr_restrict warnings fit 80 col terms
perf tools: Fix build on older systems
perf symbols: Handle /proc/sys/kernel/kptr_restrict
perf: Remove duplicate headers
ftrace: Add internal recursive checks
tracing: Update btrfs's tracepoints to use u64 interface
tracing: Add __print_symbolic_u64 to avoid warnings on 32bit machine
ftrace: Set ops->flag to enabled even on static function tracing
tracing: Have event with function tracer check error return
ftrace: Have ftrace_startup() return failure code
jump_label: Check entries limit in __jump_label_update
ftrace/recordmcount: Avoid STT_FUNC symbols as base on ARM
scripts/tags.sh: Add magic for trace-events for etags too
scripts/tags.sh: Fix ctags for DEFINE_EVENT()
x86/ftrace: Fix compiler warning in ftrace.c
...
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/events/core.c          8
-rw-r--r--   kernel/jump_label.c          18
-rw-r--r--   kernel/trace/ftrace.c        31
-rw-r--r--   kernel/trace/ring_buffer.c   10
-rw-r--r--   kernel/trace/trace.h         15
-rw-r--r--   kernel/trace/trace_events.c   7
-rw-r--r--   kernel/trace/trace_output.c  27
-rw-r--r--   kernel/watchdog.c             9
8 files changed, 101 insertions(+), 24 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c09767f7db3e..d863b3c057bb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5028,6 +5028,14 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
 	else
 		perf_event_output(event, nmi, data, regs);
 
+	if (event->fasync && event->pending_kill) {
+		if (nmi) {
+			event->pending_wakeup = 1;
+			irq_work_queue(&event->pending);
+		} else
+			perf_event_wakeup(event);
+	}
+
 	return ret;
 }
 
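The hunk above defers the SIGIO wakeup through the event's irq_work when the overflow fires in NMI context, because perf_event_wakeup() ends in kill_fasync(), which takes locks that are not NMI-safe. A rough sketch of the same deferral pattern, with hypothetical names (my_counter, my_counter_overflow, wakeup_work) standing in for the perf structures; only the irq_work and kill_fasync APIs are real:

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/irq_work.h>

/* Hypothetical stand-in for struct perf_event. */
struct my_counter {
	struct fasync_struct	*fasync;
	int			pending_kill;
	struct irq_work		wakeup_work;	/* init_irq_work(&c->wakeup_work, my_counter_wakeup) at setup */
};

/* Runs later from IRQ context, where kill_fasync() is safe to call. */
static void my_counter_wakeup(struct irq_work *work)
{
	struct my_counter *c = container_of(work, struct my_counter, wakeup_work);

	kill_fasync(&c->fasync, SIGIO, POLL_IN);
}

static void my_counter_overflow(struct my_counter *c, int nmi)
{
	if (c->fasync && c->pending_kill) {
		if (nmi)
			irq_work_queue(&c->wakeup_work);	/* defer out of NMI */
		else
			kill_fasync(&c->fasync, SIGIO, POLL_IN);
	}
}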
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 74d1c099fbd1..fa27e750dbc0 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -105,9 +105,12 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start,
 }
 
 static void __jump_label_update(struct jump_label_key *key,
-				struct jump_entry *entry, int enable)
+				struct jump_entry *entry,
+				struct jump_entry *stop, int enable)
 {
-	for (; entry->key == (jump_label_t)(unsigned long)key; entry++) {
+	for (; (entry < stop) &&
+	       (entry->key == (jump_label_t)(unsigned long)key);
+	       entry++) {
 		/*
 		 * entry->code set to 0 invalidates module init text sections
 		 * kernel_text_address() verifies we are not in core kernel
@@ -181,7 +184,11 @@ static void __jump_label_mod_update(struct jump_label_key *key, int enable)
 	struct jump_label_mod *mod = key->next;
 
 	while (mod) {
-		__jump_label_update(key, mod->entries, enable);
+		struct module *m = mod->mod;
+
+		__jump_label_update(key, mod->entries,
+				    m->jump_entries + m->num_jump_entries,
+				    enable);
 		mod = mod->next;
 	}
 }
@@ -245,7 +252,8 @@ static int jump_label_add_module(struct module *mod)
 		key->next = jlm;
 
 		if (jump_label_enabled(key))
-			__jump_label_update(key, iter, JUMP_LABEL_ENABLE);
+			__jump_label_update(key, iter, iter_stop,
+					    JUMP_LABEL_ENABLE);
 	}
 
 	return 0;
@@ -371,7 +379,7 @@ static void jump_label_update(struct jump_label_key *key, int enable)
 
 	/* if there are no users, entry can be NULL */
 	if (entry)
-		__jump_label_update(key, entry, enable);
+		__jump_label_update(key, entry, __stop___jump_table, enable);
 
 #ifdef CONFIG_MODULES
 	__jump_label_mod_update(key, enable);
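The jump_label change is purely about bounding the walk: __jump_label_update() previously stopped only when entry->key stopped matching, so a key whose entries ended exactly at the end of a __jump_table section could run past it into whatever followed in memory. Each caller now passes the first entry beyond its section (m->jump_entries + m->num_jump_entries for a module, __stop___jump_table for the core kernel). A minimal sketch of that shape, with patch_entry() as a hypothetical stand-in for the arch patching call:

#include <linux/jump_label.h>

/* Sketch only: stop is the first entry past the section being walked. */
static void update_matching_entries(struct jump_entry *entry,
				    struct jump_entry *stop,
				    jump_label_t key)
{
	/* Check the bound before the key so the walk can never overrun. */
	for (; entry < stop && entry->key == key; entry++)
		patch_entry(entry);	/* hypothetical helper */
}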
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d017c2c82c44..1ee417fcbfa5 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -109,12 +109,18 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
 static void ftrace_global_list_func(unsigned long ip,
 				    unsigned long parent_ip)
 {
-	struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/
+	struct ftrace_ops *op;
+
+	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
+		return;
 
+	trace_recursion_set(TRACE_GLOBAL_BIT);
+	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
 	while (op != &ftrace_list_end) {
 		op->func(ip, parent_ip);
 		op = rcu_dereference_raw(op->next); /*see above*/
 	};
+	trace_recursion_clear(TRACE_GLOBAL_BIT);
 }
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
@@ -1638,12 +1644,12 @@ static void ftrace_startup_enable(int command)
 	ftrace_run_update_code(command);
 }
 
-static void ftrace_startup(struct ftrace_ops *ops, int command)
+static int ftrace_startup(struct ftrace_ops *ops, int command)
 {
 	bool hash_enable = true;
 
 	if (unlikely(ftrace_disabled))
-		return;
+		return -ENODEV;
 
 	ftrace_start_up++;
 	command |= FTRACE_ENABLE_CALLS;
@@ -1662,6 +1668,8 @@ static void ftrace_startup(struct ftrace_ops *ops, int command)
 		ftrace_hash_rec_enable(ops, 1);
 
 	ftrace_startup_enable(command);
+
+	return 0;
 }
 
 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
@@ -2501,7 +2509,7 @@ static void __enable_ftrace_function_probe(void)
 
 	ret = __register_ftrace_function(&trace_probe_ops);
 	if (!ret)
-		ftrace_startup(&trace_probe_ops, 0);
+		ret = ftrace_startup(&trace_probe_ops, 0);
 
 	ftrace_probe_registered = 1;
 }
@@ -3466,7 +3474,11 @@ device_initcall(ftrace_nodyn_init);
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
 /* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command)	do { } while (0)
+# define ftrace_startup(ops, command)			\
+	({						\
+		(ops)->flags |= FTRACE_OPS_FL_ENABLED;	\
+		0;					\
+	})
 # define ftrace_shutdown(ops, command)	do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
@@ -3484,6 +3496,10 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 {
 	struct ftrace_ops *op;
 
+	if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
+		return;
+
+	trace_recursion_set(TRACE_INTERNAL_BIT);
 	/*
 	 * Some of the ops may be dynamically allocated,
 	 * they must be freed after a synchronize_sched().
@@ -3496,6 +3512,7 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 		op = rcu_dereference_raw(op->next);
 	};
 	preempt_enable_notrace();
+	trace_recursion_clear(TRACE_INTERNAL_BIT);
 }
 
 static void clear_ftrace_swapper(void)
@@ -3799,7 +3816,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
 	ret = __register_ftrace_function(ops);
 	if (!ret)
-		ftrace_startup(ops, 0);
+		ret = ftrace_startup(ops, 0);
 
 
  out_unlock:
@@ -4045,7 +4062,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	ftrace_graph_return = retfunc;
 	ftrace_graph_entry = entryfunc;
 
-	ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
 
 out:
 	mutex_unlock(&ftrace_lock);
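Both list walkers above use the same per-task recursion guard: the new TRACE_GLOBAL_BIT and TRACE_INTERNAL_BIT flags (defined in the trace.h hunk further down) are tested on entry and held for the duration of the walk, so a traced function called from inside the walk bails out instead of recursing. A condensed sketch of the pattern, with my_list_func() as a hypothetical callback:

static void my_list_func(unsigned long ip, unsigned long parent_ip)
{
	/* A second, nested entry on this task sees the bit and returns. */
	if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
		return;

	trace_recursion_set(TRACE_INTERNAL_BIT);
	/* ... walk the ops list; each op->func() may itself hit the tracer ... */
	trace_recursion_clear(TRACE_INTERNAL_BIT);
}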
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0ef7b4b2a1f7..b0c7aa407943 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2216,7 +2216,7 @@ static noinline void trace_recursive_fail(void)
 
 	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
 		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
-		    current->trace_recursion,
+		    trace_recursion_buffer(),
 		    hardirq_count() >> HARDIRQ_SHIFT,
 		    softirq_count() >> SOFTIRQ_SHIFT,
 		    in_nmi());
@@ -2226,9 +2226,9 @@ static noinline void trace_recursive_fail(void)
 
 static inline int trace_recursive_lock(void)
 {
-	current->trace_recursion++;
+	trace_recursion_inc();
 
-	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+	if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
 		return 0;
 
 	trace_recursive_fail();
@@ -2238,9 +2238,9 @@ static inline int trace_recursive_lock(void)
 
 static inline void trace_recursive_unlock(void)
 {
-	WARN_ON_ONCE(!current->trace_recursion);
+	WARN_ON_ONCE(!trace_recursion_buffer());
 
-	current->trace_recursion--;
+	trace_recursion_dec();
 }
 
 #else
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6b69c4bd306f..229f8591f61d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -784,4 +784,19 @@ extern const char *__stop___trace_bprintk_fmt[];
 	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 #include "trace_entries.h"
 
+/* Only current can touch trace_recursion */
+#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
+#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
+
+/* Ring buffer has the 10 LSB bits to count */
+#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
+
+/* for function tracing recursion */
+#define TRACE_INTERNAL_BIT		(1<<11)
+#define TRACE_GLOBAL_BIT		(1<<12)
+
+#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (bit); } while (0)
+#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(bit); } while (0)
+#define trace_recursion_test(bit)	((current)->trace_recursion & (bit))
+
 #endif /* _LINUX_KERNEL_TRACE_H */
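These macros pack two independent facilities into the single per-task trace_recursion word: the ring buffer's nesting depth occupies bits 0-9 (the 0x3ff mask), while the function tracer's re-entry flags sit above it at bits 11 and 12. Since only current ever touches its own word, no locking is needed. A small illustrative check of that layout (demo() is not kernel code):

static void demo(void)
{
	trace_recursion_inc();			  /* depth counter, bits 0-9 */
	trace_recursion_set(TRACE_INTERNAL_BIT);  /* flag bit 11 */

	/* The flag bit lies above the 0x3ff mask, so the depth still reads 1. */
	WARN_ON(trace_recursion_buffer() != 1);

	trace_recursion_clear(TRACE_INTERNAL_BIT);
	trace_recursion_dec();
}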
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 2fe110341359..686ec399f2a8 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1657,7 +1657,12 @@ static struct ftrace_ops trace_ops __initdata =
 
 static __init void event_trace_self_test_with_function(void)
 {
-	register_ftrace_function(&trace_ops);
+	int ret;
+	ret = register_ftrace_function(&trace_ops);
+	if (WARN_ON(ret < 0)) {
+		pr_info("Failed to enable function tracer for event tests\n");
+		return;
+	}
 	pr_info("Running tests again, along with the function tracer\n");
 	event_trace_self_tests();
 	unregister_ftrace_function(&trace_ops);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index cf535ccedc86..e37de492a9e1 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -353,6 +353,33 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
 }
 EXPORT_SYMBOL(ftrace_print_symbols_seq);
 
+#if BITS_PER_LONG == 32
+const char *
+ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
+			 const struct trace_print_flags_u64 *symbol_array)
+{
+	int i;
+	const char *ret = p->buffer + p->len;
+
+	for (i = 0;  symbol_array[i].name; i++) {
+
+		if (val != symbol_array[i].mask)
+			continue;
+
+		trace_seq_puts(p, symbol_array[i].name);
+		break;
+	}
+
+	if (!p->len)
+		trace_seq_printf(p, "0x%llx", val);
+
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
+#endif
+
 const char *
 ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
 {
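ftrace_print_symbols_seq_u64() is only built on 32-bit, where pushing a u64 through the unsigned long based __print_symbolic() would truncate the value and produce compiler warnings; the __print_symbolic_u64 wrapper from the shortlog routes to it there and to plain __print_symbolic() on 64-bit. A hedged sketch of how a tracepoint would use it (the event, field, and symbol names are made up; only the wrapper macro comes from this series):

TRACE_EVENT(my_root_lookup,

	TP_PROTO(u64 root_objectid),

	TP_ARGS(root_objectid),

	TP_STRUCT__entry(
		__field(u64, root_objectid)
	),

	TP_fast_assign(
		__entry->root_objectid = root_objectid;
	),

	/* Symbol values stay u64 end to end, so no truncation on 32-bit. */
	TP_printk("root = %s",
		  __print_symbolic_u64(__entry->root_objectid,
				       { 1ULL, "ROOT_TREE"   },
				       { 2ULL, "EXTENT_TREE" }))
);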
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 7daa4b072e9f..3d0c56ad4792 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -415,15 +415,13 @@ static void watchdog_nmi_disable(int cpu) { return; }
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
 /* prepare/enable/disable routines */
-static int watchdog_prepare_cpu(int cpu)
+static void watchdog_prepare_cpu(int cpu)
 {
 	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);
 
 	WARN_ON(per_cpu(softlockup_watchdog, cpu));
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = watchdog_timer_fn;
-
-	return 0;
 }
 
 static int watchdog_enable(int cpu)
@@ -542,17 +540,16 @@ static int __cpuinit
 cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	int hotcpu = (unsigned long)hcpu;
-	int err = 0;
 
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		err = watchdog_prepare_cpu(hotcpu);
+		watchdog_prepare_cpu(hotcpu);
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		if (watchdog_enabled)
-			err = watchdog_enable(hotcpu);
+			watchdog_enable(hotcpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
