author     Ingo Molnar <mingo@kernel.org>  2013-10-09 06:36:13 -0400
committer  Ingo Molnar <mingo@kernel.org>  2013-10-09 06:36:13 -0400
commit     37bf06375c90a42fe07b9bebdb07bc316ae5a0ce (patch)
tree       de572dd6d3955b0725001776a7b03796f99e1e8e /kernel
parent     6bfa687c19b7ab8adee03f0d43c197c2945dd869 (diff)
parent     d0e639c9e06d44e713170031fe05fb60ebe680af (diff)
Merge tag 'v3.12-rc4' into sched/core
Merge Linux v3.12-rc4 to fix a conflict and also to refresh the tree
before applying more scheduler patches.
Conflicts:
arch/avr32/include/asm/Kbuild
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c             |  5
-rw-r--r--  kernel/context_tracking.c  | 12
-rw-r--r--  kernel/events/core.c       | 21
-rw-r--r--  kernel/kmod.c              |  4
-rw-r--r--  kernel/params.c            |  6
-rw-r--r--  kernel/pid.c               |  5
-rw-r--r--  kernel/power/snapshot.c    |  5
-rw-r--r--  kernel/power/user.c        |  8
-rw-r--r--  kernel/reboot.c            |  9
-rw-r--r--  kernel/softirq.c           | 15
-rw-r--r--  kernel/time/ntp.c          |  6
-rw-r--r--  kernel/time/timekeeping.c  |  2
-rw-r--r--  kernel/watchdog.c          | 60
13 files changed, 139 insertions, 19 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index 91e53d04b6a9..7b0e23a740ce 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
                         sleep_time = timeout_start + audit_backlog_wait_time -
                                         jiffies;
-                        if ((long)sleep_time > 0)
+                        if ((long)sleep_time > 0) {
                                 wait_for_auditd(sleep_time);
                                 continue;
+                        }
                 }
                 if (audit_rate_check() && printk_ratelimit())
                         printk(KERN_WARNING
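
The braces added above are the whole fix: in the old code the continue ran unconditionally (only the indentation suggested it was part of the if), so once the backlog wait budget was exhausted audit_log_start() kept looping instead of falling through to the rate-limited warning and failure path. A minimal userspace sketch of the corrected bounded-retry shape follows; queue_full(), wait_for_drain() and the millisecond budget are illustrative stand-ins, not kernel interfaces:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Hypothetical stand-ins for the audit backlog check and wait_for_auditd(). */
static int queue_full(void)         { return 1; }       /* always congested here */
static void wait_for_drain(long ms) { usleep(ms * 1000); }

static long now_ms(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

int main(void)
{
        long budget_ms = 50;    /* plays the role of audit_backlog_wait_time */
        long start = now_ms();

        while (queue_full()) {
                long sleep_time = start + budget_ms - now_ms();

                if (sleep_time > 0) {           /* budget left: wait, then retry */
                        wait_for_drain(sleep_time);
                        continue;
                }
                /* Budget exhausted: fall through to the failure path instead of
                 * retrying forever -- the endless loop the patch breaks out of. */
                fprintf(stderr, "backlog wait exhausted, giving up\n");
                break;
        }
        return 0;
}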
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 013161f1c807..e5f3917aa05b 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -51,6 +51,15 @@ void context_tracking_user_enter(void)
         unsigned long flags;
 
         /*
+         * Repeat the user_enter() check here because some archs may be calling
+         * this from asm and if no CPU needs context tracking, they shouldn't
+         * go further. Repeat the check here until they support the static key
+         * check.
+         */
+        if (!static_key_false(&context_tracking_enabled))
+                return;
+
+        /*
          * Some contexts may involve an exception occuring in an irq,
          * leading to that nesting:
          * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
@@ -151,6 +160,9 @@ void context_tracking_user_exit(void)
 {
         unsigned long flags;
 
+        if (!static_key_false(&context_tracking_enabled))
+                return;
+
         if (in_interrupt())
                 return;
 
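
Both hunks add the same guard: if context tracking is built in but not needed on any CPU, bail out before touching any per-CPU state, because some architectures call these functions from assembly without doing the static-key check themselves. static_key_false() compiles down to a patched branch that is essentially free when the key is off; the sketch below shows only the guard shape, with a plain boolean standing in for the static key (feature_enabled and do_expensive_tracking are invented names):

#include <stdbool.h>
#include <stdio.h>

/* Plain boolean in place of the kernel's static key; a real static key
 * patches the branch out of the hot path when the feature is disabled. */
static bool feature_enabled;

static void do_expensive_tracking(void)
{
        printf("tracking state transition\n");
}

/* Mirrors the early return added to context_tracking_user_enter()/_exit():
 * callers that cannot do the check themselves still pay almost nothing. */
static void tracking_user_enter(void)
{
        if (!feature_enabled)
                return;
        do_expensive_tracking();
}

int main(void)
{
        tracking_user_enter();          /* no-op: feature off */
        feature_enabled = true;
        tracking_user_enter();          /* now does the work */
        return 0;
}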
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dd236b66ca3a..cb4238e85b38 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
         *running = ctx_time - event->tstamp_running;
 }
 
+static void perf_event_init_userpage(struct perf_event *event)
+{
+        struct perf_event_mmap_page *userpg;
+        struct ring_buffer *rb;
+
+        rcu_read_lock();
+        rb = rcu_dereference(event->rb);
+        if (!rb)
+                goto unlock;
+
+        userpg = rb->user_page;
+
+        /* Allow new userspace to detect that bit 0 is deprecated */
+        userpg->cap_bit0_is_deprecated = 1;
+        userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
+
+unlock:
+        rcu_read_unlock();
+}
+
 void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
 }
@@ -4044,6 +4064,7 @@ again:
         ring_buffer_attach(event, rb);
         rcu_assign_pointer(event->rb, rb);
 
+        perf_event_init_userpage(event);
         perf_event_update_userpage(event);
 
 unlock:
diff --git a/kernel/kmod.c b/kernel/kmod.c
index fb326365b694..b086006c59e7 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -571,6 +571,10 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
         DECLARE_COMPLETION_ONSTACK(done);
         int retval = 0;
 
+        if (!sub_info->path) {
+                call_usermodehelper_freeinfo(sub_info);
+                return -EINVAL;
+        }
         helper_lock();
         if (!khelper_wq || usermodehelper_disabled) {
                 retval = -EBUSY;
diff --git a/kernel/params.c b/kernel/params.c
index 81c4e78c8f4c..c00d5b502aa4 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -254,11 +254,11 @@ int parse_args(const char *doing,
 
 
 STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
 STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
 STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
 STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
 
 int param_set_charp(const char *val, const struct kernel_param *kp)
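
The substance of the params.c change is signedness: the short, int and long parameter types were being converted with kstrtoul(), an unsigned parser, so a negative value such as -1 could not be accepted (kstrtoul() refuses a leading '-'); kstrtol() handles the sign. A userspace sketch of the same pitfall using the C library's strtol()/strtoul(), which wraps rather than rejects but is equally wrong for a signed parameter:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *val = "-1";   /* e.g. "myparam=-1" on a module command line */

        long sval = strtol(val, NULL, 0);            /* signed parse  */
        unsigned long uval = strtoul(val, NULL, 0);  /* unsigned parse */

        printf("strtol(\"%s\")  -> %ld\n", val, sval);   /* -1 */
        printf("strtoul(\"%s\") -> %lu\n", val, uval);   /* ULONG_MAX: wrapped */
        return 0;
}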
diff --git a/kernel/pid.c b/kernel/pid.c
index ebe5e80b10f8..9b9a26698144 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -273,6 +273,11 @@ void free_pid(struct pid *pid)
                          */
                         wake_up_process(ns->child_reaper);
                         break;
+                case PIDNS_HASH_ADDING:
+                        /* Handle a fork failure of the first process */
+                        WARN_ON(ns->child_reaper);
+                        ns->nr_hashed = 0;
+                        /* fall through */
                 case 0:
                         schedule_work(&ns->proc_work);
                         break;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 358a146fd4da..98c3b34a4cff 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -743,7 +743,10 @@ int create_basic_memory_bitmaps(void)
         struct memory_bitmap *bm1, *bm2;
         int error = 0;
 
-        BUG_ON(forbidden_pages_map || free_pages_map);
+        if (forbidden_pages_map && free_pages_map)
+                return 0;
+        else
+                BUG_ON(forbidden_pages_map || free_pages_map);
 
         bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
         if (!bm1)
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 72e8f4fd616d..957f06164ad1 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -39,6 +39,7 @@ static struct snapshot_data {
         char frozen;
         char ready;
         char platform_support;
+        bool free_bitmaps;
 } snapshot_state;
 
 atomic_t snapshot_device_available = ATOMIC_INIT(1);
@@ -82,6 +83,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
                 data->swap = -1;
                 data->mode = O_WRONLY;
                 error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+                if (!error) {
+                        error = create_basic_memory_bitmaps();
+                        data->free_bitmaps = !error;
+                }
                 if (error)
                         pm_notifier_call_chain(PM_POST_RESTORE);
         }
@@ -111,6 +116,8 @@ static int snapshot_release(struct inode *inode, struct file *filp)
                 pm_restore_gfp_mask();
                 free_basic_memory_bitmaps();
                 thaw_processes();
+        } else if (data->free_bitmaps) {
+                free_basic_memory_bitmaps();
         }
         pm_notifier_call_chain(data->mode == O_RDONLY ?
                         PM_POST_HIBERNATION : PM_POST_RESTORE);
@@ -231,6 +238,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                         break;
                 pm_restore_gfp_mask();
                 free_basic_memory_bitmaps();
+                data->free_bitmaps = false;
                 thaw_processes();
                 data->frozen = 0;
                 break;
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 269ed9384cc4..f813b3474646 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
 #endif
 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
 
-int reboot_default;
+/*
+ * This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line). This is needed so that we can
+ * suppress DMI scanning for reboot quirks. Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+int reboot_default = 1;
 int reboot_cpu;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 3e88612fc87e..dcab1d3fb53d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -328,10 +328,19 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-        if (!force_irqthreads)
-                __do_softirq();
-        else
+        if (!force_irqthreads) {
+                /*
+                 * We can safely execute softirq on the current stack if
+                 * it is the irq stack, because it should be near empty
+                 * at this stage. But we have no way to know if the arch
+                 * calls irq_exit() on the irq stack. So call softirq
+                 * in its own stack to prevent from any overrun on top
+                 * of a potentially deep task stack.
+                 */
+                do_softirq();
+        } else {
                 wakeup_softirqd();
+        }
 }
 
 static inline void tick_irq_exit(void)
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 8f5b3b98577b..bb2215174f05 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -516,13 +516,13 @@ static void sync_cmos_clock(struct work_struct *work)
         schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
 }
 
-static void notify_cmos_timer(void)
+void ntp_notify_cmos_timer(void)
 {
         schedule_delayed_work(&sync_cmos_work, 0);
 }
 
 #else
-static inline void notify_cmos_timer(void) { }
+void ntp_notify_cmos_timer(void) { }
 #endif
 
 
@@ -687,8 +687,6 @@ int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai)
         if (!(time_status & STA_NANO))
                 txc->time.tv_usec /= NSEC_PER_USEC;
 
-        notify_cmos_timer();
-
         return result;
 }
 
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 48b9fffabdc2..947ba25a95a0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1703,6 +1703,8 @@ int do_adjtimex(struct timex *txc)
         write_seqcount_end(&timekeeper_seq);
         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
+        ntp_notify_cmos_timer();
+
         return ret;
 }
 
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 51c4f34d258e..4431610f049a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
         .unpark = watchdog_enable,
 };
 
-static int watchdog_enable_all_cpus(void)
+static void restart_watchdog_hrtimer(void *info)
+{
+        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+        int ret;
+
+        /*
+         * No need to cancel and restart hrtimer if it is currently executing
+         * because it will reprogram itself with the new period now.
+         * We should never see it unqueued here because we are running per-cpu
+         * with interrupts disabled.
+         */
+        ret = hrtimer_try_to_cancel(hrtimer);
+        if (ret == 1)
+                hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+                                HRTIMER_MODE_REL_PINNED);
+}
+
+static void update_timers(int cpu)
+{
+        struct call_single_data data = {.func = restart_watchdog_hrtimer};
+        /*
+         * Make sure that perf event counter will adopt to a new
+         * sampling period. Updating the sampling period directly would
+         * be much nicer but we do not have an API for that now so
+         * let's use a big hammer.
+         * Hrtimer will adopt the new period on the next tick but this
+         * might be late already so we have to restart the timer as well.
+         */
+        watchdog_nmi_disable(cpu);
+        __smp_call_function_single(cpu, &data, 1);
+        watchdog_nmi_enable(cpu);
+}
+
+static void update_timers_all_cpus(void)
+{
+        int cpu;
+
+        get_online_cpus();
+        preempt_disable();
+        for_each_online_cpu(cpu)
+                update_timers(cpu);
+        preempt_enable();
+        put_online_cpus();
+}
+
+static int watchdog_enable_all_cpus(bool sample_period_changed)
 {
         int err = 0;
 
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
                         pr_err("Failed to create watchdog threads, disabled\n");
                 else
                         watchdog_running = 1;
+        } else if (sample_period_changed) {
+                update_timers_all_cpus();
         }
 
         return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
         int err, old_thresh, old_enabled;
+        static DEFINE_MUTEX(watchdog_proc_mutex);
 
+        mutex_lock(&watchdog_proc_mutex);
         old_thresh = ACCESS_ONCE(watchdog_thresh);
         old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
         err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
         if (err || !write)
-                return err;
+                goto out;
 
         set_sample_period();
         /*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
          * watchdog_*_all_cpus() function takes care of this.
          */
         if (watchdog_user_enabled && watchdog_thresh)
-                err = watchdog_enable_all_cpus();
+                err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
         else
                 watchdog_disable_all_cpus();
 
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
                 watchdog_thresh = old_thresh;
                 watchdog_user_enabled = old_enabled;
         }
-
+out:
+        mutex_unlock(&watchdog_proc_mutex);
         return err;
 }
 #endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
         set_sample_period();
 
         if (watchdog_user_enabled)
-                watchdog_enable_all_cpus(false);
+                watchdog_enable_all_cpus(false);
 }
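
The proc_dowatchdog() changes follow a common pattern: a function-local static mutex serializes concurrent writers of the watchdog sysctls, and the early return on error becomes a goto so that every exit path releases the lock before returning; watchdog_enable_all_cpus() is then told whether the sample period actually changed, so timers are only restarted when necessary. A generic userspace sketch of that lock/goto-out shape (update_settings(), validate() and the threshold bounds are invented for the example):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t settings_lock = PTHREAD_MUTEX_INITIALIZER;
static int threshold = 10;

static int validate(int value) { return (value > 0 && value <= 60) ? 0 : -EINVAL; }

/* Every exit path goes through 'out' so the mutex is always released,
 * mirroring the return-err -> goto-out conversion in proc_dowatchdog(). */
static int update_settings(int new_threshold)
{
        int err;

        pthread_mutex_lock(&settings_lock);
        err = validate(new_threshold);
        if (err)
                goto out;               /* early error: still unlocks below */

        threshold = new_threshold;      /* the "apply" step, done under the lock */
out:
        pthread_mutex_unlock(&settings_lock);
        return err;
}

int main(void)
{
        printf("set 30 -> %d\n", update_settings(30));   /* 0 */
        printf("set -5 -> %d\n", update_settings(-5));   /* -EINVAL */
        return 0;
}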