Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c                         |   5
-rw-r--r--  kernel/context_tracking.c              |  12
-rw-r--r--  kernel/events/core.c                   |  21
-rw-r--r--  kernel/kmod.c                          |   4
-rw-r--r--  kernel/params.c                        |   6
-rw-r--r--  kernel/pid.c                           |   5
-rw-r--r--  kernel/power/snapshot.c                |   5
-rw-r--r--  kernel/power/user.c                    |   8
-rw-r--r--  kernel/reboot.c                        |   9
-rw-r--r--  kernel/sched/debug.c                   |   6
-rw-r--r--  kernel/sched/fair.c                    |  23
-rw-r--r--  kernel/sched/stats.h                   |   5
-rw-r--r--  kernel/softirq.c                       |  15
-rw-r--r--  kernel/time/ntp.c                      |   6
-rw-r--r--  kernel/time/timekeeping.c              |   2
-rw-r--r--  kernel/trace/ftrace.c                  | 140
-rw-r--r--  kernel/trace/trace.c                   |   7
-rw-r--r--  kernel/trace/trace.h                   |  25
-rw-r--r--  kernel/trace/trace_functions_graph.c   |  56
-rw-r--r--  kernel/trace/trace_stat.c              |  41
-rw-r--r--  kernel/watchdog.c                      |  60
21 files changed, 349 insertions(+), 112 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 91e53d04b6a9..7b0e23a740ce 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
 		sleep_time = timeout_start + audit_backlog_wait_time -
 					jiffies;
-		if ((long)sleep_time > 0)
+		if ((long)sleep_time > 0) {
 			wait_for_auditd(sleep_time);
-		continue;
+			continue;
+		}
 	}
 	if (audit_rate_check() && printk_ratelimit())
 		printk(KERN_WARNING
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 247091bf0587..859c8dfd78a1 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -51,6 +51,15 @@ void context_tracking_user_enter(void)
 	unsigned long flags;
 
 	/*
+	 * Repeat the user_enter() check here because some archs may be calling
+	 * this from asm and if no CPU needs context tracking, they shouldn't
+	 * go further. Repeat the check here until they support the static key
+	 * check.
+	 */
+	if (!static_key_false(&context_tracking_enabled))
+		return;
+
+	/*
 	 * Some contexts may involve an exception occuring in an irq,
 	 * leading to that nesting:
 	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
@@ -151,6 +160,9 @@ void context_tracking_user_exit(void)
 {
 	unsigned long flags;
 
+	if (!static_key_false(&context_tracking_enabled))
+		return;
+
 	if (in_interrupt())
 		return;
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dd236b66ca3a..cb4238e85b38 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
 	*running = ctx_time - event->tstamp_running;
 }
 
+static void perf_event_init_userpage(struct perf_event *event)
+{
+	struct perf_event_mmap_page *userpg;
+	struct ring_buffer *rb;
+
+	rcu_read_lock();
+	rb = rcu_dereference(event->rb);
+	if (!rb)
+		goto unlock;
+
+	userpg = rb->user_page;
+
+	/* Allow new userspace to detect that bit 0 is deprecated */
+	userpg->cap_bit0_is_deprecated = 1;
+	userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
+
+unlock:
+	rcu_read_unlock();
+}
+
 void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
 }
@@ -4044,6 +4064,7 @@ again:
 	ring_buffer_attach(event, rb);
 	rcu_assign_pointer(event->rb, rb);
 
+	perf_event_init_userpage(event);
 	perf_event_update_userpage(event);
 
 unlock:
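
For context, the new cap_bit0_is_deprecated field is what lets user space tell the old, ambiguous meaning of bit 0 apart from the new individual capability bits. A rough userspace-side sketch, not part of this patch, of how a profiler reading the mmap()ed control page might use it; the mmap() of the event fd itself is assumed:

#include <stdbool.h>
#include <linux/perf_event.h>

/* "pc" is assumed to point at the mmap()ed perf_event_mmap_page of an
 * already-opened event; error handling is omitted. */
static bool can_use_rdpmc(volatile struct perf_event_mmap_page *pc)
{
	if (pc->cap_bit0_is_deprecated)
		/* New scheme: the individual cap_user_* bits are valid. */
		return pc->cap_user_rdpmc;

	/* Old scheme: bit 0 alone advertised the rdpmc/time capabilities. */
	return pc->cap_bit0;
}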
diff --git a/kernel/kmod.c b/kernel/kmod.c
index fb326365b694..b086006c59e7 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -571,6 +571,10 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 	DECLARE_COMPLETION_ONSTACK(done);
 	int retval = 0;
 
+	if (!sub_info->path) {
+		call_usermodehelper_freeinfo(sub_info);
+		return -EINVAL;
+	}
 	helper_lock();
 	if (!khelper_wq || usermodehelper_disabled) {
 		retval = -EBUSY;
diff --git a/kernel/params.c b/kernel/params.c
index 81c4e78c8f4c..c00d5b502aa4 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -254,11 +254,11 @@ int parse_args(const char *doing,
 
 
 STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
 STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
 STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
 STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
 
 int param_set_charp(const char *val, const struct kernel_param *kp)
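
The kstrtoul -> kstrtol switch matters because STANDARD_PARAM_DEF() wires the given parse function into the generated param_set_*() helper, and the kernel's unsigned parsers reject a leading '-', so negative module parameters were refused. A simplified sketch of roughly what the macro generates for "int" after this change (not the verbatim macro body):

/* Roughly what STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol) produces;
 * the real macro also generates the matching param_get_int() helper. */
int param_set_int(const char *val, const struct kernel_param *kp)
{
	long l;
	int ret;

	ret = kstrtol(val, 0, &l);	/* kstrtoul here would reject "-1" */
	if (ret < 0 || (int)l != l)
		return ret < 0 ? ret : -EINVAL;
	*((int *)kp->arg) = l;
	return 0;
}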
diff --git a/kernel/pid.c b/kernel/pid.c
index ebe5e80b10f8..9b9a26698144 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -273,6 +273,11 @@ void free_pid(struct pid *pid)
 			 */
 			wake_up_process(ns->child_reaper);
 			break;
+		case PIDNS_HASH_ADDING:
+			/* Handle a fork failure of the first process */
+			WARN_ON(ns->child_reaper);
+			ns->nr_hashed = 0;
+			/* fall through */
 		case 0:
 			schedule_work(&ns->proc_work);
 			break;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 358a146fd4da..98c3b34a4cff 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -743,7 +743,10 @@ int create_basic_memory_bitmaps(void)
 	struct memory_bitmap *bm1, *bm2;
 	int error = 0;
 
-	BUG_ON(forbidden_pages_map || free_pages_map);
+	if (forbidden_pages_map && free_pages_map)
+		return 0;
+	else
+		BUG_ON(forbidden_pages_map || free_pages_map);
 
 	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
 	if (!bm1)
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 72e8f4fd616d..957f06164ad1 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -39,6 +39,7 @@ static struct snapshot_data {
 	char frozen;
 	char ready;
 	char platform_support;
+	bool free_bitmaps;
 } snapshot_state;
 
 atomic_t snapshot_device_available = ATOMIC_INIT(1);
@@ -82,6 +83,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 		data->swap = -1;
 		data->mode = O_WRONLY;
 		error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+		if (!error) {
+			error = create_basic_memory_bitmaps();
+			data->free_bitmaps = !error;
+		}
 		if (error)
 			pm_notifier_call_chain(PM_POST_RESTORE);
 	}
@@ -111,6 +116,8 @@ static int snapshot_release(struct inode *inode, struct file *filp)
 		pm_restore_gfp_mask();
 		free_basic_memory_bitmaps();
 		thaw_processes();
+	} else if (data->free_bitmaps) {
+		free_basic_memory_bitmaps();
 	}
 	pm_notifier_call_chain(data->mode == O_RDONLY ?
 			PM_POST_HIBERNATION : PM_POST_RESTORE);
@@ -231,6 +238,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 			break;
 		pm_restore_gfp_mask();
 		free_basic_memory_bitmaps();
+		data->free_bitmaps = false;
 		thaw_processes();
 		data->frozen = 0;
 		break;
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 269ed9384cc4..f813b3474646 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
 #endif
 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
 
-int reboot_default;
+/*
+ * This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line).  This is needed so that we can
+ * suppress DMI scanning for reboot quirks.  Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+int reboot_default = 1;
 int reboot_cpu;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index e076bddd4c66..196559994f7c 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -124,7 +124,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 		SEQ_printf(m, " ");
 
 	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
-		p->comm, p->pid,
+		p->comm, task_pid_nr(p),
 		SPLIT_NS(p->se.vruntime),
 		(long long)(p->nvcsw + p->nivcsw),
 		p->prio);
@@ -289,7 +289,7 @@ do { \
 	P(nr_load_updates);
 	P(nr_uninterruptible);
 	PN(next_balance);
-	P(curr->pid);
+	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
 	PN(clock);
 	P(cpu_load[0]);
 	P(cpu_load[1]);
@@ -492,7 +492,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 {
 	unsigned long nr_switches;
 
-	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
+	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
 		get_nr_threads(p));
 	SEQ_printf(m,
 		"---------------------------------------------------------"
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9b3fe1cd8f40..7c70201fbc61 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 	}
 
 	if (!se) {
-		cfs_rq->h_load = rq->avg.load_avg_contrib;
+		cfs_rq->h_load = cfs_rq->runnable_load_avg;
 		cfs_rq->last_h_load_update = now;
 	}
 
@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 		(busiest->load_per_task * SCHED_POWER_SCALE) /
 		busiest->group_power;
 
-	if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
-	    (scaled_busy_load_per_task * imbn)) {
+	if (busiest->avg_load + scaled_busy_load_per_task >=
+	    local->avg_load + (scaled_busy_load_per_task * imbn)) {
 		env->imbalance = busiest->load_per_task;
 		return;
 	}
@@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	 * max load less than avg load(as we skip the groups at or below
 	 * its cpu_power, while calculating max_load..)
 	 */
-	if (busiest->avg_load < sds->avg_load) {
+	if (busiest->avg_load <= sds->avg_load ||
+	    local->avg_load >= sds->avg_load) {
 		env->imbalance = 0;
 		return fix_small_imbalance(env, sds);
 	}
@@ -5928,11 +5929,15 @@ static void task_fork_fair(struct task_struct *p)
 	cfs_rq = task_cfs_rq(current);
 	curr = cfs_rq->curr;
 
-	if (unlikely(task_cpu(p) != this_cpu)) {
-		rcu_read_lock();
-		__set_task_cpu(p, this_cpu);
-		rcu_read_unlock();
-	}
+	/*
+	 * Not only the cpu but also the task_group of the parent might have
+	 * been changed after parent->se.parent,cfs_rq were copied to
+	 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
+	 * of child point to valid ones.
+	 */
+	rcu_read_lock();
+	__set_task_cpu(p, this_cpu);
+	rcu_read_unlock();
 
 	update_curr(cfs_rq);
 
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 5aef494fc8b4..c7edee71bce8 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -104,8 +104,9 @@ static inline void sched_info_queued(struct task_struct *t)
 }
 
 /*
- * Called when a process ceases being the active-running process, either
- * voluntarily or involuntarily.  Now we can calculate how long we ran.
+ * Called when a process ceases being the active-running process involuntarily
+ * due, typically, to expiring its time slice (this may also be called when
+ * switching to the idle task).  Now we can calculate how long we ran.
  * Also, if the process is still in the TASK_RUNNING state, call
  * sched_info_queued() to mark that it has now again started waiting on
  * the runqueue.
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 53cc09ceb0b8..d7d498d8cc4f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -328,10 +328,19 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-	if (!force_irqthreads)
-		__do_softirq();
-	else
+	if (!force_irqthreads) {
+		/*
+		 * We can safely execute softirq on the current stack if
+		 * it is the irq stack, because it should be near empty
+		 * at this stage. But we have no way to know if the arch
+		 * calls irq_exit() on the irq stack. So call softirq
+		 * in its own stack to prevent from any overrun on top
+		 * of a potentially deep task stack.
+		 */
+		do_softirq();
+	} else {
 		wakeup_softirqd();
+	}
 }
 
 static inline void tick_irq_exit(void)
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 8f5b3b98577b..bb2215174f05 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -516,13 +516,13 @@ static void sync_cmos_clock(struct work_struct *work)
 	schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
 }
 
-static void notify_cmos_timer(void)
+void ntp_notify_cmos_timer(void)
 {
 	schedule_delayed_work(&sync_cmos_work, 0);
 }
 
 #else
-static inline void notify_cmos_timer(void) { }
+void ntp_notify_cmos_timer(void) { }
 #endif
 
 
@@ -687,8 +687,6 @@ int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai)
 	if (!(time_status & STA_NANO))
 		txc->time.tv_usec /= NSEC_PER_USEC;
 
-	notify_cmos_timer();
-
 	return result;
 }
 
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 48b9fffabdc2..947ba25a95a0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1703,6 +1703,8 @@ int do_adjtimex(struct timex *txc)
 	write_seqcount_end(&timekeeper_seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
+	ntp_notify_cmos_timer();
+
 	return ret;
 }
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 03cf44ac54d3..44e826a79665 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3641,7 +3641,7 @@ __setup("ftrace_filter=", set_ftrace_filter);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
-static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
+static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
 
 static int __init set_graph_function(char *str)
 {
@@ -3659,7 +3659,7 @@ static void __init set_ftrace_early_graph(char *buf)
 		func = strsep(&buf, ",");
 		/* we allow only one expression at a time */
 		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-				      func);
+				      FTRACE_GRAPH_MAX_FUNCS, func);
 		if (ret)
 			printk(KERN_DEBUG "ftrace: function %s not "
 			       "traceable\n", func);
@@ -3776,15 +3776,25 @@ static const struct file_operations ftrace_notrace_fops = {
 static DEFINE_MUTEX(graph_lock);
 
 int ftrace_graph_count;
-int ftrace_graph_filter_enabled;
+int ftrace_graph_notrace_count;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+
+struct ftrace_graph_data {
+	unsigned long *table;
+	size_t size;
+	int *count;
+	const struct seq_operations *seq_ops;
+};
 
 static void *
 __g_next(struct seq_file *m, loff_t *pos)
 {
-	if (*pos >= ftrace_graph_count)
+	struct ftrace_graph_data *fgd = m->private;
+
+	if (*pos >= *fgd->count)
 		return NULL;
-	return &ftrace_graph_funcs[*pos];
+	return &fgd->table[*pos];
 }
 
 static void *
@@ -3796,10 +3806,12 @@ g_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *g_start(struct seq_file *m, loff_t *pos)
 {
+	struct ftrace_graph_data *fgd = m->private;
+
 	mutex_lock(&graph_lock);
 
 	/* Nothing, tell g_show to print all functions are enabled */
-	if (!ftrace_graph_filter_enabled && !*pos)
+	if (!*fgd->count && !*pos)
 		return (void *)1;
 
 	return __g_next(m, pos);
@@ -3835,38 +3847,88 @@ static const struct seq_operations ftrace_graph_seq_ops = {
 };
 
 static int
-ftrace_graph_open(struct inode *inode, struct file *file)
+__ftrace_graph_open(struct inode *inode, struct file *file,
+		    struct ftrace_graph_data *fgd)
 {
 	int ret = 0;
 
-	if (unlikely(ftrace_disabled))
-		return -ENODEV;
-
 	mutex_lock(&graph_lock);
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC)) {
-		ftrace_graph_filter_enabled = 0;
-		ftrace_graph_count = 0;
-		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
+		*fgd->count = 0;
+		memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
 	}
 	mutex_unlock(&graph_lock);
 
-	if (file->f_mode & FMODE_READ)
-		ret = seq_open(file, &ftrace_graph_seq_ops);
+	if (file->f_mode & FMODE_READ) {
+		ret = seq_open(file, fgd->seq_ops);
+		if (!ret) {
+			struct seq_file *m = file->private_data;
+			m->private = fgd;
+		}
+	} else
+		file->private_data = fgd;
 
 	return ret;
 }
 
 static int
+ftrace_graph_open(struct inode *inode, struct file *file)
+{
+	struct ftrace_graph_data *fgd;
+
+	if (unlikely(ftrace_disabled))
+		return -ENODEV;
+
+	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
+	if (fgd == NULL)
+		return -ENOMEM;
+
+	fgd->table = ftrace_graph_funcs;
+	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
+	fgd->count = &ftrace_graph_count;
+	fgd->seq_ops = &ftrace_graph_seq_ops;
+
+	return __ftrace_graph_open(inode, file, fgd);
+}
+
+static int
+ftrace_graph_notrace_open(struct inode *inode, struct file *file)
+{
+	struct ftrace_graph_data *fgd;
+
+	if (unlikely(ftrace_disabled))
+		return -ENODEV;
+
+	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
+	if (fgd == NULL)
+		return -ENOMEM;
+
+	fgd->table = ftrace_graph_notrace_funcs;
+	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
+	fgd->count = &ftrace_graph_notrace_count;
+	fgd->seq_ops = &ftrace_graph_seq_ops;
+
+	return __ftrace_graph_open(inode, file, fgd);
+}
+
+static int
 ftrace_graph_release(struct inode *inode, struct file *file)
 {
-	if (file->f_mode & FMODE_READ)
+	if (file->f_mode & FMODE_READ) {
+		struct seq_file *m = file->private_data;
+
+		kfree(m->private);
 		seq_release(inode, file);
+	} else {
+		kfree(file->private_data);
+	}
+
 	return 0;
 }
 
 static int
-ftrace_set_func(unsigned long *array, int *idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
 {
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
@@ -3879,7 +3941,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 
 	/* decode regex */
 	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
-	if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
+	if (!not && *idx >= size)
 		return -EBUSY;
 
 	search_len = strlen(search);
@@ -3907,7 +3969,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 			fail = 0;
 			if (!exists) {
 				array[(*idx)++] = rec->ip;
-				if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+				if (*idx >= size)
 					goto out;
 			}
 		} else {
@@ -3925,8 +3987,6 @@ out:
 	if (fail)
 		return -EINVAL;
 
-	ftrace_graph_filter_enabled = !!(*idx);
-
 	return 0;
 }
 
@@ -3935,36 +3995,33 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
 	struct trace_parser parser;
-	ssize_t read, ret;
+	ssize_t read, ret = 0;
+	struct ftrace_graph_data *fgd = file->private_data;
 
 	if (!cnt)
 		return 0;
 
-	mutex_lock(&graph_lock);
-
-	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
+	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
+		return -ENOMEM;
 
 	read = trace_get_user(&parser, ubuf, cnt, ppos);
 
 	if (read >= 0 && trace_parser_loaded((&parser))) {
 		parser.buffer[parser.idx] = 0;
 
+		mutex_lock(&graph_lock);
+
 		/* we allow only one expression at a time */
-		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-				      parser.buffer);
-		if (ret)
-			goto out_free;
+		ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
+				      parser.buffer);
+
+		mutex_unlock(&graph_lock);
 	}
 
-	ret = read;
+	if (!ret)
+		ret = read;
 
-out_free:
 	trace_parser_put(&parser);
-out_unlock:
-	mutex_unlock(&graph_lock);
 
 	return ret;
 }
@@ -3976,6 +4033,14 @@ static const struct file_operations ftrace_graph_fops = {
 	.llseek		= ftrace_filter_lseek,
 	.release	= ftrace_graph_release,
 };
+
+static const struct file_operations ftrace_graph_notrace_fops = {
+	.open		= ftrace_graph_notrace_open,
+	.read		= seq_read,
+	.write		= ftrace_graph_write,
+	.llseek		= ftrace_filter_lseek,
+	.release	= ftrace_graph_release,
+};
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
@@ -3997,6 +4062,9 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 	trace_create_file("set_graph_function", 0444, d_tracer,
 				    NULL,
 				    &ftrace_graph_fops);
+	trace_create_file("set_graph_notrace", 0444, d_tracer,
+				    NULL,
+				    &ftrace_graph_notrace_fops);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 	return 0;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7974ba20557d..063a92bad578 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -843,9 +843,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 	if (isspace(ch)) {
 		parser->buffer[parser->idx] = 0;
 		parser->cont = false;
-	} else {
+	} else if (parser->idx < parser->size - 1) {
 		parser->cont = true;
 		parser->buffer[parser->idx++] = ch;
+	} else {
+		ret = -EINVAL;
+		goto out;
 	}
 
 	*ppos += read;
@@ -2760,7 +2763,7 @@ static void show_snapshot_main_help(struct seq_file *m)
 	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
 	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
 	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
-	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
 	seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
 	seq_printf(m, "#                      is not a '0' or '1')\n");
 }
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 10c86fb7a2b4..d1cf5159bec0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -730,15 +730,16 @@ extern void __trace_graph_return(struct trace_array *tr,
 #ifdef CONFIG_DYNAMIC_FTRACE
 /* TODO: make this variable */
 #define FTRACE_GRAPH_MAX_FUNCS		32
-extern int ftrace_graph_filter_enabled;
 extern int ftrace_graph_count;
 extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
+extern int ftrace_graph_notrace_count;
+extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
 
 static inline int ftrace_graph_addr(unsigned long addr)
 {
 	int i;
 
-	if (!ftrace_graph_filter_enabled)
+	if (!ftrace_graph_count)
 		return 1;
 
 	for (i = 0; i < ftrace_graph_count; i++) {
@@ -758,11 +759,31 @@ static inline int ftrace_graph_addr(unsigned long addr)
 
 	return 0;
 }
+
+static inline int ftrace_graph_notrace_addr(unsigned long addr)
+{
+	int i;
+
+	if (!ftrace_graph_notrace_count)
+		return 0;
+
+	for (i = 0; i < ftrace_graph_notrace_count; i++) {
+		if (addr == ftrace_graph_notrace_funcs[i])
+			return 1;
+	}
+
+	return 0;
+}
 #else
 static inline int ftrace_graph_addr(unsigned long addr)
 {
 	return 1;
 }
+
+static inline int ftrace_graph_notrace_addr(unsigned long addr)
+{
+	return 0;
+}
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b5c09242683d..e08c030b8f38 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -114,16 +114,37 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
 		return -EBUSY;
 	}
 
+	/*
+	 * The curr_ret_stack is an index to ftrace return stack of
+	 * current task.  Its value should be in [0, FTRACE_RETFUNC_
+	 * DEPTH) when the function graph tracer is used.  To support
+	 * filtering out specific functions, it makes the index
+	 * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH)
+	 * so when it sees a negative index the ftrace will ignore
+	 * the record.  And the index gets recovered when returning
+	 * from the filtered function by adding the FTRACE_NOTRACE_
+	 * DEPTH and then it'll continue to record functions normally.
+	 *
+	 * The curr_ret_stack is initialized to -1 and get increased
+	 * in this function.  So it can be less than -1 only if it was
+	 * filtered out via ftrace_graph_notrace_addr() which can be
+	 * set from set_graph_notrace file in debugfs by user.
+	 */
+	if (current->curr_ret_stack < -1)
+		return -EBUSY;
+
 	calltime = trace_clock_local();
 
 	index = ++current->curr_ret_stack;
+	if (ftrace_graph_notrace_addr(func))
+		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
 	barrier();
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = calltime;
 	current->ret_stack[index].subtime = 0;
 	current->ret_stack[index].fp = frame_pointer;
-	*depth = index;
+	*depth = current->curr_ret_stack;
 
 	return 0;
 }
@@ -137,7 +158,17 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
 
 	index = current->curr_ret_stack;
 
-	if (unlikely(index < 0)) {
+	/*
+	 * A negative index here means that it's just returned from a
+	 * notrace'd function.  Recover index to get an original
+	 * return address.  See ftrace_push_return_trace().
+	 *
+	 * TODO: Need to check whether the stack gets corrupted.
+	 */
+	if (index < 0)
+		index += FTRACE_NOTRACE_DEPTH;
+
+	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
 		ftrace_graph_stop();
 		WARN_ON(1);
 		/* Might as well panic, otherwise we have no where to go */
@@ -193,6 +224,15 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 	trace.rettime = trace_clock_local();
 	barrier();
 	current->curr_ret_stack--;
+	/*
+	 * The curr_ret_stack can be less than -1 only if it was
+	 * filtered out and it's about to return from the function.
+	 * Recover the index and continue to trace normal functions.
+	 */
+	if (current->curr_ret_stack < -1) {
+		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
+		return ret;
+	}
 
 	/*
 	 * The trace should run after decrementing the ret counter
@@ -259,10 +299,20 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 
 	/* trace it when it is-nested-in or is a function enabled. */
 	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
-	     ftrace_graph_ignore_irqs()) ||
+	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
 	    (max_depth && trace->depth >= max_depth))
 		return 0;
 
+	/*
+	 * Do not trace a function if it's filtered by set_graph_notrace.
+	 * Make the index of ret stack negative to indicate that it should
+	 * ignore further functions.  But it needs its own ret stack entry
+	 * to recover the original index in order to continue tracing after
+	 * returning from the function.
+	 */
+	if (ftrace_graph_notrace_addr(trace->func))
+		return 1;
+
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
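
The comment added to ftrace_push_return_trace() above describes an index-offsetting trick for the return stack. A stand-alone toy sketch of the same idea (illustrative names only; SKIP_BIAS plays the role of FTRACE_NOTRACE_DEPTH and the value here is just an example):

/* Toy model: "park" the stack index far below -1 while inside a filtered
 * function, then restore it when that function returns. */
#define SKIP_BIAS	65536		/* illustrative stand-in for FTRACE_NOTRACE_DEPTH */

static int stack_top = -1;		/* mirrors current->curr_ret_stack */

static int push_entry(int filtered)
{
	int index = ++stack_top;	/* the entry is still recorded at "index" */

	if (filtered)
		stack_top -= SKIP_BIAS;	/* now < -1: nested pushes bail out early */
	return index;
}

static void pop_entry(void)
{
	stack_top--;
	if (stack_top < -1)
		stack_top += SKIP_BIAS;	/* leaving the filtered function: recover */
}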
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 847f88a6194b..7af67360b330 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -43,46 +43,15 @@ static DEFINE_MUTEX(all_stat_sessions_mutex);
 /* The root directory for all stat files */
 static struct dentry *stat_dir;
 
-/*
- * Iterate through the rbtree using a post order traversal path
- * to release the next node.
- * It won't necessary release one at each iteration
- * but it will at least advance closer to the next one
- * to be released.
- */
-static struct rb_node *release_next(struct tracer_stat *ts,
-				    struct rb_node *node)
+static void __reset_stat_session(struct stat_session *session)
 {
-	struct stat_node *snode;
-	struct rb_node *parent = rb_parent(node);
-
-	if (node->rb_left)
-		return node->rb_left;
-	else if (node->rb_right)
-		return node->rb_right;
-	else {
-		if (!parent)
-			;
-		else if (parent->rb_left == node)
-			parent->rb_left = NULL;
-		else
-			parent->rb_right = NULL;
+	struct stat_node *snode, *n;
 
-		snode = container_of(node, struct stat_node, node);
-		if (ts->stat_release)
-			ts->stat_release(snode->stat);
+	rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) {
+		if (session->ts->stat_release)
+			session->ts->stat_release(snode->stat);
 		kfree(snode);
-
-		return parent;
 	}
-}
-
-static void __reset_stat_session(struct stat_session *session)
-{
-	struct rb_node *node = session->stat_root.rb_node;
-
-	while (node)
-		node = release_next(session->ts, node);
 
 	session->stat_root = RB_ROOT;
 }
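
For reference, the rewrite leans on the general rbtree_postorder_for_each_entry_safe() pattern: post-order visits children before their parent and prefetches the next entry, so each node can be freed while iterating. A minimal sketch with made-up types (not taken from this patch):

#include <linux/rbtree.h>
#include <linux/slab.h>

struct item {
	struct rb_node	node;		/* linked into some rb_root */
	void		*payload;
};

static void free_all(struct rb_root *root)
{
	struct item *pos, *n;

	/* kfree(pos) is safe here: its children were already visited and
	 * "n" was fetched before pos is released. */
	rbtree_postorder_for_each_entry_safe(pos, n, root, node)
		kfree(pos);

	*root = RB_ROOT;
}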
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 51c4f34d258e..4431610f049a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
 	.unpark			= watchdog_enable,
 };
 
-static int watchdog_enable_all_cpus(void)
+static void restart_watchdog_hrtimer(void *info)
+{
+	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+	int ret;
+
+	/*
+	 * No need to cancel and restart hrtimer if it is currently executing
+	 * because it will reprogram itself with the new period now.
+	 * We should never see it unqueued here because we are running per-cpu
+	 * with interrupts disabled.
+	 */
+	ret = hrtimer_try_to_cancel(hrtimer);
+	if (ret == 1)
+		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+				HRTIMER_MODE_REL_PINNED);
+}
+
+static void update_timers(int cpu)
+{
+	struct call_single_data data = {.func = restart_watchdog_hrtimer};
+	/*
+	 * Make sure that perf event counter will adopt to a new
+	 * sampling period. Updating the sampling period directly would
+	 * be much nicer but we do not have an API for that now so
+	 * let's use a big hammer.
+	 * Hrtimer will adopt the new period on the next tick but this
+	 * might be late already so we have to restart the timer as well.
+	 */
+	watchdog_nmi_disable(cpu);
+	__smp_call_function_single(cpu, &data, 1);
+	watchdog_nmi_enable(cpu);
+}
+
+static void update_timers_all_cpus(void)
+{
+	int cpu;
+
+	get_online_cpus();
+	preempt_disable();
+	for_each_online_cpu(cpu)
+		update_timers(cpu);
+	preempt_enable();
+	put_online_cpus();
+}
+
+static int watchdog_enable_all_cpus(bool sample_period_changed)
 {
 	int err = 0;
 
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
 			pr_err("Failed to create watchdog threads, disabled\n");
 		else
 			watchdog_running = 1;
+	} else if (sample_period_changed) {
+		update_timers_all_cpus();
 	}
 
 	return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
 		  void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	int err, old_thresh, old_enabled;
+	static DEFINE_MUTEX(watchdog_proc_mutex);
 
+	mutex_lock(&watchdog_proc_mutex);
 	old_thresh = ACCESS_ONCE(watchdog_thresh);
 	old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
 	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 	if (err || !write)
-		return err;
+		goto out;
 
 	set_sample_period();
 	/*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
 	 * watchdog_*_all_cpus() function takes care of this.
 	 */
 	if (watchdog_user_enabled && watchdog_thresh)
-		err = watchdog_enable_all_cpus();
+		err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
 	else
 		watchdog_disable_all_cpus();
 
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
 		watchdog_thresh = old_thresh;
 		watchdog_user_enabled = old_enabled;
 	}
-
+out:
+	mutex_unlock(&watchdog_proc_mutex);
 	return err;
 }
 #endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
 	set_sample_period();
 
 	if (watchdog_user_enabled)
-		watchdog_enable_all_cpus();
+		watchdog_enable_all_cpus(false);
 }