Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c             |  5
-rw-r--r--  kernel/context_tracking.c  | 12
-rw-r--r--  kernel/events/core.c       | 27
-rw-r--r--  kernel/kmod.c              |  4
-rw-r--r--  kernel/params.c            |  6
-rw-r--r--  kernel/pid.c               |  5
-rw-r--r--  kernel/power/snapshot.c    |  5
-rw-r--r--  kernel/power/user.c        |  8
-rw-r--r--  kernel/reboot.c            |  9
-rw-r--r--  kernel/sched/fair.c        |  9
-rw-r--r--  kernel/softirq.c           | 15
-rw-r--r--  kernel/watchdog.c          | 60
12 files changed, 143 insertions(+), 22 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 91e53d04b6a9..7b0e23a740ce 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
 			sleep_time = timeout_start + audit_backlog_wait_time -
 					jiffies;
-			if ((long)sleep_time > 0)
+			if ((long)sleep_time > 0) {
 				wait_for_auditd(sleep_time);
 				continue;
+			}
 		}
 		if (audit_rate_check() && printk_ratelimit())
 			printk(KERN_WARNING
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 247091bf0587..859c8dfd78a1 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -51,6 +51,15 @@ void context_tracking_user_enter(void)
 	unsigned long flags;
 
 	/*
+	 * Repeat the user_enter() check here because some archs may be calling
+	 * this from asm and if no CPU needs context tracking, they shouldn't
+	 * go further. Repeat the check here until they support the static key
+	 * check.
+	 */
+	if (!static_key_false(&context_tracking_enabled))
+		return;
+
+	/*
 	 * Some contexts may involve an exception occuring in an irq,
 	 * leading to that nesting:
 	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
@@ -151,6 +160,9 @@ void context_tracking_user_exit(void)
 {
 	unsigned long flags;
 
+	if (!static_key_false(&context_tracking_enabled))
+		return;
+
 	if (in_interrupt())
 		return;
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dd236b66ca3a..d49a9d29334c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
 	*running = ctx_time - event->tstamp_running;
 }
 
+static void perf_event_init_userpage(struct perf_event *event)
+{
+	struct perf_event_mmap_page *userpg;
+	struct ring_buffer *rb;
+
+	rcu_read_lock();
+	rb = rcu_dereference(event->rb);
+	if (!rb)
+		goto unlock;
+
+	userpg = rb->user_page;
+
+	/* Allow new userspace to detect that bit 0 is deprecated */
+	userpg->cap_bit0_is_deprecated = 1;
+	userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
+
+unlock:
+	rcu_read_unlock();
+}
+
 void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
 }
@@ -4044,6 +4064,7 @@ again:
 	ring_buffer_attach(event, rb);
 	rcu_assign_pointer(event->rb, rb);
 
+	perf_event_init_userpage(event);
 	perf_event_update_userpage(event);
 
 unlock:
@@ -7213,15 +7234,15 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 		perf_remove_from_context(event);
 		unaccount_event_cpu(event, src_cpu);
 		put_ctx(src_ctx);
-		list_add(&event->event_entry, &events);
+		list_add(&event->migrate_entry, &events);
 	}
 	mutex_unlock(&src_ctx->mutex);
 
 	synchronize_rcu();
 
 	mutex_lock(&dst_ctx->mutex);
-	list_for_each_entry_safe(event, tmp, &events, event_entry) {
-		list_del(&event->event_entry);
+	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
+		list_del(&event->migrate_entry);
 		if (event->state >= PERF_EVENT_STATE_OFF)
 			event->state = PERF_EVENT_STATE_INACTIVE;
 		account_event_cpu(event, dst_cpu);
diff --git a/kernel/kmod.c b/kernel/kmod.c
index fb326365b694..b086006c59e7 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -571,6 +571,10 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 	DECLARE_COMPLETION_ONSTACK(done);
 	int retval = 0;
 
+	if (!sub_info->path) {
+		call_usermodehelper_freeinfo(sub_info);
+		return -EINVAL;
+	}
 	helper_lock();
 	if (!khelper_wq || usermodehelper_disabled) {
 		retval = -EBUSY;
diff --git a/kernel/params.c b/kernel/params.c
index 81c4e78c8f4c..c00d5b502aa4 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -254,11 +254,11 @@ int parse_args(const char *doing,
 
 
 STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
 STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
 STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
 STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
 
 int param_set_charp(const char *val, const struct kernel_param *kp)
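
Note on the kernel/params.c hunk above: the short, int and long parameter definitions now parse through kstrtol() rather than kstrtoul(). kstrtoul() rejects a leading '-', so before this change a signed module parameter could not be set to a negative value. The sketch below is a plain userspace analogue of what STANDARD_PARAM_DEF expands to (parse into a wide temporary, then range-check before narrowing); parse_short() is a hypothetical name, not kernel code.

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for param_set_short(): signed parse, then narrow. */
static int parse_short(const char *val, short *out)
{
	char *end;
	long tmp;

	errno = 0;
	tmp = strtol(val, &end, 0);		/* signed, like kstrtol() */
	if (errno || end == val || *end != '\0')
		return -EINVAL;
	if (tmp < SHRT_MIN || tmp > SHRT_MAX)
		return -ERANGE;			/* does not fit in a short */
	*out = (short)tmp;
	return 0;
}

int main(void)
{
	short s;

	if (parse_short("-42", &s) == 0)	/* only succeeds with a signed parse */
		printf("parsed %hd\n", s);
	return 0;
}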
diff --git a/kernel/pid.c b/kernel/pid.c
index ebe5e80b10f8..9b9a26698144 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -273,6 +273,11 @@ void free_pid(struct pid *pid)
 		 */
 		wake_up_process(ns->child_reaper);
 		break;
+	case PIDNS_HASH_ADDING:
+		/* Handle a fork failure of the first process */
+		WARN_ON(ns->child_reaper);
+		ns->nr_hashed = 0;
+		/* fall through */
 	case 0:
 		schedule_work(&ns->proc_work);
 		break;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 358a146fd4da..98c3b34a4cff 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -743,7 +743,10 @@ int create_basic_memory_bitmaps(void)
 	struct memory_bitmap *bm1, *bm2;
 	int error = 0;
 
-	BUG_ON(forbidden_pages_map || free_pages_map);
+	if (forbidden_pages_map && free_pages_map)
+		return 0;
+	else
+		BUG_ON(forbidden_pages_map || free_pages_map);
 
 	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
 	if (!bm1)
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 72e8f4fd616d..957f06164ad1 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -39,6 +39,7 @@ static struct snapshot_data {
 	char frozen;
 	char ready;
 	char platform_support;
+	bool free_bitmaps;
 } snapshot_state;
 
 atomic_t snapshot_device_available = ATOMIC_INIT(1);
@@ -82,6 +83,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 		data->swap = -1;
 		data->mode = O_WRONLY;
 		error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+		if (!error) {
+			error = create_basic_memory_bitmaps();
+			data->free_bitmaps = !error;
+		}
 		if (error)
 			pm_notifier_call_chain(PM_POST_RESTORE);
 	}
@@ -111,6 +116,8 @@ static int snapshot_release(struct inode *inode, struct file *filp)
 		pm_restore_gfp_mask();
 		free_basic_memory_bitmaps();
 		thaw_processes();
+	} else if (data->free_bitmaps) {
+		free_basic_memory_bitmaps();
 	}
 	pm_notifier_call_chain(data->mode == O_RDONLY ?
 			PM_POST_HIBERNATION : PM_POST_RESTORE);
@@ -231,6 +238,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 			break;
 		pm_restore_gfp_mask();
 		free_basic_memory_bitmaps();
+		data->free_bitmaps = false;
 		thaw_processes();
 		data->frozen = 0;
 		break;
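
Note on the two kernel/power hunks above: create_basic_memory_bitmaps() now returns 0 when both bitmaps already exist instead of hitting BUG_ON(), and snapshot_open() records in data->free_bitmaps whether it is the caller that set them up, so snapshot_release() and SNAPSHOT_UNFREEZE only free what this file descriptor is responsible for. The userspace sketch below shows the same create-if-needed plus ownership-flag pattern; every name in it is a hypothetical stand-in, not kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void *bitmaps;			/* stands in for forbidden/free_pages_map */

static int create_bitmaps(void)
{
	if (bitmaps)			/* already set up elsewhere: nothing to do */
		return 0;
	bitmaps = malloc(64);
	return bitmaps ? 0 : -1;
}

static void destroy_bitmaps(void)
{
	free(bitmaps);
	bitmaps = NULL;
}

struct session {
	bool free_bitmaps;		/* did open() set up the bitmaps? */
};

static int session_open(struct session *s)
{
	int error = create_bitmaps();

	s->free_bitmaps = !error;	/* release only on success, like the hunk */
	return error;
}

static void session_release(struct session *s)
{
	if (s->free_bitmaps)
		destroy_bitmaps();
	s->free_bitmaps = false;
}

int main(void)
{
	struct session s = { .free_bitmaps = false };

	if (session_open(&s) == 0)
		puts("bitmaps ready");
	session_release(&s);
	return 0;
}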
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 269ed9384cc4..f813b3474646 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
 #endif
 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
 
-int reboot_default;
+/*
+ * This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line). This is needed so that we can
+ * suppress DMI scanning for reboot quirks. Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+int reboot_default = 1;
 int reboot_cpu;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 11cd13667359..7c70201fbc61 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 	}
 
 	if (!se) {
-		cfs_rq->h_load = rq->avg.load_avg_contrib;
+		cfs_rq->h_load = cfs_rq->runnable_load_avg;
 		cfs_rq->last_h_load_update = now;
 	}
 
@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 		(busiest->load_per_task * SCHED_POWER_SCALE) /
 		busiest->group_power;
 
-	if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
-	    (scaled_busy_load_per_task * imbn)) {
+	if (busiest->avg_load + scaled_busy_load_per_task >=
+	    local->avg_load + (scaled_busy_load_per_task * imbn)) {
 		env->imbalance = busiest->load_per_task;
 		return;
 	}
@@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	 * max load less than avg load(as we skip the groups at or below
 	 * its cpu_power, while calculating max_load..)
 	 */
-	if (busiest->avg_load < sds->avg_load) {
+	if (busiest->avg_load <= sds->avg_load ||
+	    local->avg_load >= sds->avg_load) {
 		env->imbalance = 0;
 		return fix_small_imbalance(env, sds);
 	}
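
Note on the fix_small_imbalance() hunk above: the load statistics involved are unsigned, so when busiest->avg_load is smaller than local->avg_load the old left-hand side wraps around to a huge value and the test is effectively always true. The commit message is not shown here, but the rearranged form keeps every term non-negative, which is presumably the point. A minimal userspace sketch of the wraparound, with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long busiest_avg = 100, local_avg = 300;
	unsigned long scaled_busy = 50, imbn = 2;

	/* Old form: 100 - 300 wraps to a huge unsigned value, so >= is always true. */
	int old_form = busiest_avg - local_avg + scaled_busy >= scaled_busy * imbn;

	/* New form: no subtraction, so the comparison keeps its intended meaning. */
	int new_form = busiest_avg + scaled_busy >= local_avg + scaled_busy * imbn;

	printf("old form: %d, new form: %d\n", old_form, new_form);	/* prints 1 and 0 */
	return 0;
}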
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 53cc09ceb0b8..d7d498d8cc4f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -328,10 +328,19 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-	if (!force_irqthreads)
-		__do_softirq();
-	else
+	if (!force_irqthreads) {
+		/*
+		 * We can safely execute softirq on the current stack if
+		 * it is the irq stack, because it should be near empty
+		 * at this stage. But we have no way to know if the arch
+		 * calls irq_exit() on the irq stack. So call softirq
+		 * in its own stack to prevent from any overrun on top
+		 * of a potentially deep task stack.
+		 */
+		do_softirq();
+	} else {
 		wakeup_softirqd();
+	}
 }
 
 static inline void tick_irq_exit(void)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 51c4f34d258e..4431610f049a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
 	.unpark = watchdog_enable,
 };
 
-static int watchdog_enable_all_cpus(void)
+static void restart_watchdog_hrtimer(void *info)
+{
+	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+	int ret;
+
+	/*
+	 * No need to cancel and restart hrtimer if it is currently executing
+	 * because it will reprogram itself with the new period now.
+	 * We should never see it unqueued here because we are running per-cpu
+	 * with interrupts disabled.
+	 */
+	ret = hrtimer_try_to_cancel(hrtimer);
+	if (ret == 1)
+		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+				HRTIMER_MODE_REL_PINNED);
+}
+
+static void update_timers(int cpu)
+{
+	struct call_single_data data = {.func = restart_watchdog_hrtimer};
+	/*
+	 * Make sure that perf event counter will adopt to a new
+	 * sampling period. Updating the sampling period directly would
+	 * be much nicer but we do not have an API for that now so
+	 * let's use a big hammer.
+	 * Hrtimer will adopt the new period on the next tick but this
+	 * might be late already so we have to restart the timer as well.
+	 */
+	watchdog_nmi_disable(cpu);
+	__smp_call_function_single(cpu, &data, 1);
+	watchdog_nmi_enable(cpu);
+}
+
+static void update_timers_all_cpus(void)
+{
+	int cpu;
+
+	get_online_cpus();
+	preempt_disable();
+	for_each_online_cpu(cpu)
+		update_timers(cpu);
+	preempt_enable();
+	put_online_cpus();
+}
+
+static int watchdog_enable_all_cpus(bool sample_period_changed)
 {
 	int err = 0;
 
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
 			pr_err("Failed to create watchdog threads, disabled\n");
 		else
 			watchdog_running = 1;
+	} else if (sample_period_changed) {
+		update_timers_all_cpus();
 	}
 
 	return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
 			void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	int err, old_thresh, old_enabled;
+	static DEFINE_MUTEX(watchdog_proc_mutex);
 
+	mutex_lock(&watchdog_proc_mutex);
 	old_thresh = ACCESS_ONCE(watchdog_thresh);
 	old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
 	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 	if (err || !write)
-		return err;
+		goto out;
 
 	set_sample_period();
 	/*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
 	 * watchdog_*_all_cpus() function takes care of this.
 	 */
 	if (watchdog_user_enabled && watchdog_thresh)
-		err = watchdog_enable_all_cpus();
+		err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
 	else
 		watchdog_disable_all_cpus();
 
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
 		watchdog_thresh = old_thresh;
 		watchdog_user_enabled = old_enabled;
 	}
-
+out:
+	mutex_unlock(&watchdog_proc_mutex);
 	return err;
 }
 #endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
 	set_sample_period();
 
 	if (watchdog_user_enabled)
-		watchdog_enable_all_cpus();
+		watchdog_enable_all_cpus(false);
 }