author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2013-09-29 21:29:23 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2013-09-29 21:29:23 -0400
commit     88502b9c0a5dcc884c0dbfb6ddf964ff5da5d8d3 (patch)
tree       f79f728c308100bc3e57d0d2f5d1e00d90406a0d /kernel
parent     e18945b159a1cdbc031f1d3b0b7e515a33bdcbf7 (diff)
parent     15c03dd4859ab16f9212238f29dd315654aa94f6 (diff)
Merge 3.12-rc3 into driver-core-next
We want the driver core and sysfs fixes in here to make merges and
development easier.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'kernel')
 kernel/audit.c            |  5
 kernel/context_tracking.c | 12
 kernel/events/core.c      | 21
 kernel/params.c           |  6
 kernel/reboot.c           |  9
 kernel/sched/fair.c       |  9
 kernel/watchdog.c         | 60
 7 files changed, 107 insertions(+), 15 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 91e53d04b6a9..7b0e23a740ce 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
                 sleep_time = timeout_start + audit_backlog_wait_time -
                                 jiffies;
-                if ((long)sleep_time > 0)
+                if ((long)sleep_time > 0) {
                         wait_for_auditd(sleep_time);
                         continue;
+                }
         }
         if (audit_rate_check() && printk_ratelimit())
                 printk(KERN_WARNING
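This hunk pulls in the fix for an endless wait in audit_log_start(): with the braces missing, the continue ran unconditionally, so once the backlog wait budget was exhausted (sleep_time <= 0) the loop spun without ever sleeping or falling through to the rate-limited warning below it. A minimal user-space sketch of the corrected control flow (names like log_start_sketch are ours, not kernel API):

#include <stdio.h>

static int backlog_full(void) { return 1; }     /* stand-in for the backlog check */

static void log_start_sketch(long deadline, long now)
{
        while (backlog_full()) {
                long sleep_time = deadline - now;
                if (sleep_time > 0) {   /* fixed: retry only if we actually slept */
                        now += sleep_time;      /* pretend wait_for_auditd() ran */
                        continue;
                }
                /* budget exhausted: fall through, warn, give up */
                printf("audit_log_start: backlog limit exceeded\n");
                break;
        }
}

int main(void)
{
        log_start_sketch(100, 50);      /* budget expires after one sleep */
        return 0;
}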
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 247091bf0587..859c8dfd78a1 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -51,6 +51,15 @@ void context_tracking_user_enter(void)
         unsigned long flags;
 
         /*
+         * Repeat the user_enter() check here because some archs may be calling
+         * this from asm and if no CPU needs context tracking, they shouldn't
+         * go further. Repeat the check here until they support the static key
+         * check.
+         */
+        if (!static_key_false(&context_tracking_enabled))
+                return;
+
+        /*
          * Some contexts may involve an exception occuring in an irq,
          * leading to that nesting:
          * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
@@ -151,6 +160,9 @@ void context_tracking_user_exit(void)
 {
         unsigned long flags;
 
+        if (!static_key_false(&context_tracking_enabled))
+                return;
+
         if (in_interrupt())
                 return;
 
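Both context_tracking hunks add the same early bail-out: when the context_tracking_enabled static key is off, user_enter()/user_exit() return immediately, which matters for architectures that call these paths from assembly without checking first. A rough, runnable user-space analogue of the guard pattern (a plain flag stands in for the static key, which the kernel patches directly into the instruction stream, making the disabled case cheaper than a load-and-branch):

#include <stdbool.h>
#include <stdio.h>

static bool tracking_enabled;            /* stand-in for the static key */

static void user_enter_sketch(void)
{
        if (!tracking_enabled)           /* the early return added above */
                return;
        puts("slow path: record that this task entered userspace");
}

int main(void)
{
        user_enter_sketch();             /* no-op while disabled */
        tracking_enabled = true;
        user_enter_sketch();             /* now takes the slow path */
        return 0;
}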
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1a825a486a25..4ccb29bb761e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
         *running = ctx_time - event->tstamp_running;
 }
 
+static void perf_event_init_userpage(struct perf_event *event)
+{
+        struct perf_event_mmap_page *userpg;
+        struct ring_buffer *rb;
+
+        rcu_read_lock();
+        rb = rcu_dereference(event->rb);
+        if (!rb)
+                goto unlock;
+
+        userpg = rb->user_page;
+
+        /* Allow new userspace to detect that bit 0 is deprecated */
+        userpg->cap_bit0_is_deprecated = 1;
+        userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
+
+unlock:
+        rcu_read_unlock();
+}
+
 void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
 }
@@ -4044,6 +4064,7 @@ again:
         ring_buffer_attach(event, rb);
         rcu_assign_pointer(event->rb, rb);
 
+        perf_event_init_userpage(event);
         perf_event_update_userpage(event);
 
 unlock:
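perf_event_init_userpage() publishes two things in the mmap'd control page at mmap time: cap_bit0_is_deprecated and the structure's size. On older kernels every capability bit accidentally mirrored bit 0, so this flag is how updated userspace tells the two layouts apart. A hedged sketch of a consumer, assuming a fixed uapi header and sufficient perf_event_paranoid permissions (error handling trimmed; a 4 KiB page size is assumed):

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;

        int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) { perror("perf_event_open"); return 1; }

        /* header page plus a one-page ring buffer (perf wants 1 + 2^n pages) */
        struct perf_event_mmap_page *pc =
                mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (pc == MAP_FAILED) { perror("mmap"); return 1; }

        if (pc->cap_bit0_is_deprecated)
                printf("fixed kernel: cap_user_rdpmc=%u\n",
                       (unsigned)pc->cap_user_rdpmc);
        else
                printf("legacy kernel: only bit 0 is meaningful\n");
        return 0;
}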
diff --git a/kernel/params.c b/kernel/params.c
index 81c4e78c8f4c..c00d5b502aa4 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -254,11 +254,11 @@ int parse_args(const char *doing,
 
 
 STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
 STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
 STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
 STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
 
 int param_set_charp(const char *val, const struct kernel_param *kp)
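The params.c change fixes parsing of signed module parameter types: short, int and long were being run through kstrtoul, which rejects a leading '-', so a parameter like option=-1 failed outright. User-space strtoul is a useful contrast here because it doesn't reject the sign but silently wraps instead, which makes the general hazard of parsing signed input with an unsigned conversion easy to demonstrate:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *val = "-1";
        char *end;

        long s = strtol(val, &end, 0);            /* -1, as kstrtol yields  */
        unsigned long u = strtoul(val, &end, 0);  /* wraps to ULONG_MAX     */

        printf("signed: %ld  unsigned: %lu\n", s, u);
        return 0;
}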
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 269ed9384cc4..f813b3474646 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
 #endif
 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
 
-int reboot_default;
+/*
+ * This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line).  This is needed so that we can
+ * suppress DMI scanning for reboot quirks.  Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+int reboot_default = 1;
 int reboot_cpu;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
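reboot_default now starts at 1 and, per the new comment, records whether reboot= was ever given; quirk code is expected to consult it before applying a DMI override. A runnable user-space analogue of the intended flag semantics (parse_reboot_arg and maybe_apply_dmi_quirk are hypothetical names, not the kernel's):

#include <stdio.h>

static int reboot_default = 1;   /* mirrors the hunk's new initializer */

static void parse_reboot_arg(const char *arg)
{
        if (arg)
                reboot_default = 0;   /* reboot= seen: the user decided */
}

static void maybe_apply_dmi_quirk(void)
{
        if (!reboot_default) {
                puts("skipping DMI reboot quirk: reboot= was given");
                return;
        }
        puts("applying DMI reboot quirk");
}

int main(void)
{
        parse_reboot_arg("bios");     /* as if booted with reboot=bios */
        maybe_apply_dmi_quirk();
        return 0;
}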
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 11cd13667359..7c70201fbc61 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
         }
 
         if (!se) {
-                cfs_rq->h_load = rq->avg.load_avg_contrib;
+                cfs_rq->h_load = cfs_rq->runnable_load_avg;
                 cfs_rq->last_h_load_update = now;
         }
 
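In update_cfs_rq_h_load() the root of the group hierarchy now seeds h_load with the cfs_rq's runnable_load_avg rather than rq->avg.load_avg_contrib (which also folds in blocked load), keeping task_h_load() comparable with the runnable loads the balancer actually moves. Roughly, h_load then propagates down the hierarchy by scaling at each level; a sketch of that scaling (h_load_down is our hypothetical helper, not kernel code):

#include <stdio.h>

/* Each level's h_load is the parent's h_load scaled by this entity's
 * share of the parent's runnable load ("+ 1" avoids division by zero),
 * which is why the seed at the root must itself be a runnable load. */
static unsigned long h_load_down(unsigned long root_runnable_load,
                                 const unsigned long *se_contrib,
                                 const unsigned long *parent_runnable,
                                 int depth)
{
        unsigned long load = root_runnable_load;
        for (int i = 0; i < depth; i++)
                load = load * se_contrib[i] / (parent_runnable[i] + 1);
        return load;
}

int main(void)
{
        unsigned long contrib[] = { 512 }, parent[] = { 1024 };
        printf("%lu\n", h_load_down(2048, contrib, parent, 1));  /* 1023 */
        return 0;
}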
@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
                         (busiest->load_per_task * SCHED_POWER_SCALE) /
                         busiest->group_power;
 
-        if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
-                        (scaled_busy_load_per_task * imbn)) {
+        if (busiest->avg_load + scaled_busy_load_per_task >=
+            local->avg_load + (scaled_busy_load_per_task * imbn)) {
                 env->imbalance = busiest->load_per_task;
                 return;
         }
@@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
          * max load less than avg load(as we skip the groups at or below
          * its cpu_power, while calculating max_load..)
          */
-        if (busiest->avg_load < sds->avg_load) {
+        if (busiest->avg_load <= sds->avg_load ||
+            local->avg_load >= sds->avg_load) {
                 env->imbalance = 0;
                 return fix_small_imbalance(env, sds);
         }
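Both of these fair.c hunks address the same class of bug: the avg_load fields are unsigned, so a subtraction like busiest->avg_load - local->avg_load underflows to a huge value whenever the local group is the busier one, and the guard fires when it should not. Rearranging the comparison so both sides only add keeps every intermediate value non-negative. A minimal demonstration with example values of our choosing:

#include <stdio.h>

int main(void)
{
        unsigned long busiest = 100, local = 300, per_task = 50, imbn = 2;

        /* old form: 100 - 300 wraps to ~2^64, so the test is always true */
        int old_form = (busiest - local + per_task >= per_task * imbn);
        /* new form: compare sums instead of a difference */
        int new_form = (busiest + per_task >= local + per_task * imbn);

        printf("old=%d new=%d\n", old_form, new_form);   /* prints old=1 new=0 */
        return 0;
}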
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 51c4f34d258e..4431610f049a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
         .unpark         = watchdog_enable,
 };
 
-static int watchdog_enable_all_cpus(void)
+static void restart_watchdog_hrtimer(void *info)
+{
+        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+        int ret;
+
+        /*
+         * No need to cancel and restart hrtimer if it is currently executing
+         * because it will reprogram itself with the new period now.
+         * We should never see it unqueued here because we are running per-cpu
+         * with interrupts disabled.
+         */
+        ret = hrtimer_try_to_cancel(hrtimer);
+        if (ret == 1)
+                hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+                                HRTIMER_MODE_REL_PINNED);
+}
+
+static void update_timers(int cpu)
+{
+        struct call_single_data data = {.func = restart_watchdog_hrtimer};
+        /*
+         * Make sure that perf event counter will adopt to a new
+         * sampling period. Updating the sampling period directly would
+         * be much nicer but we do not have an API for that now so
+         * let's use a big hammer.
+         * Hrtimer will adopt the new period on the next tick but this
+         * might be late already so we have to restart the timer as well.
+         */
+        watchdog_nmi_disable(cpu);
+        __smp_call_function_single(cpu, &data, 1);
+        watchdog_nmi_enable(cpu);
+}
+
+static void update_timers_all_cpus(void)
+{
+        int cpu;
+
+        get_online_cpus();
+        preempt_disable();
+        for_each_online_cpu(cpu)
+                update_timers(cpu);
+        preempt_enable();
+        put_online_cpus();
+}
+
+static int watchdog_enable_all_cpus(bool sample_period_changed)
 {
         int err = 0;
 
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
                         pr_err("Failed to create watchdog threads, disabled\n");
                 else
                         watchdog_running = 1;
+        } else if (sample_period_changed) {
+                update_timers_all_cpus();
         }
 
         return err;
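The new update_timers() path is, as its own comment says, a big hammer for a changed sample period: it tears down and re-creates the per-CPU perf NMI event, and pokes each CPU so the watchdog hrtimer is cancelled and restarted with the new period rather than picking it up one stale tick later. The cancel-then-rearm idea looks like this in a runnable user-space analogue using POSIX timers (set_period is our name, not kernel API; link with -lrt on older glibc):

#include <signal.h>
#include <stdio.h>
#include <time.h>

static void set_period(timer_t timer, long period_ns)
{
        struct itimerspec its = {
                .it_interval = { .tv_sec = 0, .tv_nsec = period_ns },
                .it_value    = { .tv_sec = 0, .tv_nsec = period_ns },
        };
        /* Like hrtimer_start() after hrtimer_try_to_cancel(): the new
         * period takes effect now, not after the currently armed expiry. */
        timer_settime(timer, 0, &its, NULL);
}

int main(void)
{
        timer_t t;
        struct sigevent sev = { .sigev_notify = SIGEV_NONE };

        if (timer_create(CLOCK_MONOTONIC, &sev, &t) != 0) {
                perror("timer_create");
                return 1;
        }
        set_period(t, 4 * 1000 * 1000);  /* 4 ms, as if the threshold changed */
        timer_delete(t);
        return 0;
}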
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
 {
         int err, old_thresh, old_enabled;
+        static DEFINE_MUTEX(watchdog_proc_mutex);
 
+        mutex_lock(&watchdog_proc_mutex);
         old_thresh = ACCESS_ONCE(watchdog_thresh);
         old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
         err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
         if (err || !write)
-                return err;
+                goto out;
 
         set_sample_period();
         /*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
          * watchdog_*_all_cpus() function takes care of this.
          */
         if (watchdog_user_enabled && watchdog_thresh)
-                err = watchdog_enable_all_cpus();
+                err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
         else
                 watchdog_disable_all_cpus();
 
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
                 watchdog_thresh = old_thresh;
                 watchdog_user_enabled = old_enabled;
         }
-
+out:
+        mutex_unlock(&watchdog_proc_mutex);
         return err;
 }
 #endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
         set_sample_period();
 
         if (watchdog_user_enabled)
-                watchdog_enable_all_cpus();
+                watchdog_enable_all_cpus(false);
 }
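proc_dowatchdog() is now serialized by a file-local watchdog_proc_mutex, with the early return converted to goto out so the unlock always runs; this closes the race between concurrent writers to the watchdog sysctls, and a threshold change now reprograms the running timers via watchdog_enable_all_cpus(old_thresh != watchdog_thresh). Exercising the knob from userspace (requires root; /proc/sys/kernel/watchdog_thresh is the real sysctl path):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/sys/kernel/watchdog_thresh", O_WRONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        const char *val = "5\n";   /* new soft-lockup threshold, in seconds */
        if (write(fd, val, strlen(val)) < 0)
                perror("write");
        close(fd);
        return 0;
}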