author     Linus Torvalds <torvalds@linux-foundation.org>  2013-12-04 11:52:09 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-12-04 11:52:09 -0500
commit     1ab231b274ba51a54acebec23c6aded0f3cdf54e
tree       2ce15cb4ba9adf3e041eb9d1b26f63292c953094 /kernel
parent     dea4f48a0a301b23c65af8e4fe8ccf360c272fbf
parent     0e576acbc1d9600cf2d9b4a141a2554639959d50
Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer fixes from Thomas Gleixner:
- timekeeping: Cure a subtle drift issue on GENERIC_TIME_VSYSCALL_OLD
- nohz: Make CONFIG_NO_HZ=n and nohz=off command line option behave the
same way. Fixes a long standing load accounting wreckage.
- clocksource/ARM: Kconfig update to avoid ARM=n wreckage
- clocksource/ARM: Fixlets for the AT91 and SH clocksource/clockevents
- Trivial documentation update and kzalloc conversion from akpm's pile
* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
nohz: Fix another inconsistency between CONFIG_NO_HZ=n and nohz=off
time: Fix 1ns/tick drift w/ GENERIC_TIME_VSYSCALL_OLD
clocksource: arm_arch_timer: Hide eventstream Kconfig on non-ARM
clocksource: sh_tmu: Add clk_prepare/unprepare support
clocksource: sh_tmu: Release clock when sh_tmu_register() fails
clocksource: sh_mtu2: Add clk_prepare/unprepare support
clocksource: sh_mtu2: Release clock when sh_mtu2_register() fails
ARM: at91: rm9200: switch back to clockevents_config_and_register
tick: Document tick_do_timer_cpu
timer: Convert kmalloc_node(...GFP_ZERO...) to kzalloc_node(...)
NOHZ: Check for nohz active instead of nohz enabled
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcu/tree_plugin.h   |  4
-rw-r--r--  kernel/time/tick-common.c  | 15
-rw-r--r--  kernel/time/tick-sched.c   | 25
-rw-r--r--  kernel/time/timekeeping.c  |  2
-rw-r--r--  kernel/timer.c             |  5
5 files changed, 32 insertions, 19 deletions
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 6abb03dff5c0..08a765232432 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1632,7 +1632,7 @@ module_param(rcu_idle_gp_delay, int, 0644);
 static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
 module_param(rcu_idle_lazy_gp_delay, int, 0644);
 
-extern int tick_nohz_enabled;
+extern int tick_nohz_active;
 
 /*
  * Try to advance callbacks for all flavors of RCU on the current CPU, but
@@ -1729,7 +1729,7 @@ static void rcu_prepare_for_idle(int cpu)
 	int tne;
 
 	/* Handle nohz enablement switches conservatively. */
-	tne = ACCESS_ONCE(tick_nohz_enabled);
+	tne = ACCESS_ONCE(tick_nohz_active);
 	if (tne != rdtp->tick_nohz_enabled_snap) {
 		if (rcu_cpu_has_callbacks(cpu, NULL))
 			invoke_rcu_core(); /* force nohz to see update. */
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 64522ecdfe0e..162b03ab0ad2 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,6 +33,21 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
+
+/*
+ * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
+ * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
+ * variable has two functions:
+ *
+ * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the
+ *    timekeeping lock all at once. Only the CPU which is assigned to do the
+ *    update is handling it.
+ *
+ * 2) Hand off the duty in the NOHZ idle case by setting the value to
+ *    TICK_DO_TIMER_NONE, i.e. a non existing CPU. So the next cpu which looks
+ *    at it will take over and keep the time keeping alive. The handover
+ *    procedure also covers cpu hotplug.
+ */
 int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
 
 /*
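
The new comment block documents the duty-handoff protocol in prose. As a reader's aid, here is a minimal, hedged sketch of how a per-CPU tick handler acts on it; it is an illustrative composite of the handlers in tick-common.c and tick-sched.c, not a verbatim copy of either, and the function name is made up for this note.

/* Illustrative composite only, not actual kernel code. */
static void tick_handler_sketch(int cpu)
{
	/* Point 2 above: a CPU that went NOHZ-idle leaves TICK_DO_TIMER_NONE
	 * behind; the first CPU whose tick fires adopts the duty. */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;

	/* Point 1 above: only the assigned CPU advances jiffies and the
	 * timekeeping core, so the other CPUs stay off the timekeeping lock. */
	if (tick_do_timer_cpu == cpu)
		do_timer(1);
}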
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3612fc77f834..ea20f7d1ac2c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -361,8 +361,8 @@ void __init tick_nohz_init(void)
 /*
  * NO HZ enabled ?
  */
-int tick_nohz_enabled __read_mostly = 1;
-
+static int tick_nohz_enabled __read_mostly = 1;
+int tick_nohz_active __read_mostly;
 /*
  * Enable / Disable tickless mode
  */
@@ -465,7 +465,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	ktime_t now, idle;
 
-	if (!tick_nohz_enabled)
+	if (!tick_nohz_active)
 		return -1;
 
 	now = ktime_get();
@@ -506,7 +506,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	ktime_t now, iowait;
 
-	if (!tick_nohz_enabled)
+	if (!tick_nohz_active)
 		return -1;
 
 	now = ktime_get();
@@ -711,8 +711,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 		return false;
 	}
 
-	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
+	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
+		ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
 		return false;
+	}
 
 	if (need_resched())
 		return false;
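
A hedged reading of the two added lines (they belong to the "Fix another inconsistency between CONFIG_NO_HZ=n and nohz=off" commit in the list above): when nohz never became active, can_stop_idle_tick() still refuses to stop the tick, but it now seeds ts->sleep_length with exactly one tick period first, so callers that query the expected sleep length afterwards get the same answer a CONFIG_NO_HZ=n kernel would give rather than stale data. In kernels of this vintage the consumer is essentially a trivial accessor of that field, roughly of this shape (sketched from memory, not part of this diff):

ktime_t tick_nohz_get_sleep_length(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	return ts->sleep_length;	/* seeded above with NSEC_PER_SEC/HZ when nohz is inactive */
}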
@@ -799,11 +801,6 @@ void tick_nohz_idle_enter(void)
 	local_irq_disable();
 
 	ts = &__get_cpu_var(tick_cpu_sched);
-	/*
-	 * set ts->inidle unconditionally. even if the system did not
-	 * switch to nohz mode the cpu frequency governers rely on the
-	 * update of the idle time accounting in tick_nohz_start_idle().
-	 */
 	ts->inidle = 1;
 	__tick_nohz_idle_enter(ts);
 
@@ -973,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t next;
 
-	if (!tick_nohz_enabled)
+	if (!tick_nohz_active)
 		return;
 
 	local_irq_disable();
@@ -981,7 +978,7 @@ static void tick_nohz_switch_to_nohz(void)
 		local_irq_enable();
 		return;
 	}
-
+	tick_nohz_active = 1;
 	ts->nohz_mode = NOHZ_MODE_LOWRES;
 
 	/*
@@ -1139,8 +1136,10 @@ void tick_setup_sched_timer(void)
 	}
 
 #ifdef CONFIG_NO_HZ_COMMON
-	if (tick_nohz_enabled)
+	if (tick_nohz_enabled) {
 		ts->nohz_mode = NOHZ_MODE_HIGHRES;
+		tick_nohz_active = 1;
+	}
 #endif
 }
 #endif /* HIGH_RES_TIMERS */
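
Taken together, the tick-sched.c hunks split one flag into two: tick_nohz_enabled now only records configuration (it starts at 1, is cleared by nohz=off, and has become static to this file), while the new tick_nohz_active is set only once a CPU actually switches into NOHZ_MODE_LOWRES or NOHZ_MODE_HIGHRES. Outside consumers such as the RCU hunk above and the idle/iowait accounting therefore see the same value (0) whether nohz was compiled out or disabled on the command line, which is the inconsistency the pull summary refers to. A minimal consumer-side sketch, with a made-up function name, only to illustrate the new rule of "check active, not enabled":

/* Hypothetical consumer; get_cpu_idle_time_us() is the real accessor patched above. */
static u64 idle_time_or_fallback(int cpu)
{
	if (!tick_nohz_active)		/* CONFIG_NO_HZ=n and nohz=off look alike here */
		return -1;		/* caller falls back to tick-based accounting */
	return get_cpu_idle_time_us(cpu, NULL);
}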
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 3abf53418b67..87b4f00284c9 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1347,7 +1347,7 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
 	tk->xtime_nsec -= remainder;
 	tk->xtime_nsec += 1ULL << tk->shift;
 	tk->ntp_error += remainder << tk->ntp_error_shift;
-
+	tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
 }
 #else
 #define old_vsyscall_fixup(tk)
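
Why the single added line cures the "1ns/tick drift" named in the pull summary: old_vsyscall_fixup() rounds xtime_nsec up to the next tick boundary, i.e. it removes remainder and adds back a whole tick of 1ULL << shift. Before this change only the removed remainder was charged to the NTP error accumulator; the tick added back was not, so every call left one tick's worth of adjustment unaccounted, which surfaced as the slow drift. A standalone, hedged model of that bookkeeping (field names reused for readability, shift values purely illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned shift = 8, ntp_error_shift = 4;	/* illustrative values */
	const uint64_t remainder = 100;			/* some sub-tick leftover */
	int64_t charged_old, charged_new, gap;

	/* The old code charged only the removed remainder... */
	charged_old = (int64_t)(remainder << ntp_error_shift);
	/* ...the fixed code also charges the tick that was added back. */
	charged_new = charged_old - (int64_t)((1ULL << shift) << ntp_error_shift);

	gap = charged_old - charged_new;	/* amount per call the old code never accounted for */
	printf("per-call gap: %lld (== (1 << shift) << ntp_error_shift = %lld)\n",
	       (long long)gap, (long long)((1ULL << shift) << ntp_error_shift));
	return 0;
}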
diff --git a/kernel/timer.c b/kernel/timer.c
index 6582b82fa966..accfd241b9e5 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1518,9 +1518,8 @@ static int init_timers_cpu(int cpu)
 			/*
 			 * The APs use this path later in boot
 			 */
-			base = kmalloc_node(sizeof(*base),
-					    GFP_KERNEL | __GFP_ZERO,
-					    cpu_to_node(cpu));
+			base = kzalloc_node(sizeof(*base), GFP_KERNEL,
+					    cpu_to_node(cpu));
 			if (!base)
 				return -ENOMEM;
 
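
The kernel/timer.c change is purely mechanical: kzalloc_node() is a thin inline wrapper that ors __GFP_ZERO into the flags itself, roughly of this shape in slab.h (sketched from memory, so treat as approximate), which is why the behaviour is unchanged while the call site loses a line:

/* Approximate shape of the helper in <linux/slab.h>; not part of this diff. */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}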