 arch/arm/mach-at91/at91rm9200_time.c |  7
 drivers/clocksource/Kconfig          |  1
 drivers/clocksource/sh_mtu2.c        | 16
 drivers/clocksource/sh_tmu.c         | 20
 kernel/rcu/tree_plugin.h             |  4
 kernel/time/tick-common.c            | 15
 kernel/time/tick-sched.c             | 25
 kernel/time/timekeeping.c            |  2
 kernel/timer.c                       |  5
 9 files changed, 66 insertions(+), 29 deletions(-)
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index f607deb40f4d..bc7b363a3083 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -174,7 +174,6 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
 static struct clock_event_device clkevt = {
         .name           = "at91_tick",
         .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-        .shift          = 32,
         .rating         = 150,
         .set_next_event = clkevt32k_next_event,
         .set_mode       = clkevt32k_mode,
@@ -265,11 +264,9 @@ void __init at91rm9200_timer_init(void)
         at91_st_write(AT91_ST_RTMR, 1);
 
         /* Setup timer clockevent, with minimum of two ticks (important!!) */
-        clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
-        clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
-        clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
         clkevt.cpumask = cpumask_of(0);
-        clockevents_register_device(&clkevt);
+        clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK,
+                                        2, AT91_ST_ALMV);
 
         /* register clocksource */
         clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
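
The at91rm9200 hunk drops the hand-rolled mult/shift and min/max delta computation in favour of clockevents_config_and_register(), which derives those values from the clock frequency before registering the device. A minimal conceptual sketch of what the helper amounts to (the real implementation lives in kernel/time/clockevents.c and tracks a few more fields):

    #include <linux/clockchips.h>

    /*
     * Conceptual sketch only -- not the kernel's exact implementation.
     * clockevents_config_and_register(dev, freq, min_delta, max_delta)
     * roughly does the equivalent of:
     */
    static void config_and_register_sketch(struct clock_event_device *dev,
                                           u32 freq, unsigned long min_delta,
                                           unsigned long max_delta)
    {
            /* derive mult/shift so that cycles = ns * mult >> shift */
            clockevents_calc_mult_shift(dev, freq, 4);
            dev->max_delta_ns = clockevent_delta2ns(max_delta, dev);
            dev->min_delta_ns = clockevent_delta2ns(min_delta, dev);
            clockevents_register_device(dev);
    }
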
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index bdb953e15d2a..5c07a56962db 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -87,6 +87,7 @@ config ARM_ARCH_TIMER
 config ARM_ARCH_TIMER_EVTSTREAM
         bool "Support for ARM architected timer event stream generation"
         default y if ARM_ARCH_TIMER
+        depends on ARM_ARCH_TIMER
         help
           This option enables support for event stream generation based on
           the ARM architected timer. It is used for waking up CPUs executing
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 4aac9ee0d0c0..3cf12834681e 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -313,8 +313,20 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
                 goto err1;
         }
 
-        return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
-                                cfg->clockevent_rating);
+        ret = clk_prepare(p->clk);
+        if (ret < 0)
+                goto err2;
+
+        ret = sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
+                               cfg->clockevent_rating);
+        if (ret < 0)
+                goto err3;
+
+        return 0;
+ err3:
+        clk_unprepare(p->clk);
+ err2:
+        clk_put(p->clk);
  err1:
         iounmap(p->mapbase);
  err0:
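
This hunk and the sh_tmu.c hunk below add the same thing: prepare the clock at setup time, where sleeping is allowed, and unwind with goto labels on failure. A stripped-down sketch of that error-unwind shape, using hypothetical names (example_priv and example_register are illustrative, not taken from the drivers):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    struct example_priv {                   /* illustrative only */
            struct platform_device *pdev;
            struct clk *clk;
    };

    int example_register(struct example_priv *p);   /* hypothetical step */

    static int example_setup(struct example_priv *p)
    {
            int ret;

            p->clk = clk_get(&p->pdev->dev, "fck");
            if (IS_ERR(p->clk))
                    return PTR_ERR(p->clk);

            ret = clk_prepare(p->clk);      /* may sleep; do it here, not in IRQ context */
            if (ret < 0)
                    goto err_put;

            ret = example_register(p);
            if (ret < 0)
                    goto err_unprepare;

            return 0;

    err_unprepare:
            clk_unprepare(p->clk);          /* undo in reverse order of acquisition */
    err_put:
            clk_put(p->clk);
            return ret;
    }
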
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 78b8dae49628..63557cda0a7d 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -472,12 +472,26 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
                 ret = PTR_ERR(p->clk);
                 goto err1;
         }
+
+        ret = clk_prepare(p->clk);
+        if (ret < 0)
+                goto err2;
+
         p->cs_enabled = false;
         p->enable_count = 0;
 
-        return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
-                               cfg->clockevent_rating,
-                               cfg->clocksource_rating);
+        ret = sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
+                              cfg->clockevent_rating,
+                              cfg->clocksource_rating);
+        if (ret < 0)
+                goto err3;
+
+        return 0;
+
+ err3:
+        clk_unprepare(p->clk);
+ err2:
+        clk_put(p->clk);
  err1:
         iounmap(p->mapbase);
  err0:
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 6abb03dff5c0..08a765232432 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1632,7 +1632,7 @@ module_param(rcu_idle_gp_delay, int, 0644);
 static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
 module_param(rcu_idle_lazy_gp_delay, int, 0644);
 
-extern int tick_nohz_enabled;
+extern int tick_nohz_active;
 
 /*
  * Try to advance callbacks for all flavors of RCU on the current CPU, but
@@ -1729,7 +1729,7 @@ static void rcu_prepare_for_idle(int cpu)
         int tne;
 
         /* Handle nohz enablement switches conservatively. */
-        tne = ACCESS_ONCE(tick_nohz_enabled);
+        tne = ACCESS_ONCE(tick_nohz_active);
         if (tne != rdtp->tick_nohz_enabled_snap) {
                 if (rcu_cpu_has_callbacks(cpu, NULL))
                         invoke_rcu_core(); /* force nohz to see update. */
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 64522ecdfe0e..162b03ab0ad2 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,6 +33,21 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
+
+/*
+ * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
+ * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
+ * variable has two functions:
+ *
+ * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the
+ *    timekeeping lock all at once. Only the CPU which is assigned to do the
+ *    update is handling it.
+ *
+ * 2) Hand off the duty in the NOHZ idle case by setting the value to
+ *    TICK_DO_TIMER_NONE, i.e. a non existing CPU. So the next cpu which looks
+ *    at it will take over and keep the time keeping alive. The handover
+ *    procedure also covers cpu hotplug.
+ */
 int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
 
 /*
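
The comment added above documents the duty-handover protocol. A simplified sketch of the idea, written as if it sat next to the tick core (the real logic is tick_sched_do_timer() in kernel/time/tick-sched.c, which additionally keeps nohz_full CPUs from picking up the duty):

    static void tick_duty_sketch(ktime_t now)
    {
            int cpu = smp_processor_id();

            /*
             * A CPU that stops its tick drops the duty by writing
             * TICK_DO_TIMER_NONE; the next CPU that still ticks
             * picks it up here.
             */
            if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
                    tick_do_timer_cpu = cpu;

            /*
             * Only the duty CPU advances jiffies and the timekeeping
             * core, so the other CPUs never pile up on the jiffies
             * seqlock.
             */
            if (tick_do_timer_cpu == cpu)
                    tick_do_update_jiffies64(now);
    }
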
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3612fc77f834..ea20f7d1ac2c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -361,8 +361,8 @@ void __init tick_nohz_init(void)
 /*
  * NO HZ enabled ?
  */
-int tick_nohz_enabled __read_mostly = 1;
-
+static int tick_nohz_enabled __read_mostly = 1;
+int tick_nohz_active __read_mostly;
 /*
  * Enable / Disable tickless mode
  */
@@ -465,7 +465,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
         struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
         ktime_t now, idle;
 
-        if (!tick_nohz_enabled)
+        if (!tick_nohz_active)
                 return -1;
 
         now = ktime_get();
@@ -506,7 +506,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
         struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
         ktime_t now, iowait;
 
-        if (!tick_nohz_enabled)
+        if (!tick_nohz_active)
                 return -1;
 
         now = ktime_get();
@@ -711,8 +711,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
                 return false;
         }
 
-        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
+        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
+                ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
                 return false;
+        }
 
         if (need_resched())
                 return false;
@@ -799,11 +801,6 @@ void tick_nohz_idle_enter(void)
         local_irq_disable();
 
         ts = &__get_cpu_var(tick_cpu_sched);
-        /*
-         * set ts->inidle unconditionally. even if the system did not
-         * switch to nohz mode the cpu frequency governers rely on the
-         * update of the idle time accounting in tick_nohz_start_idle().
-         */
         ts->inidle = 1;
         __tick_nohz_idle_enter(ts);
 
@@ -973,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
         struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
         ktime_t next;
 
-        if (!tick_nohz_enabled)
+        if (!tick_nohz_active)
                 return;
 
         local_irq_disable();
@@ -981,7 +978,7 @@ static void tick_nohz_switch_to_nohz(void)
                 local_irq_enable();
                 return;
         }
-
+        tick_nohz_active = 1;
         ts->nohz_mode = NOHZ_MODE_LOWRES;
 
         /*
@@ -1139,8 +1136,10 @@ void tick_setup_sched_timer(void)
         }
 
 #ifdef CONFIG_NO_HZ_COMMON
-        if (tick_nohz_enabled)
+        if (tick_nohz_enabled) {
                 ts->nohz_mode = NOHZ_MODE_HIGHRES;
+                tick_nohz_active = 1;
+        }
 #endif
 }
 #endif /* HIGH_RES_TIMERS */
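
Net effect of the tick-sched changes: tick_nohz_enabled becomes a static flag that only records whether nohz was requested (config default or the nohz= boot parameter), while the new tick_nohz_active flips to 1 only once a CPU actually enters NOHZ_MODE_LOWRES or NOHZ_MODE_HIGHRES. Readers such as get_cpu_idle_time_us(), get_cpu_iowait_time_us() and RCU therefore key off statistics that are actually being maintained; the NOHZ_MODE_INACTIVE branch in can_stop_idle_tick() also initializes ts->sleep_length to one tick, presumably so tick_nohz_get_sleep_length() returns a sane value instead of stale data while nohz is not switched on. A small hypothetical caller showing how the -1 sentinel is meant to be treated:

    #include <linux/types.h>
    #include <linux/tick.h>

    /*
     * Hypothetical helper: a -1 return from get_cpu_idle_time_us() now
     * reliably means "nohz idle accounting is not active on this system",
     * so callers can fall back to jiffies-based accounting.
     */
    static bool cpu_idle_stats_available(int cpu)
    {
            return get_cpu_idle_time_us(cpu, NULL) != (u64)-1;
    }
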
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 3abf53418b67..87b4f00284c9 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1347,7 +1347,7 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
         tk->xtime_nsec -= remainder;
         tk->xtime_nsec += 1ULL << tk->shift;
         tk->ntp_error += remainder << tk->ntp_error_shift;
-
+        tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
 }
 #else
 #define old_vsyscall_fixup(tk)
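
The one-line timekeeping change closes an accounting gap in old_vsyscall_fixup(): rounding xtime_nsec up to the next whole (shifted) nanosecond injects 1 << shift of time, but previously only the discarded remainder was fed back into ntp_error. Assuming ntp_error accumulates the residual error in ntp_error_shift units, the per-call bookkeeping now balances (a worked sketch in comment form, not kernel code):

    /*
     * Bookkeeping sketch, in shifted-nanosecond units (assumption: ntp_error
     * tracks the time still owed to / taken from the reported clock):
     *
     *   xtime_nsec += (1 << shift) - remainder;               // round up
     *   ntp_error  += remainder    << ntp_error_shift;        // old line
     *   ntp_error  -= (1 << shift) << ntp_error_shift;        // new line
     *
     * Net: ntp_error changes by -((1 << shift) - remainder) << ntp_error_shift,
     * offsetting the artificial advance so the NTP adjustment code can steer
     * it back out instead of letting the error accumulate.
     */
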
diff --git a/kernel/timer.c b/kernel/timer.c
index 6582b82fa966..accfd241b9e5 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1518,9 +1518,8 @@ static int init_timers_cpu(int cpu)
                 /*
                  * The APs use this path later in boot
                  */
-                base = kmalloc_node(sizeof(*base),
-                                    GFP_KERNEL | __GFP_ZERO,
-                                    cpu_to_node(cpu));
+                base = kzalloc_node(sizeof(*base), GFP_KERNEL,
+                                    cpu_to_node(cpu));
                 if (!base)
                         return -ENOMEM;
 
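
The timer.c hunk is a pure cleanup with no behavioural change: kzalloc_node() is the zeroing wrapper around kmalloc_node(), so the explicit __GFP_ZERO flag becomes implicit. For reference (function name here is illustrative):

    #include <linux/slab.h>

    /* Both spellings return zeroed, node-local memory; kzalloc_node() is
     * the canonical one (see include/linux/slab.h). */
    static void *alloc_zeroed_on_node(size_t size, int node)
    {
            return kzalloc_node(size, GFP_KERNEL, node);
            /* equivalent: kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, node) */
    }
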