Diffstat (limited to 'kernel/time')

 kernel/time/tick-common.c   |  8 +++++++-
 kernel/time/tick-internal.h |  1 +
 kernel/time/tick-sched.c    | 42 ++++++++++++++++++++++++++++++++++++++++++++--
 3 files changed, 48 insertions(+), 3 deletions(-)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index bfda3f7f0716..a96ec9ab3454 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
-static int tick_do_timer_cpu = -1;
+int tick_do_timer_cpu __read_mostly = -1;
 DEFINE_SPINLOCK(tick_device_lock);
 
 /*
@@ -295,6 +295,12 @@ static void tick_shutdown(unsigned int *cpup)
                 clockevents_exchange_device(dev, NULL);
                 td->evtdev = NULL;
         }
+        /* Transfer the do_timer job away from this cpu */
+        if (*cpup == tick_do_timer_cpu) {
+                int cpu = first_cpu(cpu_online_map);
+
+                tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
+        }
         spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
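To make the hotplug handoff above easier to follow, here is a minimal user-space sketch of the same idea, not the kernel code itself: when the CPU that currently owns the do_timer duty is shut down, the duty moves to the first CPU still online, or becomes unassigned (-1) if none is left. The cpu_online array and the first_online_cpu() helper are illustrative stand-ins for the kernel's cpu_online_map and first_cpu().

#include <stdio.h>

#define NR_CPUS 4

/* Illustrative stand-ins for cpu_online_map and first_cpu() */
static int cpu_online[NR_CPUS] = { 1, 1, 1, 1 };
static int tick_do_timer_cpu = 0;       /* CPU currently charged with do_timer() */

static int first_online_cpu(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_online[cpu])
                        return cpu;
        return NR_CPUS;                 /* like first_cpu() on an empty mask */
}

/* Rough analogue of the tick_shutdown() hunk above */
static void cpu_shutdown(int cpup)
{
        cpu_online[cpup] = 0;

        /* Transfer the do_timer job away from this cpu */
        if (cpup == tick_do_timer_cpu) {
                int cpu = first_online_cpu();

                tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
        }
}

int main(void)
{
        cpu_shutdown(0);                /* take the duty owner offline */
        printf("do_timer duty moved to CPU %d\n", tick_do_timer_cpu); /* prints 1 */
        return 0;
}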
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index c9d203bde518..bb13f2724905 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -5,6 +5,7 @@ DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 extern spinlock_t tick_device_lock;
 extern ktime_t tick_next_period;
 extern ktime_t tick_period;
+extern int tick_do_timer_cpu __read_mostly;
 
 extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
 extern void tick_handle_periodic(struct clock_event_device *dev);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 51556b95f60f..f4fc867f467d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -221,6 +221,18 @@ void tick_nohz_stop_sched_tick(void)
                         ts->tick_stopped = 1;
                         ts->idle_jiffies = last_jiffies;
                 }
+
+                /*
+                 * If this cpu is the one which updates jiffies, then
+                 * give up the assignment and let it be taken by the
+                 * cpu which runs the tick timer next, which might be
+                 * this cpu as well. If we don't drop this here the
+                 * jiffies might be stale and do_timer() never
+                 * invoked.
+                 */
+                if (cpu == tick_do_timer_cpu)
+                        tick_do_timer_cpu = -1;
+
                 /*
                  * calculate the expiry time for the next timer wheel
                  * timer
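The hunk above is the "drop" half of the scheme: a CPU that owns the jiffies-update duty gives it up before it stops its tick, so the duty can never be parked on a sleeping CPU. A minimal sketch of that rule, with stop_sched_tick() standing in for tick_nohz_stop_sched_tick() and the timer reprogramming not modeled:

#include <stdio.h>

static int tick_do_timer_cpu = 0;       /* duty owner; -1 means unassigned */

/* Rough analogue of the tick_nohz_stop_sched_tick() hunk above */
static void stop_sched_tick(int cpu)
{
        /*
         * Give up the jiffies-update duty before the tick stops, so it is
         * never left on a CPU that may sleep for a long time.
         */
        if (cpu == tick_do_timer_cpu)
                tick_do_timer_cpu = -1;

        /* (programming of the next wakeup event is not modeled here) */
}

int main(void)
{
        stop_sched_tick(0);             /* CPU 0, the current owner, goes idle */
        printf("duty owner is now %d\n", tick_do_timer_cpu); /* prints -1 */
        return 0;
}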
@@ -338,12 +350,24 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 {
         struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
         struct pt_regs *regs = get_irq_regs();
+        int cpu = smp_processor_id();
         ktime_t now = ktime_get();
 
         dev->next_event.tv64 = KTIME_MAX;
 
+        /*
+         * Check if the do_timer duty was dropped. We don't care about
+         * concurrency: This happens only when the cpu in charge went
+         * into a long sleep. If two cpus happen to assign themself to
+         * this duty, then the jiffies update is still serialized by
+         * xtime_lock.
+         */
+        if (unlikely(tick_do_timer_cpu == -1))
+                tick_do_timer_cpu = cpu;
+
         /* Check, if the jiffies need an update */
-        tick_do_update_jiffies64(now);
+        if (tick_do_timer_cpu == cpu)
+                tick_do_update_jiffies64(now);
 
         /*
          * When we are idle and the tick is stopped, we have to touch
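The hunk above is the matching "reclaim" half: the next CPU whose tick handler runs while the duty is unassigned takes it over, and only the duty owner advances jiffies. A minimal user-space sketch, with nohz_handler() and a plain jiffies64 counter as illustrative stand-ins and without the xtime_lock serialization mentioned in the comment:

#include <stdio.h>

static int tick_do_timer_cpu = -1;      /* duty was dropped by an idle CPU */
static unsigned long jiffies64;         /* stand-in for the jiffies_64 counter */

/* Rough analogue of the tick_nohz_handler() hunk above */
static void nohz_handler(int cpu)
{
        /* First CPU to tick while the duty is unassigned takes it over */
        if (tick_do_timer_cpu == -1)
                tick_do_timer_cpu = cpu;

        /* Only the duty owner advances jiffies; other CPUs skip the update */
        if (tick_do_timer_cpu == cpu)
                jiffies64++;
}

int main(void)
{
        nohz_handler(1);                /* CPU 1 ticks first and claims the duty */
        nohz_handler(0);                /* CPU 0 ticks next, leaves jiffies alone */
        printf("owner=%d jiffies64=%lu\n", tick_do_timer_cpu, jiffies64);
        return 0;                       /* prints owner=1 jiffies64=1 */
}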
@@ -431,9 +455,23 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
         struct hrtimer_cpu_base *base = timer->base->cpu_base;
         struct pt_regs *regs = get_irq_regs();
         ktime_t now = ktime_get();
+        int cpu = smp_processor_id();
+
+#ifdef CONFIG_NO_HZ
+        /*
+         * Check if the do_timer duty was dropped. We don't care about
+         * concurrency: This happens only when the cpu in charge went
+         * into a long sleep. If two cpus happen to assign themself to
+         * this duty, then the jiffies update is still serialized by
+         * xtime_lock.
+         */
+        if (unlikely(tick_do_timer_cpu == -1))
+                tick_do_timer_cpu = cpu;
+#endif
 
         /* Check, if the jiffies need an update */
-        tick_do_update_jiffies64(now);
+        if (tick_do_timer_cpu == cpu)
+                tick_do_update_jiffies64(now);
 
         /*
          * Do not call, when we are not in irq context and have
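The same reclaim check is repeated in the high-resolution tick_sched_timer() path, but wrapped in #ifdef CONFIG_NO_HZ: when dynticks is disabled the duty is never dropped, so the check compiles away while the owner test before the jiffies update remains. A small sketch of that compile-time split, assuming a user-space build where CONFIG_NO_HZ may or may not be defined (for example via -DCONFIG_NO_HZ):

#include <stdio.h>

static int tick_do_timer_cpu = 0;       /* duty owner */

/* Rough analogue of the tick_sched_timer() hunk above */
static void sched_timer(int cpu)
{
#ifdef CONFIG_NO_HZ
        /* Only meaningful with dynticks: reclaim a duty dropped by an idle CPU */
        if (tick_do_timer_cpu == -1)
                tick_do_timer_cpu = cpu;
#endif
        /* Only the duty owner advances jiffies */
        if (tick_do_timer_cpu == cpu)
                printf("CPU %d advances jiffies\n", cpu);
}

int main(void)
{
        sched_timer(0);                 /* owner ticks and updates jiffies */
        sched_timer(1);                 /* non-owner ticks, no update */
        return 0;
}

Built with -DCONFIG_NO_HZ the reclaim branch is compiled in; without it the preprocessor drops it, mirroring the #ifdef in the hunk.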