author     Thomas Gleixner <tglx@linutronix.de>                  2007-05-08 03:30:03 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-08 14:15:10 -0400
commit     d3ed782458f315c30ea679b919a2cc59f2b82565 (patch)
tree       b87fffc87acf5632566a6384f5c8be8f5c2e03b2 /kernel/time
parent     d5d3b736e3264934ec832a657a9a434b65f3d51f (diff)
highres/dyntick: prevent xtime lock contention
While the !highres/!dyntick code assigns the duty of the do_timer() call to one specific CPU, this was dropped in the highres/dyntick part during development.

Steven Rostedt discovered the xtime lock contention on highres/dyntick due to several CPUs trying to update jiffies.

Add the single CPU assignment back. In the dyntick case this needs to be handled carefully, as the CPU which has the do_timer() duty must drop the assignment and let it be grabbed by another CPU which is active. Otherwise the do_timer() calls would not happen during the long sleep.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Mark Lord <mlord@pobox.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
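To make the hand-off concrete, below is a minimal user-space sketch of the pattern the patch implements: a single owner variable names the CPU allowed to update jiffies, the owner drops the assignment before a long dyntick sleep, and the next CPU whose tick handler runs grabs it. This is an illustration only, not the kernel code; the names (timer_owner, CPU_NONE, cpu_tick, cpu_enter_idle) are hypothetical, and the real patch additionally serializes the concurrent grab and the jiffies update with xtime_lock.

/*
 * Illustrative sketch of the do_timer() duty hand-off, NOT the kernel code.
 * All identifiers here are hypothetical stand-ins for the patch's logic.
 */
#include <stdio.h>

#define CPU_NONE (-1)

static int timer_owner = CPU_NONE;   /* which CPU updates jiffies */
static unsigned long jiffies;

/* Per-tick handler: only the owning CPU advances jiffies. */
static void cpu_tick(int cpu)
{
	/* Duty was dropped by a sleeping CPU: the first CPU to tick grabs it. */
	if (timer_owner == CPU_NONE)
		timer_owner = cpu;

	if (timer_owner == cpu)
		jiffies++;
}

/* Before a long (dyntick) sleep the owner must give the duty away. */
static void cpu_enter_idle(int cpu)
{
	if (timer_owner == cpu)
		timer_owner = CPU_NONE;
}

int main(void)
{
	cpu_tick(0);            /* CPU 0 grabs the duty and updates jiffies */
	cpu_tick(1);            /* CPU 1 ticks but does not touch jiffies   */
	cpu_enter_idle(0);      /* CPU 0 sleeps, drops the duty             */
	cpu_tick(1);            /* CPU 1 grabs the duty, jiffies keep going */
	printf("jiffies=%lu owner=cpu%d\n", jiffies, timer_owner);
	return 0;
}

Note that the CPU which drops the duty before sleeping may well be the one that grabs it back on its next tick; the point is only that some active CPU always ends up owning the jiffies update.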
Diffstat (limited to 'kernel/time')
-rw-r--r--   kernel/time/tick-common.c     8
-rw-r--r--   kernel/time/tick-internal.h   1
-rw-r--r--   kernel/time/tick-sched.c     42
3 files changed, 48 insertions(+), 3 deletions(-)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index bfda3f7f0716..a96ec9ab3454 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
-static int tick_do_timer_cpu = -1;
+int tick_do_timer_cpu __read_mostly = -1;
 DEFINE_SPINLOCK(tick_device_lock);
 
 /*
@@ -295,6 +295,12 @@ static void tick_shutdown(unsigned int *cpup)
 		clockevents_exchange_device(dev, NULL);
 		td->evtdev = NULL;
 	}
+	/* Transfer the do_timer job away from this cpu */
+	if (*cpup == tick_do_timer_cpu) {
+		int cpu = first_cpu(cpu_online_map);
+
+		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
+	}
 	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index c9d203bde518..bb13f2724905 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -5,6 +5,7 @@ DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 extern spinlock_t tick_device_lock;
 extern ktime_t tick_next_period;
 extern ktime_t tick_period;
+extern int tick_do_timer_cpu __read_mostly;
 
 extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
 extern void tick_handle_periodic(struct clock_event_device *dev);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 51556b95f60f..f4fc867f467d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -221,6 +221,18 @@ void tick_nohz_stop_sched_tick(void)
 			ts->tick_stopped = 1;
 			ts->idle_jiffies = last_jiffies;
 		}
+
+		/*
+		 * If this cpu is the one which updates jiffies, then
+		 * give up the assignment and let it be taken by the
+		 * cpu which runs the tick timer next, which might be
+		 * this cpu as well. If we don't drop this here the
+		 * jiffies might be stale and do_timer() never
+		 * invoked.
+		 */
+		if (cpu == tick_do_timer_cpu)
+			tick_do_timer_cpu = -1;
+
 		/*
 		 * calculate the expiry time for the next timer wheel
 		 * timer
@@ -338,12 +350,24 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	struct pt_regs *regs = get_irq_regs();
+	int cpu = smp_processor_id();
 	ktime_t now = ktime_get();
 
 	dev->next_event.tv64 = KTIME_MAX;
 
+	/*
+	 * Check if the do_timer duty was dropped. We don't care about
+	 * concurrency: This happens only when the cpu in charge went
+	 * into a long sleep. If two cpus happen to assign themself to
+	 * this duty, then the jiffies update is still serialized by
+	 * xtime_lock.
+	 */
+	if (unlikely(tick_do_timer_cpu == -1))
+		tick_do_timer_cpu = cpu;
+
 	/* Check, if the jiffies need an update */
-	tick_do_update_jiffies64(now);
+	if (tick_do_timer_cpu == cpu)
+		tick_do_update_jiffies64(now);
 
 	/*
 	 * When we are idle and the tick is stopped, we have to touch
@@ -431,9 +455,23 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 	struct hrtimer_cpu_base *base = timer->base->cpu_base;
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
+	int cpu = smp_processor_id();
+
+#ifdef CONFIG_NO_HZ
+	/*
+	 * Check if the do_timer duty was dropped. We don't care about
+	 * concurrency: This happens only when the cpu in charge went
+	 * into a long sleep. If two cpus happen to assign themself to
+	 * this duty, then the jiffies update is still serialized by
+	 * xtime_lock.
+	 */
+	if (unlikely(tick_do_timer_cpu == -1))
+		tick_do_timer_cpu = cpu;
+#endif
 
 	/* Check, if the jiffies need an update */
-	tick_do_update_jiffies64(now);
+	if (tick_do_timer_cpu == cpu)
+		tick_do_update_jiffies64(now);
 
 	/*
 	 * Do not call, when we are not in irq context and have