Diffstat (limited to 'kernel')
-rw-r--r--  kernel/hrtimer.c            34
-rw-r--r--  kernel/time/tick-common.c   26
2 files changed, 52 insertions, 8 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 2dc30c59c5fd..2c40ee8f44bd 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1156,6 +1156,29 @@ static void __run_hrtimer(struct hrtimer *timer)
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 
+static int force_clock_reprogram;
+
+/*
+ * After 5 retries we consider that hrtimer_interrupt() is hanging,
+ * which can happen when something slows down the interrupt handler,
+ * such as tracing. In that case we force clock reprogramming for
+ * every future hrtimer interrupt to avoid infinite loops, using a
+ * min_delta_ns threshold that we overwrite below.
+ * The next tick event is scheduled at 3 times the time currently
+ * spent in hrtimer_interrupt(), so a cpu spends at most 1/4 of its
+ * time processing hrtimer interrupts. This is enough to let the
+ * system run without serious starvation.
+ */
+
+static inline void
+hrtimer_interrupt_hanging(struct clock_event_device *dev,
+                        ktime_t try_time)
+{
+        force_clock_reprogram = 1;
+        dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
+        printk(KERN_WARNING "hrtimer: interrupt too slow, "
+                "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
+}
 /*
  * High resolution timer interrupt
  * Called with interrupts disabled
@@ -1165,6 +1188,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
         struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
         struct hrtimer_clock_base *base;
         ktime_t expires_next, now;
+        int nr_retries = 0;
         int i;
 
         BUG_ON(!cpu_base->hres_active);
@@ -1172,6 +1196,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
         dev->next_event.tv64 = KTIME_MAX;
 
 retry:
+        /* 5 retries is enough to notice a hang */
+        if (!(++nr_retries % 5))
+                hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
+
         now = ktime_get();
 
         expires_next.tv64 = KTIME_MAX;
@@ -1224,7 +1252,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
         /* Reprogramming necessary ? */
         if (expires_next.tv64 != KTIME_MAX) {
-                if (tick_program_event(expires_next, 0))
+                if (tick_program_event(expires_next, force_clock_reprogram))
                         goto retry;
         }
 }
@@ -1578,6 +1606,10 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
                 break;
 
 #ifdef CONFIG_HOTPLUG_CPU
+        case CPU_DYING:
+        case CPU_DYING_FROZEN:
+                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
+                break;
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
         {
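
As a rough illustration of the retry bookkeeping above, here is a minimal
userspace sketch of the same policy. The names fake_event_device,
program_event and interrupt_hanging are hypothetical stand-ins for the
kernel's clock_event_device, tick_program_event() and
hrtimer_interrupt_hanging(), and the timing is simulated. After every 5th
failed attempt to program the next event, the minimum delta is forced to
3 times the duration of the last pass through the handler, so the handler
can consume at most about 1/4 of the cpu:

/*
 * Userspace sketch of the hang-detection policy (assumed names,
 * simulated time; not kernel code).
 */
#include <stdio.h>
#include <stdint.h>

struct fake_event_device {
        uint64_t min_delta_ns;          /* smallest programmable delta */
};

static int force_reprogram;

/* Mirrors hrtimer_interrupt_hanging(): widen min_delta_ns to 3x the
 * time the last pass took, and force programming from now on. */
static void interrupt_hanging(struct fake_event_device *dev, uint64_t spent_ns)
{
        force_reprogram = 1;
        dev->min_delta_ns = spent_ns * 3;
        printf("interrupt too slow, forcing min delta to %llu ns\n",
               (unsigned long long)dev->min_delta_ns);
}

/* Returns nonzero when the expiry is too close to program, which is
 * what sends the real hrtimer_interrupt() back to its retry label. */
static int program_event(struct fake_event_device *dev, uint64_t expires,
                         uint64_t now, int force)
{
        if (!force && expires < now + dev->min_delta_ns)
                return 1;
        return 0;
}

int main(void)
{
        struct fake_event_device dev = { .min_delta_ns = 1000 };
        uint64_t now = 0, prev = 0;
        int nr_retries = 0;

retry:
        /* 5 retries is enough to notice a hang */
        if (!(++nr_retries % 5))
                interrupt_hanging(&dev, now - prev);

        prev = now;
        now += 2000;                    /* each pass burns 2 us */

        /* The expiry keeps landing too close until we force it. */
        if (program_event(&dev, now + 500, now, force_reprogram))
                goto retry;

        printf("programmed after %d tries\n", nr_retries);
        return 0;
}

Note the modulo test rather than a one-shot threshold: if the handler is
still too slow, the delta is widened again every 5 further retries.
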
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 63e05d423a09..21a5ca849514 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -274,6 +274,21 @@ out_bc:
 }
 
 /*
+ * Transfer the do_timer job away from a dying cpu.
+ *
+ * Called with interrupts disabled.
+ */
+static void tick_handover_do_timer(int *cpup)
+{
+        if (*cpup == tick_do_timer_cpu) {
+                int cpu = cpumask_first(cpu_online_mask);
+
+                tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
+                        TICK_DO_TIMER_NONE;
+        }
+}
+
+/*
  * Shutdown an event device on a given cpu:
  *
  * This is called on a live CPU, when a CPU is dead. So we cannot
@@ -297,13 +312,6 @@ static void tick_shutdown(unsigned int *cpup)
                 clockevents_exchange_device(dev, NULL);
                 td->evtdev = NULL;
         }
-        /* Transfer the do_timer job away from this cpu */
-        if (*cpup == tick_do_timer_cpu) {
-                int cpu = cpumask_first(cpu_online_mask);
-
-                tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
-                        TICK_DO_TIMER_NONE;
-        }
         spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
@@ -357,6 +365,10 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
                 tick_broadcast_oneshot_control(reason);
                 break;
 
+        case CLOCK_EVT_NOTIFY_CPU_DYING:
+                tick_handover_do_timer(dev);
+                break;
+
         case CLOCK_EVT_NOTIFY_CPU_DEAD:
                 tick_shutdown_broadcast_oneshot(dev);
                 tick_shutdown_broadcast(dev);
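
On the tick-common.c side, the handover itself is a small cpumask walk.
A minimal userspace sketch, with a plain bit mask standing in for
cpu_online_mask and the hypothetical constants NR_CPUS and DO_TIMER_NONE
replacing the kernel's nr_cpu_ids and TICK_DO_TIMER_NONE:

/*
 * Userspace sketch of tick_handover_do_timer() (assumed names and a
 * toy online mask; not kernel code).
 */
#include <stdio.h>

#define NR_CPUS         8
#define DO_TIMER_NONE   -1      /* stand-in for TICK_DO_TIMER_NONE */

static unsigned int online_mask = 0x0d; /* cpus 0, 2 and 3 online */
static int do_timer_cpu;                /* cpu 0 owns do_timer */

/* Stand-in for cpumask_first(): lowest set bit, or NR_CPUS if empty. */
static int first_online(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (online_mask & (1u << cpu))
                        return cpu;
        return NR_CPUS;
}

/* Mirrors tick_handover_do_timer(), run for the dying cpu after it
 * has been cleared from the online mask. */
static void handover_do_timer(int dying_cpu)
{
        online_mask &= ~(1u << dying_cpu);

        if (dying_cpu == do_timer_cpu) {
                int cpu = first_online();

                do_timer_cpu = (cpu < NR_CPUS) ? cpu : DO_TIMER_NONE;
        }
}

int main(void)
{
        handover_do_timer(0);   /* cpu 0 dies while owning do_timer */
        printf("do_timer moved to cpu %d\n", do_timer_cpu);     /* cpu 2 */
        return 0;
}

As the diff shows, the handover now runs from the CLOCK_EVT_NOTIFY_CPU_DYING
notification, with interrupts disabled, before the CPU_DEAD path reaches
tick_shutdown(), which is why the open-coded block could move out of
tick_shutdown() above.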