 include/linux/clockchips.h |  1 +
 kernel/hrtimer.c           | 41 ++++++++++++++++++++++++++++++++++++++++-
 kernel/time/tick-common.c  | 26 +++++++++++++++++++-------
 3 files changed, 60 insertions(+), 8 deletions(-)
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index cea153697ec7..3a1dbba4d3ae 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -36,6 +36,7 @@ enum clock_event_nofitiers
 	CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
 	CLOCK_EVT_NOTIFY_SUSPEND,
 	CLOCK_EVT_NOTIFY_RESUME,
+	CLOCK_EVT_NOTIFY_CPU_DYING,
 	CLOCK_EVT_NOTIFY_CPU_DEAD,
 };
 
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f33afb0407bc..f394d2a42ca3 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -501,6 +501,13 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
 			continue;
 		timer = rb_entry(base->first, struct hrtimer, node);
 		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+		/*
+		 * clock_was_set() has changed base->offset so the
+		 * result might be negative. Fix it up to prevent a
+		 * false positive in clockevents_program_event()
+		 */
+		if (expires.tv64 < 0)
+			expires.tv64 = 0;
 		if (expires.tv64 < cpu_base->expires_next.tv64)
 			cpu_base->expires_next = expires;
 	}
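The clamp added above matters because clock_was_set() can move base->offset past a timer's expiry, so the ktime_sub() result can go negative, and clockevents_program_event() would then treat the event as already expired and fail. A minimal userspace sketch of the same guard, using plain int64_t nanoseconds in place of ktime_t (all names here are illustrative, not kernel API):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace model: values are nanoseconds, like ktime_t.tv64. */
    static int64_t clamp_expiry(int64_t expires, int64_t offset)
    {
        int64_t delta = expires - offset;   /* what ktime_sub() computes */

        /*
         * If the clock was set forward, offset can exceed the expiry,
         * so the result goes negative; clamp it to "expire now".
         */
        if (delta < 0)
            delta = 0;
        return delta;
    }

    int main(void)
    {
        /* Timer armed 100us out, then the clock jumped forward 1ms. */
        printf("%lld\n", (long long)clamp_expiry(100000, 1000000)); /* 0 */
        printf("%lld\n", (long long)clamp_expiry(100000, 20000));   /* 80000 */
        return 0;
    }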
@@ -1158,6 +1165,29 @@ static void __run_hrtimer(struct hrtimer *timer)
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 
+static int force_clock_reprogram;
+
+/*
+ * After 5 iteration's attempts, we consider that hrtimer_interrupt()
+ * is hanging, which could happen with something that slows the interrupt
+ * such as the tracing. Then we force the clock reprogramming for each future
+ * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
+ * threshold that we will overwrite.
+ * The next tick event will be scheduled to 3 times we currently spend on
+ * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
+ * 1/4 of their time to process the hrtimer interrupts. This is enough to
+ * let it running without serious starvation.
+ */
+
+static inline void
+hrtimer_interrupt_hanging(struct clock_event_device *dev,
+			ktime_t try_time)
+{
+	force_clock_reprogram = 1;
+	dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
+	printk(KERN_WARNING "hrtimer: interrupt too slow, "
+		"forcing clock min delta to %lu ns\n", dev->min_delta_ns);
+}
 /*
  * High resolution timer interrupt
  * Called with interrupts disabled
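The 3x factor chosen in hrtimer_interrupt_hanging() is where the "1/4 of their time" figure in the comment comes from: if one pass through the handler takes t, the next event is programmed at least 3t into the future, so the handler occupies t out of every t + 3t of wall time. A standalone sketch of that arithmetic (hypothetical names, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Derive the duty cycle implied by min_delta_ns = try_time * 3. */
    static double interrupt_duty_cycle(uint64_t handler_ns)
    {
        uint64_t min_delta_ns = handler_ns * 3;

        /* t of work followed by at least 3t of gap: t / (t + 3t). */
        return (double)handler_ns / (double)(handler_ns + min_delta_ns);
    }

    int main(void)
    {
        printf("%.2f\n", interrupt_duty_cycle(50000)); /* always 0.25 */
        return 0;
    }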
@@ -1167,6 +1197,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	struct hrtimer_clock_base *base;
 	ktime_t expires_next, now;
+	int nr_retries = 0;
 	int i;
 
 	BUG_ON(!cpu_base->hres_active);
@@ -1174,6 +1205,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	dev->next_event.tv64 = KTIME_MAX;
 
 retry:
+	/* 5 retries is enough to notice a hang */
+	if (!(++nr_retries % 5))
+		hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
+
 	now = ktime_get();
 
 	expires_next.tv64 = KTIME_MAX;
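Two details of this retry path are easy to miss: nr_retries is incremented before the modulo test, so the hang handler fires on every fifth pass (5, 10, 15, ...) rather than only once, and ktime_sub(ktime_get(), now) uses the now timestamp left over from the previous pass, so it measures roughly how long the last iteration took. A small sketch of the trigger pattern:

    #include <stdio.h>

    int main(void)
    {
        int nr_retries = 0;

        /* Same trigger as the patch: fires on passes 5, 10, ... */
        for (int pass = 0; pass < 12; pass++) {
            if (!(++nr_retries % 5))
                printf("hang handler fires on retry %d\n", nr_retries);
        }
        return 0;
    }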
@@ -1226,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
 	/* Reprogramming necessary ? */
 	if (expires_next.tv64 != KTIME_MAX) {
-		if (tick_program_event(expires_next, 0))
+		if (tick_program_event(expires_next, force_clock_reprogram))
 			goto retry;
 	}
 }
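The second argument of tick_program_event() is a force flag. With 0, programming an expiry that is already in the past fails and hrtimer_interrupt() jumps back to retry:, which is the livelock this patch is breaking; once force_clock_reprogram is set, the clockevents layer keeps pushing the expiry forward until programming succeeds. A rough userspace model of the difference (simplified; it assumes the retry-with-min-delta behaviour that lives in tick_dev_program_event() and clockevents_program_event()):

    #include <stdint.h>
    #include <stdio.h>

    #define ETIME 1

    static int64_t now_ns = 1000;       /* fake clock */
    static int64_t min_delta_ns = 100;

    /* Fails with -ETIME if the requested expiry is not in the future. */
    static int program_event(int64_t expires)
    {
        return expires <= now_ns ? -ETIME : 0;
    }

    /* Simplified model of tick_program_event(expires, force). */
    static int tick_program_event_model(int64_t expires, int force)
    {
        for (;;) {
            int ret = program_event(expires);

            if (ret == 0 || !force)
                return ret;
            /* Forced: push the expiry forward and try again. */
            expires = now_ns + min_delta_ns;
        }
    }

    int main(void)
    {
        printf("force=0: %d\n", tick_program_event_model(900, 0)); /* -1, caller retries */
        printf("force=1: %d\n", tick_program_event_model(900, 1)); /* 0, pushed forward */
        return 0;
    }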
@@ -1580,6 +1615,10 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	{
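The new cases hook the CPU_DYING notification, which, unlike CPU_DEAD (delivered later on a surviving CPU), runs on the dying CPU itself with interrupts disabled, early enough to hand work off before that CPU's tick stops. A compact sketch of the dispatch shape added here (model names only, not the kernel notifier machinery):

    #include <stdio.h>

    enum clock_event_notify { NOTIFY_CPU_DYING, NOTIFY_CPU_DEAD };
    enum cpu_action { ACT_CPU_DYING, ACT_CPU_DEAD };

    /* Stand-in for clockevents_notify(): records what would be sent. */
    static void clockevents_notify_model(enum clock_event_notify reason, int cpu)
    {
        printf("notify %s for cpu %d\n",
               reason == NOTIFY_CPU_DYING ? "CPU_DYING" : "CPU_DEAD", cpu);
    }

    /* Sketch of the hotplug callback's new shape. */
    static void hrtimer_cpu_notify_model(enum cpu_action action, int scpu)
    {
        switch (action) {
        case ACT_CPU_DYING:     /* runs on the dying CPU, irqs off */
            clockevents_notify_model(NOTIFY_CPU_DYING, scpu);
            break;
        case ACT_CPU_DEAD:      /* runs later, on a surviving CPU */
            clockevents_notify_model(NOTIFY_CPU_DEAD, scpu);
            break;
        }
    }

    int main(void)
    {
        hrtimer_cpu_notify_model(ACT_CPU_DYING, 2);
        hrtimer_cpu_notify_model(ACT_CPU_DEAD, 2);
        return 0;
    }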
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 63e05d423a09..21a5ca849514 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -274,6 +274,21 @@ out_bc:
 }
 
 /*
+ * Transfer the do_timer job away from a dying cpu.
+ *
+ * Called with interrupts disabled.
+ */
+static void tick_handover_do_timer(int *cpup)
+{
+	if (*cpup == tick_do_timer_cpu) {
+		int cpu = cpumask_first(cpu_online_mask);
+
+		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
+			TICK_DO_TIMER_NONE;
+	}
+}
+
+/*
  * Shutdown an event device on a given cpu:
  *
  * This is called on a life CPU, when a CPU is dead. So we cannot
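tick_handover_do_timer() only acts if the dying CPU currently owns do_timer; it then picks the first CPU left in cpu_online_mask, and if none were found (cpu == nr_cpu_ids) ownership would park as TICK_DO_TIMER_NONE until another CPU claims it. The same decision modeled on a plain bitmask (hypothetical helpers, not the kernel cpumask API):

    #include <stdio.h>

    #define NR_CPUS 8
    #define TICK_DO_TIMER_NONE -1

    /* Model of cpumask_first(): lowest set bit, or NR_CPUS if empty. */
    static int first_online(unsigned online_mask)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            if (online_mask & (1u << cpu))
                return cpu;
        return NR_CPUS;
    }

    int main(void)
    {
        int tick_do_timer_cpu = 3;  /* the dying CPU currently owns do_timer */
        int dying_cpu = 3;
        unsigned online = 0x05;     /* CPUs 0 and 2 remain online */

        /* Same decision as tick_handover_do_timer(). */
        if (dying_cpu == tick_do_timer_cpu) {
            int cpu = first_online(online);

            tick_do_timer_cpu = (cpu < NR_CPUS) ? cpu : TICK_DO_TIMER_NONE;
        }
        printf("do_timer moves to cpu %d\n", tick_do_timer_cpu); /* 0 */
        return 0;
    }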
@@ -297,13 +312,6 @@ static void tick_shutdown(unsigned int *cpup)
 		clockevents_exchange_device(dev, NULL);
 		td->evtdev = NULL;
 	}
-	/* Transfer the do_timer job away from this cpu */
-	if (*cpup == tick_do_timer_cpu) {
-		int cpu = cpumask_first(cpu_online_mask);
-
-		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
-			TICK_DO_TIMER_NONE;
-	}
 	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
@@ -357,6 +365,10 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
 		tick_broadcast_oneshot_control(reason);
 		break;
 
+	case CLOCK_EVT_NOTIFY_CPU_DYING:
+		tick_handover_do_timer(dev);
+		break;
+
 	case CLOCK_EVT_NOTIFY_CPU_DEAD:
 		tick_shutdown_broadcast_oneshot(dev);
 		tick_shutdown_broadcast(dev);
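Taken together, the pieces of this patch form one path: the CPU_DYING hotplug callback in kernel/hrtimer.c calls clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, ...), which reaches tick_notify() above and invokes tick_handover_do_timer() while the dying CPU is still running; the old handover in tick_shutdown(), which ran only after the CPU was dead, is removed. A minimal end-to-end model of that routing (all names local to the sketch):

    #include <stdio.h>

    enum notify_reason { NOTIFY_CPU_DYING, NOTIFY_CPU_DEAD };

    static void tick_handover_do_timer_model(int *cpup)
    {
        printf("handing do_timer away from cpu %d\n", *cpup);
    }

    /* Model of tick_notify(): the new case routes DYING to the handover. */
    static void tick_notify_model(enum notify_reason reason, void *dev)
    {
        switch (reason) {
        case NOTIFY_CPU_DYING:
            tick_handover_do_timer_model(dev);
            break;
        case NOTIFY_CPU_DEAD:
            /* broadcast teardown and device shutdown, as before */
            break;
        }
    }

    int main(void)
    {
        int dying_cpu = 3;

        /* What clockevents_notify() ends up invoking for the new reason. */
        tick_notify_model(NOTIFY_CPU_DYING, &dying_cpu);
        return 0;
    }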
