Diffstat (limited to 'kernel/hrtimer.c')

 kernel/hrtimer.c | 49 +++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 45 insertions(+), 4 deletions(-)

diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 1455b7651b6b..f394d2a42ca3 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -501,6 +501,13 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
 			continue;
 		timer = rb_entry(base->first, struct hrtimer, node);
 		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+		/*
+		 * clock_was_set() has changed base->offset so the
+		 * result might be negative. Fix it up to prevent a
+		 * false positive in clockevents_program_event().
+		 */
+		if (expires.tv64 < 0)
+			expires.tv64 = 0;
 		if (expires.tv64 < cpu_base->expires_next.tv64)
 			cpu_base->expires_next = expires;
 	}
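Why the clamp is needed: clock_was_set() can move base->offset forward past a timer's absolute expiry, so the ktime_sub() above may yield a negative delta, which clockevents_program_event() would treat as an event in the past. A minimal standalone sketch of the same fix-up, with ktime_t modeled as a plain int64_t and next_expiry_ns() an invented name for illustration:

#include <stdint.h>

/*
 * Model of the clamp above: after the wall clock is set, the stored
 * offset can exceed the absolute expiry, making the relative expiry
 * negative. Clamp to 0 so it reads as "fire immediately" rather than
 * as an invalid past event.
 */
static int64_t next_expiry_ns(int64_t abs_expiry_ns, int64_t base_offset_ns)
{
	int64_t expires = abs_expiry_ns - base_offset_ns;

	if (expires < 0)
		expires = 0;
	return expires;
}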
@@ -614,7 +621,9 @@ void clock_was_set(void)
  */
 void hres_timers_resume(void)
 {
-	/* Retrigger the CPU local events: */
+	WARN_ONCE(!irqs_disabled(),
+		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");
+
 	retrigger_next_event(NULL);
 }
 
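The WARN_ONCE() documents a calling convention rather than changing behavior: retrigger_next_event() touches per-CPU clock event state, so the resume path must run with local interrupts disabled. A hedged sketch of the contract the warning enforces (example_resume_path() is an invented name, not the real caller):

static void example_resume_path(void)
{
	unsigned long flags;

	/* The real resume path already runs with IRQs off; this is
	 * the shape the WARN_ONCE() above insists on. */
	local_irq_save(flags);
	hres_timers_resume();
	local_irq_restore(flags);
}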
@@ -1156,6 +1165,29 @@ static void __run_hrtimer(struct hrtimer *timer)
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 
+static int force_clock_reprogram;
+
+/*
+ * After 5 attempted iterations we consider hrtimer_interrupt() to be
+ * hanging, which can happen when something slows down the interrupt
+ * handler, such as tracing. In that case we force clock reprogramming
+ * for every subsequent hrtimer interrupt, to avoid infinite loops, and
+ * we override the min_delta_ns threshold.
+ * The next tick event is scheduled at 3 times the duration we currently
+ * spend in hrtimer_interrupt(), so the CPUs spend at most 1/4 of their
+ * time processing hrtimer interrupts. That is enough to let the system
+ * run without serious starvation.
+ */
+
+static inline void
+hrtimer_interrupt_hanging(struct clock_event_device *dev,
+			  ktime_t try_time)
+{
+	force_clock_reprogram = 1;
+	dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
+	printk(KERN_WARNING "hrtimer: interrupt too slow, "
+	       "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
+}
 /*
  * High resolution timer interrupt
  * Called with interrupts disabled
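The 1/4 figure in the comment is simple arithmetic: if one pass through hrtimer_interrupt() takes t nanoseconds and min_delta_ns is forced to 3t, the next event cannot fire sooner than 3t after reprogramming, so the handler occupies at most t out of every t + 3t = 4t of wall time. A standalone sketch of the backoff rule under those assumptions (plain C, invented names, ktime_t reduced to a 64-bit count):

#include <assert.h>
#include <stdint.h>

/* Backoff rule from hrtimer_interrupt_hanging(): the enforced gap is
 * 3x the time the last interrupt pass took. */
static uint64_t backoff_min_delta_ns(uint64_t irq_time_ns)
{
	return irq_time_ns * 3;
}

int main(void)
{
	uint64_t t = 40000;			/* 40 us spent in the handler */
	uint64_t gap = backoff_min_delta_ns(t);	/* 120 us enforced gap */

	/* worst-case duty cycle: t / (t + gap) = 1/4 of CPU time */
	assert(t + gap == 4 * t);
	return 0;
}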
@@ -1165,6 +1197,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	struct hrtimer_clock_base *base;
 	ktime_t expires_next, now;
+	int nr_retries = 0;
 	int i;
 
 	BUG_ON(!cpu_base->hres_active);
@@ -1172,6 +1205,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	dev->next_event.tv64 = KTIME_MAX;
 
  retry:
+	/* 5 retries is enough to notice a hang */
+	if (!(++nr_retries % 5))
+		hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
+
 	now = ktime_get();
 
 	expires_next.tv64 = KTIME_MAX;
@@ -1224,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
 	/* Reprogramming necessary ? */
 	if (expires_next.tv64 != KTIME_MAX) {
-		if (tick_program_event(expires_next, 0))
+		if (tick_program_event(expires_next, force_clock_reprogram))
 			goto retry;
 	}
 }
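Taken together, these hunks bound the retry loop: nr_retries counts trips through the retry label, every 5th trip declares a hang and triggers the backoff, and force_clock_reprogram makes tick_program_event() succeed even for deltas below the device's old minimum. Note that ktime_sub(ktime_get(), now) measures the previous pass, since now is refreshed just after the check. A runnable control-flow skeleton under those assumptions (process_expired_timers() and program_event() are invented stand-ins for the real base scan and tick_program_event()):

#include <stdio.h>

/* Hypothetical stand-ins: simulate a handler so slow that its first
 * few programming attempts land in the past. */
static void process_expired_timers(void) { }

static int program_event(int force)
{
	static int attempts;

	if (force)
		return 0;		/* forced mode always succeeds */
	return ++attempts < 7;		/* simulate 6 "in the past" failures */
}

int main(void)
{
	int nr_retries = 0, force = 0;

retry:
	if (!(++nr_retries % 5))	/* every 5th pass: assume a hang, */
		force = 1;		/* force reprogramming from now on */

	process_expired_timers();
	if (program_event(force))	/* event already expired: retry */
		goto retry;

	printf("converged after %d passes, force=%d\n", nr_retries, force);
	return 0;
}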
@@ -1467,8 +1504,8 @@ out:
 	return ret;
 }
 
-asmlinkage long
-sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
+SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
+		struct timespec __user *, rmtp)
 {
 	struct timespec tu;
 
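This hunk converts the open-coded sys_nanosleep() declaration to the SYSCALL_DEFINEx macro family added around this time, which generates the asmlinkage entry point and, on architectures that need it, a wrapper that sign-extends 32-bit arguments. A much-simplified model of the token-pasting idea, with MY_SYSCALL_DEFINE2 and the my_sys_ prefix invented for illustration (the real macros live in include/linux/syscalls.h and carry considerably more plumbing):

/* Toy model only: builds a function definition from the syscall name
 * and (type, name) argument pairs, as SYSCALL_DEFINEx does. */
#define MY_SYSCALL_DEFINE2(name, t1, a1, t2, a2)	\
	long my_sys_##name(t1 a1, t2 a2)

MY_SYSCALL_DEFINE2(nanosleep_demo, const char *, req, char *, rem)
{
	(void)req;			/* real code would copy the request */
	(void)rem;			/* from / remainder to user space */
	return 0;
}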
@@ -1578,6 +1615,10 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	{
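The new CPU_DYING/CPU_DYING_FROZEN cases run earlier in the hotplug teardown than CPU_DEAD: CPU_DYING is delivered on the dying CPU itself with interrupts disabled, which lets clockevents hand off that CPU's event device while the CPU can still run code, before the existing CPU_DEAD path migrates its pending hrtimers from a surviving CPU. A reduced schematic of that ordering, in kernel context (the function name and case bodies are illustrative, not the full notifier):

/* Schematic of the two hotplug stages touched here. */
static void hotplug_teardown_schematic(unsigned long action, int scpu)
{
	switch (action) {
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/* on the dying CPU, IRQs off: release its clock
		 * event device early */
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* later, on a surviving CPU: migrate the dead CPU's
		 * queued hrtimers to a live base */
		break;
	}
}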
