diff options
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r-- | kernel/hrtimer.c | 30 |
1 file changed, 29 insertions, 1 deletion
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index f33afb0407bc..8fea312ca36c 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -1158,6 +1158,29 @@ static void __run_hrtimer(struct hrtimer *timer) | |||
1158 | 1158 | ||
1159 | #ifdef CONFIG_HIGH_RES_TIMERS | 1159 | #ifdef CONFIG_HIGH_RES_TIMERS |
1160 | 1160 | ||
1161 | static int force_clock_reprogram; | ||
1162 | |||
1163 | /* | ||
1164 | * After 5 retry attempts, we consider that hrtimer_interrupt() | ||
1165 | * is hanging, which can happen when something slows down the interrupt, | ||
1166 | * such as tracing. We then force clock reprogramming for each future | ||
1167 | * hrtimer interrupt to avoid infinite loops, and overwrite the | ||
1168 | * min_delta_ns threshold. | ||
1169 | * The next tick event will be scheduled 3 times further out than the | ||
1170 | * time currently spent in hrtimer_interrupt(). This gives a good | ||
1171 | * compromise: the CPUs will spend 1/4 of their time processing hrtimer | ||
1172 | * interrupts, which is enough to keep running without serious starvation. | ||
1173 | */ | ||
1174 | |||
1175 | static inline void | ||
1176 | hrtimer_interrupt_hanging(struct clock_event_device *dev, | ||
1177 | ktime_t try_time) | ||
1178 | { | ||
1179 | force_clock_reprogram = 1; | ||
1180 | dev->min_delta_ns = (unsigned long)try_time.tv64 * 3; | ||
1181 | printk(KERN_WARNING "hrtimer: interrupt too slow, " | ||
1182 | "forcing clock min delta to %lu ns\n", dev->min_delta_ns); | ||
1183 | } | ||
1161 | /* | 1184 | /* |
1162 | * High resolution timer interrupt | 1185 | * High resolution timer interrupt |
1163 | * Called with interrupts disabled | 1186 | * Called with interrupts disabled |
@@ -1167,6 +1190,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) | |||
1167 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | 1190 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
1168 | struct hrtimer_clock_base *base; | 1191 | struct hrtimer_clock_base *base; |
1169 | ktime_t expires_next, now; | 1192 | ktime_t expires_next, now; |
1193 | int nr_retries = 0; | ||
1170 | int i; | 1194 | int i; |
1171 | 1195 | ||
1172 | BUG_ON(!cpu_base->hres_active); | 1196 | BUG_ON(!cpu_base->hres_active); |
@@ -1174,6 +1198,10 @@ void hrtimer_interrupt(struct clock_event_device *dev) | |||
1174 | dev->next_event.tv64 = KTIME_MAX; | 1198 | dev->next_event.tv64 = KTIME_MAX; |
1175 | 1199 | ||
1176 | retry: | 1200 | retry: |
1201 | /* 5 retries is enough to notice a hang */ | ||
1202 | if (!(++nr_retries % 5)) | ||
1203 | hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now)); | ||
1204 | |||
1177 | now = ktime_get(); | 1205 | now = ktime_get(); |
1178 | 1206 | ||
1179 | expires_next.tv64 = KTIME_MAX; | 1207 | expires_next.tv64 = KTIME_MAX; |
@@ -1226,7 +1254,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) | |||
1226 | 1254 | ||
1227 | /* Reprogramming necessary ? */ | 1255 | /* Reprogramming necessary ? */ |
1228 | if (expires_next.tv64 != KTIME_MAX) { | 1256 | if (expires_next.tv64 != KTIME_MAX) { |
1229 | if (tick_program_event(expires_next, 0)) | 1257 | if (tick_program_event(expires_next, force_clock_reprogram)) |
1230 | goto retry; | 1258 | goto retry; |
1231 | } | 1259 | } |
1232 | } | 1260 | } |