Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c | 225
1 file changed, 137 insertions(+), 88 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index e5d98ce50f89..0086628b6e97 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -127,11 +127,11 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 	for (;;) {
 		base = timer->base;
 		if (likely(base != NULL)) {
-			spin_lock_irqsave(&base->cpu_base->lock, *flags);
+			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 			if (likely(base == timer->base))
 				return base;
 			/* The timer has migrated to another CPU: */
-			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
+			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
 		}
 		cpu_relax();
 	}
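Almost every hunk in this patch makes the same mechanical change: cpu_base->lock is converted to a raw spinlock, so each spin_lock*/spin_unlock* call on it gains a raw_ prefix. The usual motivation is that this lock is taken with interrupts disabled, including from the timer interrupt itself, so it has to remain a true spinning lock even on PREEMPT_RT, where plain spinlock_t may become a sleeping lock. A minimal kernel-style sketch of the converted pattern (demo_cpu_base and demo_lock_pattern are illustrative names, not hrtimer code):

#include <linux/spinlock.h>

/* Illustrative per-CPU base; the lock stays a spinning lock on PREEMPT_RT. */
struct demo_cpu_base {
	raw_spinlock_t lock;
};

static struct demo_cpu_base demo_base = {
	.lock = __RAW_SPIN_LOCK_UNLOCKED(demo_base.lock),
};

static void demo_lock_pattern(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_base.lock, flags);
	/* ... critical section, hard interrupts off ... */
	raw_spin_unlock_irqrestore(&demo_base.lock, flags);
}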
@@ -208,13 +208,13 @@ again:
 
 		/* See the comment in lock_timer_base() */
 		timer->base = NULL;
-		spin_unlock(&base->cpu_base->lock);
-		spin_lock(&new_base->cpu_base->lock);
+		raw_spin_unlock(&base->cpu_base->lock);
+		raw_spin_lock(&new_base->cpu_base->lock);
 
 		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
 			cpu = this_cpu;
-			spin_unlock(&new_base->cpu_base->lock);
-			spin_lock(&base->cpu_base->lock);
+			raw_spin_unlock(&new_base->cpu_base->lock);
+			raw_spin_lock(&base->cpu_base->lock);
 			timer->base = base;
 			goto again;
 		}
@@ -230,7 +230,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 {
 	struct hrtimer_clock_base *base = timer->base;
 
-	spin_lock_irqsave(&base->cpu_base->lock, *flags);
+	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 
 	return base;
 }
@@ -509,13 +509,14 @@ static inline int hrtimer_hres_active(void)
  * next event
  * Called with interrupts disabled and base->lock held
  */
-static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
+static void
+hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
 	int i;
 	struct hrtimer_clock_base *base = cpu_base->clock_base;
-	ktime_t expires;
+	ktime_t expires, expires_next;
 
-	cpu_base->expires_next.tv64 = KTIME_MAX;
+	expires_next.tv64 = KTIME_MAX;
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
 		struct hrtimer *timer;
@@ -531,10 +532,15 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
 		 */
 		if (expires.tv64 < 0)
 			expires.tv64 = 0;
-		if (expires.tv64 < cpu_base->expires_next.tv64)
-			cpu_base->expires_next = expires;
+		if (expires.tv64 < expires_next.tv64)
+			expires_next = expires;
 	}
 
+	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
+		return;
+
+	cpu_base->expires_next.tv64 = expires_next.tv64;
+
 	if (cpu_base->expires_next.tv64 != KTIME_MAX)
 		tick_program_event(cpu_base->expires_next, 1);
 }
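With the new skip_equal argument, hrtimer_force_reprogram() first collects the earliest expiry of all clock bases into a local expires_next and only updates cpu_base and the clock event device when that value actually changed. A small self-contained sketch of the recompute-and-compare pattern (plain int64_t times and the reprogram_hw() helper are illustrative simplifications, not kernel APIs):

#include <stdint.h>
#include <stdio.h>

#define DEMO_KTIME_MAX INT64_MAX

static int64_t programmed_expiry = DEMO_KTIME_MAX;	/* what the "hardware" is armed for */

static void reprogram_hw(int64_t expires)
{
	printf("program next event for %lld ns\n", (long long)expires);
}

/* Recompute the earliest expiry; reprogram only when it changed. */
static void force_reprogram(const int64_t *expiries, int n, int skip_equal)
{
	int64_t next = DEMO_KTIME_MAX;

	for (int i = 0; i < n; i++)
		if (expiries[i] < next)
			next = expiries[i];

	if (skip_equal && next == programmed_expiry)
		return;			/* nothing moved, skip the expensive reprogram */

	programmed_expiry = next;
	if (next != DEMO_KTIME_MAX)
		reprogram_hw(next);
}

int main(void)
{
	int64_t pending[] = { 5000, 2000, 9000 };

	force_reprogram(pending, 3, 1);	/* arms the 2000 ns event */
	force_reprogram(pending, 3, 1);	/* unchanged, returns early */
	return 0;
}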
@@ -551,7 +557,7 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
 static int hrtimer_reprogram(struct hrtimer *timer,
 			     struct hrtimer_clock_base *base)
 {
-	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
+	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 	int res;
 
@@ -576,7 +582,16 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 	if (expires.tv64 < 0)
 		return -ETIME;
 
-	if (expires.tv64 >= expires_next->tv64)
+	if (expires.tv64 >= cpu_base->expires_next.tv64)
+		return 0;
+
+	/*
+	 * If a hang was detected in the last timer interrupt then we
+	 * do not schedule a timer which is earlier than the expiry
+	 * which we enforced in the hang detection. We want the system
+	 * to make progress.
+	 */
+	if (cpu_base->hang_detected)
 		return 0;
 
 	/*
@@ -584,7 +599,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 	 */
 	res = tick_program_event(expires, 0);
 	if (!IS_ERR_VALUE(res))
-		*expires_next = expires;
+		cpu_base->expires_next = expires;
 	return res;
 }
 
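Handing hrtimer_reprogram() the whole cpu_base instead of a bare expires_next pointer gives it access to the new hang_detected flag: while the interrupt handler is backing off after a detected hang (see the hrtimer_interrupt() hunks further down), a newly armed timer is not allowed to pull the next event earlier again. A simplified sketch of that gate, with plain integers standing in for ktime_t and a hypothetical demo_cpu_base:

#include <stdint.h>
#include <stdbool.h>

struct demo_cpu_base {
	int64_t expires_next;	/* expiry the event device is currently armed for */
	bool hang_detected;	/* set by the interrupt path when it had to bail out */
};

/* Returns nonzero when the hardware should be reprogrammed to an earlier event. */
static int demo_reprogram(struct demo_cpu_base *cpu_base, int64_t expires)
{
	if (expires >= cpu_base->expires_next)
		return 0;	/* an earlier or equal event is already armed */

	/*
	 * While a hang is being backed off, refuse to move the next
	 * event earlier; the enforced delay must survive so the
	 * system can make progress.
	 */
	if (cpu_base->hang_detected)
		return 0;

	cpu_base->expires_next = expires;
	return 1;
}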
@@ -613,12 +628,12 @@ static void retrigger_next_event(void *arg)
 	base = &__get_cpu_var(hrtimer_bases);
 
 	/* Adjust CLOCK_REALTIME offset */
-	spin_lock(&base->lock);
+	raw_spin_lock(&base->lock);
 	base->clock_base[CLOCK_REALTIME].offset =
 		timespec_to_ktime(realtime_offset);
 
-	hrtimer_force_reprogram(base);
-	spin_unlock(&base->lock);
+	hrtimer_force_reprogram(base, 0);
+	raw_spin_unlock(&base->lock);
 }
 
 /*
@@ -679,9 +694,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 {
 	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
 		if (wakeup) {
-			spin_unlock(&base->cpu_base->lock);
+			raw_spin_unlock(&base->cpu_base->lock);
 			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-			spin_lock(&base->cpu_base->lock);
+			raw_spin_lock(&base->cpu_base->lock);
 		} else
 			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 
@@ -720,8 +735,6 @@ static int hrtimer_switch_to_hres(void)
 	/* "Retrigger" the interrupt to get things going */
 	retrigger_next_event(NULL);
 	local_irq_restore(flags);
-	printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
-	       smp_processor_id());
 	return 1;
 }
 
@@ -730,7 +743,8 @@ static int hrtimer_switch_to_hres(void)
 static inline int hrtimer_hres_active(void) { return 0; }
 static inline int hrtimer_is_hres_enabled(void) { return 0; }
 static inline int hrtimer_switch_to_hres(void) { return 0; }
-static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
+static inline void
+hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 					    struct hrtimer_clock_base *base,
 					    int wakeup)
@@ -742,17 +756,33 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
-#ifdef CONFIG_TIMER_STATS
-void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
+static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
 {
+#ifdef CONFIG_TIMER_STATS
 	if (timer->start_site)
 		return;
-
-	timer->start_site = addr;
+	timer->start_site = __builtin_return_address(0);
 	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
 	timer->start_pid = current->pid;
+#endif
+}
+
+static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+	timer->start_site = NULL;
+#endif
 }
+
+static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+	if (likely(!timer_stats_active))
+		return;
+	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
+				 timer->function, timer->start_comm, 0);
 #endif
+}
 
 /*
  * Counterpart to lock_hrtimer_base above:
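The timer-stats helpers become always-present static inlines with the #ifdef CONFIG_TIMER_STATS moved inside the body, so callers no longer need their own conditional compilation and the helpers compile away when the option is off. A generic sketch of that pattern, assuming a made-up CONFIG_DEMO_STATS option and demo_* names:

#include <stdio.h>

struct demo_timer {
	const char *name;
	unsigned long fired;
};

/* The helper always exists; only its body is conditional. */
static inline void demo_stats_account(struct demo_timer *t)
{
#ifdef CONFIG_DEMO_STATS
	t->fired++;
	printf("%s fired %lu times\n", t->name, t->fired);
#else
	(void)t;	/* stats disabled: this collapses to nothing */
#endif
}

int main(void)
{
	struct demo_timer t = { .name = "demo", .fired = 0 };

	demo_stats_account(&t);	/* no-op unless built with -DCONFIG_DEMO_STATS */
	return 0;
}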
@@ -760,7 +790,7 @@ void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
 static inline
 void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 {
-	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
+	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
 }
 
 /**
@@ -873,19 +903,29 @@ static void __remove_hrtimer(struct hrtimer *timer,
 			     struct hrtimer_clock_base *base,
 			     unsigned long newstate, int reprogram)
 {
-	if (timer->state & HRTIMER_STATE_ENQUEUED) {
-		/*
-		 * Remove the timer from the rbtree and replace the
-		 * first entry pointer if necessary.
-		 */
-		if (base->first == &timer->node) {
-			base->first = rb_next(&timer->node);
-			/* Reprogram the clock event device. if enabled */
-			if (reprogram && hrtimer_hres_active())
-				hrtimer_force_reprogram(base->cpu_base);
-		}
-		rb_erase(&timer->node, &base->active);
-	}
+	if (!(timer->state & HRTIMER_STATE_ENQUEUED))
+		goto out;
+
+	/*
+	 * Remove the timer from the rbtree and replace the first
+	 * entry pointer if necessary.
+	 */
+	if (base->first == &timer->node) {
+		base->first = rb_next(&timer->node);
+#ifdef CONFIG_HIGH_RES_TIMERS
+		/* Reprogram the clock event device. if enabled */
+		if (reprogram && hrtimer_hres_active()) {
+			ktime_t expires;
+
+			expires = ktime_sub(hrtimer_get_expires(timer),
+					    base->offset);
+			if (base->cpu_base->expires_next.tv64 == expires.tv64)
+				hrtimer_force_reprogram(base->cpu_base, 1);
+		}
+#endif
+	}
+	rb_erase(&timer->node, &base->active);
+out:
 	timer->state = newstate;
 }
 
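In the reworked __remove_hrtimer(), the clock event device is rescanned and reprogrammed only when the timer being removed is the one the device is currently armed for, i.e. its expiry equals cpu_base->expires_next; removing any later timer cannot change the next event, so the hrtimer_force_reprogram() pass is skipped. A simplified sketch of that check, with plain int64_t expiries and hypothetical demo_* helpers:

#include <stdint.h>
#include <stdio.h>

struct demo_cpu_base {
	int64_t expires_next;	/* expiry currently programmed into the device */
};

static void demo_rescan_and_reprogram(struct demo_cpu_base *cpu_base)
{
	printf("rescanning queues, programmed next event %lld\n",
	       (long long)cpu_base->expires_next);
}

static void demo_remove(struct demo_cpu_base *cpu_base, int64_t timer_expires)
{
	/*
	 * Only removing the timer that owns the programmed slot can
	 * change the next event; anything later is just erased.
	 */
	if (cpu_base->expires_next == timer_expires)
		demo_rescan_and_reprogram(cpu_base);
}

int main(void)
{
	struct demo_cpu_base base = { .expires_next = 2000 };

	demo_remove(&base, 9000);	/* later timer: nothing to reprogram */
	demo_remove(&base, 2000);	/* programmed timer: rescan */
	return 0;
}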
@@ -1083,7 +1123,7 @@ ktime_t hrtimer_get_next_event(void)
 	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&cpu_base->lock, flags);
+	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
 	if (!hrtimer_hres_active()) {
 		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
@@ -1100,7 +1140,7 @@ ktime_t hrtimer_get_next_event(void)
 		}
 	}
 
-	spin_unlock_irqrestore(&cpu_base->lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
 	if (mindelta.tv64 < 0)
 		mindelta.tv64 = 0;
@@ -1182,11 +1222,11 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 	 * they get migrated to another cpu, therefore its safe to unlock
 	 * the timer base.
 	 */
-	spin_unlock(&cpu_base->lock);
+	raw_spin_unlock(&cpu_base->lock);
 	trace_hrtimer_expire_entry(timer, now);
 	restart = fn(timer);
 	trace_hrtimer_expire_exit(timer);
-	spin_lock(&cpu_base->lock);
+	raw_spin_lock(&cpu_base->lock);
 
 	/*
 	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
@@ -1202,29 +1242,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 
-static int force_clock_reprogram;
-
-/*
- * After 5 iteration's attempts, we consider that hrtimer_interrupt()
- * is hanging, which could happen with something that slows the interrupt
- * such as the tracing. Then we force the clock reprogramming for each future
- * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
- * threshold that we will overwrite.
- * The next tick event will be scheduled to 3 times we currently spend on
- * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
- * 1/4 of their time to process the hrtimer interrupts. This is enough to
- * let it running without serious starvation.
- */
-
-static inline void
-hrtimer_interrupt_hanging(struct clock_event_device *dev,
-			ktime_t try_time)
-{
-	force_clock_reprogram = 1;
-	dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
-	printk(KERN_WARNING "hrtimer: interrupt too slow, "
-		"forcing clock min delta to %lu ns\n", dev->min_delta_ns);
-}
 /*
  * High resolution timer interrupt
  * Called with interrupts disabled
@@ -1233,24 +1250,18 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 {
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	struct hrtimer_clock_base *base;
-	ktime_t expires_next, now;
-	int nr_retries = 0;
-	int i;
+	ktime_t expires_next, now, entry_time, delta;
+	int i, retries = 0;
 
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
 	dev->next_event.tv64 = KTIME_MAX;
 
-retry:
-	/* 5 retries is enough to notice a hang */
-	if (!(++nr_retries % 5))
-		hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
-
-	now = ktime_get();
-
+	entry_time = now = ktime_get();
+retry:
 	expires_next.tv64 = KTIME_MAX;
 
-	spin_lock(&cpu_base->lock);
+	raw_spin_lock(&cpu_base->lock);
 	/*
 	 * We set expires_next to KTIME_MAX here with cpu_base->lock
 	 * held to prevent that a timer is enqueued in our queue via
@@ -1306,13 +1317,51 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	 * against it.
 	 */
 	cpu_base->expires_next = expires_next;
-	spin_unlock(&cpu_base->lock);
+	raw_spin_unlock(&cpu_base->lock);
 
 	/* Reprogramming necessary ? */
-	if (expires_next.tv64 != KTIME_MAX) {
-		if (tick_program_event(expires_next, force_clock_reprogram))
-			goto retry;
+	if (expires_next.tv64 == KTIME_MAX ||
+	    !tick_program_event(expires_next, 0)) {
+		cpu_base->hang_detected = 0;
+		return;
 	}
+
+	/*
+	 * The next timer was already expired due to:
+	 * - tracing
+	 * - long lasting callbacks
+	 * - being scheduled away when running in a VM
+	 *
+	 * We need to prevent that we loop forever in the hrtimer
+	 * interrupt routine. We give it 3 attempts to avoid
+	 * overreacting on some spurious event.
+	 */
+	now = ktime_get();
+	cpu_base->nr_retries++;
+	if (++retries < 3)
+		goto retry;
+	/*
+	 * Give the system a chance to do something else than looping
+	 * here. We stored the entry time, so we know exactly how long
+	 * we spent here. We schedule the next event this amount of
+	 * time away.
+	 */
+	cpu_base->nr_hangs++;
+	cpu_base->hang_detected = 1;
+	delta = ktime_sub(now, entry_time);
+	if (delta.tv64 > cpu_base->max_hang_time.tv64)
+		cpu_base->max_hang_time = delta;
+	/*
+	 * Limit it to a sensible value as we enforce a longer
+	 * delay. Give the CPU at least 100ms to catch up.
+	 */
+	if (delta.tv64 > 100 * NSEC_PER_MSEC)
+		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
+	else
+		expires_next = ktime_add(now, delta);
+	tick_program_event(expires_next, 1);
+	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
+		    ktime_to_ns(delta));
 }
 
 /*
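Taken together, the hrtimer_interrupt() hunks replace the old force_clock_reprogram heuristic with a bounded retry: at most three passes over the queues, and if timers keep expiring faster than they can be handled the handler records a hang, measures how long it has been looping (entry_time to now), and pushes the next event out by that amount, capped at 100ms. A self-contained userspace sketch of this control flow, where the demo_* functions are stand-ins for ktime_get(), tick_program_event() and the queue walk:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <time.h>

#define DEMO_NSEC_PER_MSEC 1000000LL

static bool hang_detected;

static int64_t demo_now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Pretend the event device rejects expiries that already lie in the past. */
static int demo_program_event(int64_t expires, int force)
{
	return (!force && expires <= demo_now()) ? -1 : 0;
}

/* Pretend to run expired timers; always report the next one as already expired. */
static int64_t demo_run_expired_timers(void)
{
	return demo_now() - 1;
}

static void demo_interrupt(void)
{
	int64_t entry_time, now, delta, expires_next;
	int retries = 0;

	entry_time = now = demo_now();
retry:
	expires_next = demo_run_expired_timers();

	/* Done if nothing is pending or the device accepted the expiry. */
	if (expires_next == INT64_MAX || !demo_program_event(expires_next, 0)) {
		hang_detected = false;
		return;
	}

	/* The next timer expired while we were running: retry, but only 3 times. */
	now = demo_now();
	if (++retries < 3)
		goto retry;

	/* Hang: delay the next event by the time spent here, capped at 100ms. */
	hang_detected = true;
	delta = now - entry_time;
	if (delta > 100 * DEMO_NSEC_PER_MSEC)
		delta = 100 * DEMO_NSEC_PER_MSEC;
	demo_program_event(now + delta, 1);
	fprintf(stderr, "interrupt took %lld ns\n", (long long)(now - entry_time));
}

int main(void)
{
	demo_interrupt();
	return 0;
}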
@@ -1408,7 +1457,7 @@ void hrtimer_run_queues(void)
 			gettime = 0;
 		}
 
-		spin_lock(&cpu_base->lock);
+		raw_spin_lock(&cpu_base->lock);
 
 		while ((node = base->first)) {
 			struct hrtimer *timer;
@@ -1420,7 +1469,7 @@ void hrtimer_run_queues(void)
 
 			__run_hrtimer(timer, &base->softirq_time);
 		}
-		spin_unlock(&cpu_base->lock);
+		raw_spin_unlock(&cpu_base->lock);
 	}
 }
 
@@ -1576,7 +1625,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
 
-	spin_lock_init(&cpu_base->lock);
+	raw_spin_lock_init(&cpu_base->lock);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
 		cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1634,16 +1683,16 @@ static void migrate_hrtimers(int scpu)
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
 	 */
-	spin_lock(&new_base->lock);
-	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+	raw_spin_lock(&new_base->lock);
+	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		migrate_hrtimer_list(&old_base->clock_base[i],
 				     &new_base->clock_base[i]);
 	}
 
-	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
+	raw_spin_unlock(&old_base->lock);
+	raw_spin_unlock(&new_base->lock);
 
 	/* Check, if we got expired work to do */
 	__hrtimer_peek_ahead_timers();
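migrate_hrtimers() takes two locks of the same lock class (the per-CPU base locks) back to back, which lockdep would normally report as a possible recursive deadlock; the _nested variant with SINGLE_DEPTH_NESTING marks the second acquisition as an intentional, bounded nesting, and the comment in the hunk explains why it is safe (the hotplug path is globally serialized). A minimal kernel-style sketch of that annotation (demo_lock_a/demo_lock_b are illustrative, not hrtimer code):

#include <linux/spinlock.h>

/* Two instances of the same lock class, like two per-CPU timer bases. */
static DEFINE_RAW_SPINLOCK(demo_lock_a);
static DEFINE_RAW_SPINLOCK(demo_lock_b);

static void demo_take_both(void)
{
	raw_spin_lock(&demo_lock_a);
	/* Second lock of the same class: tell lockdep this nesting is intended. */
	raw_spin_lock_nested(&demo_lock_b, SINGLE_DEPTH_NESTING);

	/* ... move work from one base to the other ... */

	raw_spin_unlock(&demo_lock_b);
	raw_spin_unlock(&demo_lock_a);
}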