author     Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /kernel/hrtimer.c
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'kernel/hrtimer.c')
 -rw-r--r--  kernel/hrtimer.c  178
 1 file changed, 106 insertions(+), 72 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 7b19403900ad..02e5097bf319 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -129,11 +129,11 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 	for (;;) {
 		base = timer->base;
 		if (likely(base != NULL)) {
-			spin_lock_irqsave(&base->cpu_base->lock, *flags);
+			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 			if (likely(base == timer->base))
 				return base;
 			/* The timer has migrated to another CPU: */
-			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
+			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
 		}
 		cpu_relax();
 	}
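Note: most hunks in this merge are the same mechanical change, converting the per-CPU hrtimer base lock from spin_lock*() to raw_spin_lock*(), presumably so these locks keep spinning on PREEMPT_RT configurations where ordinary spinlocks become sleeping locks. The retry loop above is worth a closer look on its own. A minimal userspace model of it, assuming pthreads and GCC atomics (fake_timer/fake_base and lock_fake_timer_base are invented names, not kernel APIs), might look like this:

/*
 * Hypothetical userspace model of the lock_hrtimer_base() retry loop above:
 * lock the base the timer currently points at, then re-check that the timer
 * was not migrated to another base while the lock was being taken.
 */
#include <pthread.h>
#include <sched.h>
#include <stddef.h>

struct fake_base {
	pthread_mutex_t lock;
};

struct fake_timer {
	struct fake_base *base;	/* may be switched by a concurrent migration */
};

static struct fake_base *lock_fake_timer_base(struct fake_timer *t)
{
	struct fake_base *base;

	for (;;) {
		base = __atomic_load_n(&t->base, __ATOMIC_ACQUIRE);
		if (base != NULL) {
			pthread_mutex_lock(&base->lock);
			/* Still the same base? Then the lock really protects the timer. */
			if (base == __atomic_load_n(&t->base, __ATOMIC_ACQUIRE))
				return base;
			/* The timer migrated under us: drop the stale lock and retry. */
			pthread_mutex_unlock(&base->lock);
		}
		sched_yield();	/* stand-in for cpu_relax() */
	}
}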
@@ -210,13 +210,13 @@ again:
 
 		/* See the comment in lock_timer_base() */
 		timer->base = NULL;
-		spin_unlock(&base->cpu_base->lock);
-		spin_lock(&new_base->cpu_base->lock);
+		raw_spin_unlock(&base->cpu_base->lock);
+		raw_spin_lock(&new_base->cpu_base->lock);
 
 		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
 			cpu = this_cpu;
-			spin_unlock(&new_base->cpu_base->lock);
-			spin_lock(&base->cpu_base->lock);
+			raw_spin_unlock(&new_base->cpu_base->lock);
+			raw_spin_lock(&base->cpu_base->lock);
 			timer->base = base;
 			goto again;
 		}
@@ -232,7 +232,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 {
 	struct hrtimer_clock_base *base = timer->base;
 
-	spin_lock_irqsave(&base->cpu_base->lock, *flags);
+	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 
 	return base;
 }
@@ -559,7 +559,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 static int hrtimer_reprogram(struct hrtimer *timer,
 			     struct hrtimer_clock_base *base)
 {
-	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
+	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 	int res;
 
@@ -584,7 +584,16 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 	if (expires.tv64 < 0)
 		return -ETIME;
 
-	if (expires.tv64 >= expires_next->tv64)
+	if (expires.tv64 >= cpu_base->expires_next.tv64)
+		return 0;
+
+	/*
+	 * If a hang was detected in the last timer interrupt then we
+	 * do not schedule a timer which is earlier than the expiry
+	 * which we enforced in the hang detection. We want the system
+	 * to make progress.
+	 */
+	if (cpu_base->hang_detected)
 		return 0;
 
 	/*
@@ -592,7 +601,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 	 */
 	res = tick_program_event(expires, 0);
 	if (!IS_ERR_VALUE(res))
-		*expires_next = expires;
+		cpu_base->expires_next = expires;
 	return res;
 }
 
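The two hunks above widen hrtimer_reprogram()'s view from just the expires_next field to the whole per-CPU base, so it can also consult the new hang_detected flag: while a hang back-off is in force, an earlier event is deliberately not programmed. A self-contained sketch of that decision, with ktime_t modelled as plain signed nanoseconds and all names invented for illustration:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the fields hrtimer_reprogram() consults. */
struct cpu_base_model {
	int64_t	expires_next_ns;	/* earliest event already programmed */
	bool	hang_detected;		/* last interrupt overran and forced a back-off */
};

/* Should the clock event device be reprogrammed to fire at expires_ns? */
static bool should_reprogram(const struct cpu_base_model *cb, int64_t expires_ns)
{
	if (expires_ns < 0)
		return false;	/* already in the past (the -ETIME case) */
	if (expires_ns >= cb->expires_next_ns)
		return false;	/* not earlier than what is already programmed */
	if (cb->hang_detected)
		return false;	/* keep the expiry enforced by the hang detection */
	return true;
}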
@@ -621,12 +630,12 @@ static void retrigger_next_event(void *arg)
 	base = &__get_cpu_var(hrtimer_bases);
 
 	/* Adjust CLOCK_REALTIME offset */
-	spin_lock(&base->lock);
+	raw_spin_lock(&base->lock);
 	base->clock_base[CLOCK_REALTIME].offset =
 		timespec_to_ktime(realtime_offset);
 
 	hrtimer_force_reprogram(base, 0);
-	spin_unlock(&base->lock);
+	raw_spin_unlock(&base->lock);
 }
 
 /*
@@ -687,9 +696,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 {
 	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
 		if (wakeup) {
-			spin_unlock(&base->cpu_base->lock);
+			raw_spin_unlock(&base->cpu_base->lock);
 			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-			spin_lock(&base->cpu_base->lock);
+			raw_spin_lock(&base->cpu_base->lock);
 		} else
 			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 
@@ -749,17 +758,33 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
-#ifdef CONFIG_TIMER_STATS
-void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
+static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
 {
+#ifdef CONFIG_TIMER_STATS
 	if (timer->start_site)
 		return;
-
-	timer->start_site = addr;
+	timer->start_site = __builtin_return_address(0);
 	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
 	timer->start_pid = current->pid;
+#endif
 }
+
+static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+	timer->start_site = NULL;
+#endif
+}
+
+static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+	if (likely(!timer_stats_active))
+		return;
+	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
+				 timer->function, timer->start_comm, 0);
 #endif
+}
 
 /*
  * Counterpart to lock_hrtimer_base above:
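The hunk above replaces the exported, CONFIG_TIMER_STATS-only helper with static inline wrappers that are always defined and whose bodies are compiled out when the option is disabled, so call sites need no #ifdefs of their own. A minimal illustration of the pattern, assuming a made-up FEATURE_STATS option rather than the real kconfig symbol:

/*
 * Sketch of the "#ifdef inside a static inline" pattern used above.
 * FEATURE_STATS, my_timer and record_start() are invented for this example.
 */
#include <stdio.h>

/* #define FEATURE_STATS 1 */

struct my_timer {
	const char *name;
#ifdef FEATURE_STATS
	const void *start_site;
#endif
};

static inline void record_start(struct my_timer *t)
{
#ifdef FEATURE_STATS
	if (t->start_site)
		return;
	t->start_site = __builtin_return_address(0);
#else
	(void)t;	/* entire body compiled away when the option is off */
#endif
}

int main(void)
{
	struct my_timer t = { .name = "demo" };

	record_start(&t);	/* no #ifdef needed at the call site */
	printf("timer %s initialised\n", t.name);
	return 0;
}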
@@ -767,7 +792,7 @@ void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
 static inline
 void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 {
-	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
+	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
 }
 
 /**
@@ -1027,9 +1052,9 @@ void hrtimer_pull(void)
 	struct hrtimer_start_on_info *info;
 	struct list_head *pos, *safe, list;
 
-	spin_lock(&base->lock);
+	raw_spin_lock(&base->lock);
 	list_replace_init(&base->to_pull, &list);
-	spin_unlock(&base->lock);
+	raw_spin_unlock(&base->lock);
 
 	list_for_each_safe(pos, safe, &list) {
 		info = list_entry(pos, struct hrtimer_start_on_info, list);
@@ -1083,10 +1108,10 @@ int hrtimer_start_on(int cpu, struct hrtimer_start_on_info* info,
 	} else {
 		TRACE("hrtimer_start_on: pulling to remote CPU\n");
 		base = &per_cpu(hrtimer_bases, cpu);
-		spin_lock_irqsave(&base->lock, flags);
+		raw_spin_lock_irqsave(&base->lock, flags);
 		was_empty = list_empty(&base->to_pull);
 		list_add(&info->list, &base->to_pull);
-		spin_unlock_irqrestore(&base->lock, flags);
+		raw_spin_unlock_irqrestore(&base->lock, flags);
 		if (was_empty)
 			/* only send IPI if other no else
 			 * has done so already
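This LITMUS^RT-specific path queues the request on the remote CPU's to_pull list and sends an IPI only when the list was empty, since a non-empty list means a wake-up is already on its way. A userspace sketch of that idiom, using a mutex in place of the raw spinlock and invented names throughout:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct work_item {
	struct work_item *next;
};

struct remote_queue {
	pthread_mutex_t lock;
	struct work_item *head;
};

/* Returns true if the caller must send the wake-up (the list was empty). */
static bool queue_remote_work(struct remote_queue *q, struct work_item *w)
{
	bool was_empty;

	pthread_mutex_lock(&q->lock);
	was_empty = (q->head == NULL);
	w->next = q->head;
	q->head = w;
	pthread_mutex_unlock(&q->lock);

	return was_empty;	/* only the first enqueuer sends the IPI */
}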
@@ -1179,7 +1204,7 @@ ktime_t hrtimer_get_next_event(void)
 	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&cpu_base->lock, flags);
+	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
 	if (!hrtimer_hres_active()) {
 		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
@@ -1196,7 +1221,7 @@ ktime_t hrtimer_get_next_event(void)
 		}
 	}
 
-	spin_unlock_irqrestore(&cpu_base->lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
 	if (mindelta.tv64 < 0)
 		mindelta.tv64 = 0;
@@ -1278,11 +1303,11 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 	 * they get migrated to another cpu, therefore its safe to unlock
 	 * the timer base.
 	 */
-	spin_unlock(&cpu_base->lock);
+	raw_spin_unlock(&cpu_base->lock);
 	trace_hrtimer_expire_entry(timer, now);
 	restart = fn(timer);
 	trace_hrtimer_expire_exit(timer);
-	spin_lock(&cpu_base->lock);
+	raw_spin_lock(&cpu_base->lock);
 
 	/*
 	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
@@ -1298,29 +1323,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 
-static int force_clock_reprogram;
-
-/*
- * After 5 iteration's attempts, we consider that hrtimer_interrupt()
- * is hanging, which could happen with something that slows the interrupt
- * such as the tracing. Then we force the clock reprogramming for each future
- * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
- * threshold that we will overwrite.
- * The next tick event will be scheduled to 3 times we currently spend on
- * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
- * 1/4 of their time to process the hrtimer interrupts. This is enough to
- * let it running without serious starvation.
- */
-
-static inline void
-hrtimer_interrupt_hanging(struct clock_event_device *dev,
-			ktime_t try_time)
-{
-	force_clock_reprogram = 1;
-	dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
-	printk(KERN_WARNING "hrtimer: interrupt too slow, "
-		"forcing clock min delta to %lu ns\n", dev->min_delta_ns);
-}
 /*
  * High resolution timer interrupt
  * Called with interrupts disabled
@@ -1329,24 +1331,18 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 {
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	struct hrtimer_clock_base *base;
-	ktime_t expires_next, now;
-	int nr_retries = 0;
-	int i;
+	ktime_t expires_next, now, entry_time, delta;
+	int i, retries = 0;
 
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
 	dev->next_event.tv64 = KTIME_MAX;
 
-retry:
-	/* 5 retries is enough to notice a hang */
-	if (!(++nr_retries % 5))
-		hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
-
-	now = ktime_get();
-
+	entry_time = now = ktime_get();
+retry:
 	expires_next.tv64 = KTIME_MAX;
 
-	spin_lock(&cpu_base->lock);
+	raw_spin_lock(&cpu_base->lock);
 	/*
 	 * We set expires_next to KTIME_MAX here with cpu_base->lock
 	 * held to prevent that a timer is enqueued in our queue via
@@ -1402,13 +1398,51 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	 * against it.
 	 */
 	cpu_base->expires_next = expires_next;
-	spin_unlock(&cpu_base->lock);
+	raw_spin_unlock(&cpu_base->lock);
 
 	/* Reprogramming necessary ? */
-	if (expires_next.tv64 != KTIME_MAX) {
-		if (tick_program_event(expires_next, force_clock_reprogram))
-			goto retry;
+	if (expires_next.tv64 == KTIME_MAX ||
+	    !tick_program_event(expires_next, 0)) {
+		cpu_base->hang_detected = 0;
+		return;
 	}
+
+	/*
+	 * The next timer was already expired due to:
+	 * - tracing
+	 * - long lasting callbacks
+	 * - being scheduled away when running in a VM
+	 *
+	 * We need to prevent that we loop forever in the hrtimer
+	 * interrupt routine. We give it 3 attempts to avoid
+	 * overreacting on some spurious event.
+	 */
+	now = ktime_get();
+	cpu_base->nr_retries++;
+	if (++retries < 3)
+		goto retry;
+	/*
+	 * Give the system a chance to do something else than looping
+	 * here. We stored the entry time, so we know exactly how long
+	 * we spent here. We schedule the next event this amount of
+	 * time away.
+	 */
+	cpu_base->nr_hangs++;
+	cpu_base->hang_detected = 1;
+	delta = ktime_sub(now, entry_time);
+	if (delta.tv64 > cpu_base->max_hang_time.tv64)
+		cpu_base->max_hang_time = delta;
+	/*
+	 * Limit it to a sensible value as we enforce a longer
+	 * delay. Give the CPU at least 100ms to catch up.
+	 */
+	if (delta.tv64 > 100 * NSEC_PER_MSEC)
+		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
+	else
+		expires_next = ktime_add(now, delta);
+	tick_program_event(expires_next, 1);
+	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
+		    ktime_to_ns(delta));
 }
 
 /*
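Together with the declaration changes earlier in hrtimer_interrupt(), this hunk replaces the old "force the clock min_delta after 5 retries" heuristic: if the freshly programmed event has already expired, the handler retries up to 3 times; after that it records the hang, sets hang_detected, and programs the next event min(time spent, 100 ms) into the future so the CPU can catch up. A small sketch of the back-off arithmetic in plain nanoseconds (the constants are copied from the hunk, the names are invented):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC	1000000LL
#define MAX_RETRIES	3

/* Given how long the interrupt has been looping, pick the next expiry offset. */
static int64_t hang_backoff_ns(int64_t time_spent_ns)
{
	if (time_spent_ns > 100 * NSEC_PER_MSEC)
		return 100 * NSEC_PER_MSEC;	/* cap the enforced delay at 100 ms */
	return time_spent_ns;			/* otherwise mirror the time spent */
}

int main(void)
{
	int64_t spent = 7 * NSEC_PER_MSEC;	/* e.g. 7 ms spent in the handler */

	printf("retry limit: %d, back-off: %lld ns\n",
	       MAX_RETRIES, (long long)hang_backoff_ns(spent));
	return 0;
}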
@@ -1504,7 +1538,7 @@ void hrtimer_run_queues(void)
 			gettime = 0;
 		}
 
-		spin_lock(&cpu_base->lock);
+		raw_spin_lock(&cpu_base->lock);
 
 		while ((node = base->first)) {
 			struct hrtimer *timer;
@@ -1516,7 +1550,7 @@ void hrtimer_run_queues(void)
 
 			__run_hrtimer(timer, &base->softirq_time);
 		}
-		spin_unlock(&cpu_base->lock);
+		raw_spin_unlock(&cpu_base->lock);
 	}
 }
 
@@ -1672,7 +1706,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
 
-	spin_lock_init(&cpu_base->lock);
+	raw_spin_lock_init(&cpu_base->lock);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
 		cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1731,16 +1765,16 @@ static void migrate_hrtimers(int scpu)
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
 	 */
-	spin_lock(&new_base->lock);
-	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+	raw_spin_lock(&new_base->lock);
+	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		migrate_hrtimer_list(&old_base->clock_base[i],
 				     &new_base->clock_base[i]);
 	}
 
-	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
+	raw_spin_unlock(&old_base->lock);
+	raw_spin_unlock(&new_base->lock);
 
 	/* Check, if we got expired work to do */
 	__hrtimer_peek_ahead_timers();