Diffstat (limited to 'kernel/hrtimer.c')
 kernel/hrtimer.c | 280 ++++----------------------------------------------
 1 file changed, 31 insertions(+), 249 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 47e63349d1b2..efd6f41e1c16 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -442,22 +442,6 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
 #endif
 
-/*
- * Check, whether the timer is on the callback pending list
- */
-static inline int hrtimer_cb_pending(const struct hrtimer *timer)
-{
-        return timer->state & HRTIMER_STATE_PENDING;
-}
-
-/*
- * Remove a timer from the callback pending list
- */
-static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
-{
-        list_del_init(&timer->cb_entry);
-}
-
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
@@ -651,6 +635,8 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
 {
 }
 
+static void __run_hrtimer(struct hrtimer *timer);
+
 /*
  * When High resolution timers are active, try to reprogram. Note, that in case
  * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
@@ -661,31 +647,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                                             struct hrtimer_clock_base *base)
 {
         if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-
-                /* Timer is expired, act upon the callback mode */
-                switch(timer->cb_mode) {
-                case HRTIMER_CB_IRQSAFE_PERCPU:
-                case HRTIMER_CB_IRQSAFE_UNLOCKED:
-                        /*
-                         * This is solely for the sched tick emulation with
-                         * dynamic tick support to ensure that we do not
-                         * restart the tick right on the edge and end up with
-                         * the tick timer in the softirq ! The calling site
-                         * takes care of this. Also used for hrtimer sleeper !
-                         */
-                        debug_hrtimer_deactivate(timer);
-                        return 1;
-                case HRTIMER_CB_SOFTIRQ:
-                        /*
-                         * Move everything else into the softirq pending list !
-                         */
-                        list_add_tail(&timer->cb_entry,
-                                      &base->cpu_base->cb_pending);
-                        timer->state = HRTIMER_STATE_PENDING;
-                        return 1;
-                default:
-                        BUG();
-                }
+                /*
+                 * XXX: recursion check?
+                 * hrtimer_forward() should round up with timer granularity
+                 * so that we never get into inf recursion here,
+                 * it doesn't do that though
+                 */
+                __run_hrtimer(timer);
+                return 1;
         }
         return 0;
 }
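Note: the cb_mode switch is gone; a timer found to be already expired at
enqueue time is now simply executed in place via __run_hrtimer(). The XXX
comment above flags the one hazard this leaves: if the callback re-arms the
timer by a step smaller than its lag behind "now", enqueueing keeps
re-running it. A minimal userspace model of that failure mode, with made-up
names and a depth cap standing in for the unbounded recursion; this is not
kernel code:

    /* cc recursion-model.c && ./a.out */
    #include <stdio.h>

    static long now = 1000;     /* current time, arbitrary units */
    static long expires;        /* the timer's expiry */
    static int depth;

    static void enqueue(long interval);

    /* re-arm one interval past the old expiry, as a forward helper
     * would if it did not round the new expiry up past "now" */
    static void callback(long interval)
    {
        expires += interval;
        enqueue(interval);
    }

    static void enqueue(long interval)
    {
        if (expires > now)
            return;             /* future expiry: hardware gets programmed */
        if (++depth > 5) {      /* cap what would otherwise never terminate */
            printf("stuck: expires=%ld still behind now=%ld\n", expires, now);
            return;
        }
        callback(interval);     /* expired on arrival: run it right here */
    }

    int main(void)
    {
        expires = 0;            /* expired long ago */
        enqueue(1);             /* re-arm step << (now - expires) */
        return 0;
    }

A forward helper that always advances the expiry in whole intervals until it
passes the current time cannot loop like this; the comment records doubt
that hrtimer_forward() rounds far enough (timer granularity) to guarantee
that forward progress.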
@@ -724,11 +693,6 @@ static int hrtimer_switch_to_hres(void)
         return 1;
 }
 
-static inline void hrtimer_raise_softirq(void)
-{
-        raise_softirq(HRTIMER_SOFTIRQ);
-}
-
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -747,7 +711,6 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
 {
         return 0;
 }
-static inline void hrtimer_raise_softirq(void) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -890,10 +853,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
                              struct hrtimer_clock_base *base,
                              unsigned long newstate, int reprogram)
 {
-        /* High res. callback list. NOP for !HIGHRES */
-        if (hrtimer_cb_pending(timer))
-                hrtimer_remove_cb_pending(timer);
-        else {
+        if (timer->state & HRTIMER_STATE_ENQUEUED) {
                 /*
                  * Remove the timer from the rbtree and replace the
                  * first entry pointer if necessary.
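Note: with the callback-pending list gone, HRTIMER_STATE_PENDING disappears
and "does this timer need removing from a queue?" collapses to a single bit
test on ENQUEUED. A hedged sketch of the resulting state logic; the bit
values are assumed here for illustration, include/linux/hrtimer.h is
authoritative:

    #include <assert.h>

    #define HRTIMER_STATE_INACTIVE  0x00    /* not armed */
    #define HRTIMER_STATE_ENQUEUED  0x01    /* sits on a clock base rbtree */
    #define HRTIMER_STATE_CALLBACK  0x02    /* callback currently running */

    struct hrtimer_model { unsigned long state; };

    /* hrtimer_active()-style test: anything but INACTIVE is active */
    static int model_active(const struct hrtimer_model *t)
    {
        return t->state != HRTIMER_STATE_INACTIVE;
    }

    /* the new __remove_hrtimer() test: only touch the rbtree when the
     * ENQUEUED bit is set; a callback-only state is not queued anywhere */
    static int model_needs_rbtree_removal(const struct hrtimer_model *t)
    {
        return t->state & HRTIMER_STATE_ENQUEUED;
    }

    int main(void)
    {
        struct hrtimer_model t = { .state = HRTIMER_STATE_CALLBACK };

        assert(model_active(&t));                   /* running => active */
        assert(!model_needs_rbtree_removal(&t));    /* but not enqueued */
        return 0;
    }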
@@ -953,7 +913,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 {
         struct hrtimer_clock_base *base, *new_base;
         unsigned long flags;
-        int ret, raise;
+        int ret;
 
         base = lock_hrtimer_base(timer, &flags);
 
@@ -988,26 +948,8 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
         enqueue_hrtimer(timer, new_base,
                         new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
 
-        /*
-         * The timer may be expired and moved to the cb_pending
-         * list. We can not raise the softirq with base lock held due
-         * to a possible deadlock with runqueue lock.
-         */
-        raise = timer->state == HRTIMER_STATE_PENDING;
-
-        /*
-         * We use preempt_disable to prevent this task from migrating after
-         * setting up the softirq and raising it. Otherwise, if me migrate
-         * we will raise the softirq on the wrong CPU.
-         */
-        preempt_disable();
-
         unlock_hrtimer_base(timer, &flags);
 
-        if (raise)
-                hrtimer_raise_softirq();
-        preempt_enable();
-
         return ret;
 }
 EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
@@ -1192,75 +1134,6 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 }
 EXPORT_SYMBOL_GPL(hrtimer_get_res);
 
-static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
-{
-        spin_lock_irq(&cpu_base->lock);
-
-        while (!list_empty(&cpu_base->cb_pending)) {
-                enum hrtimer_restart (*fn)(struct hrtimer *);
-                struct hrtimer *timer;
-                int restart;
-                int emulate_hardirq_ctx = 0;
-
-                timer = list_entry(cpu_base->cb_pending.next,
-                                   struct hrtimer, cb_entry);
-
-                debug_hrtimer_deactivate(timer);
-                timer_stats_account_hrtimer(timer);
-
-                fn = timer->function;
-                /*
-                 * A timer might have been added to the cb_pending list
-                 * when it was migrated during a cpu-offline operation.
-                 * Emulate hardirq context for such timers.
-                 */
-                if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
-                    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
-                        emulate_hardirq_ctx = 1;
-
-                __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
-                spin_unlock_irq(&cpu_base->lock);
-
-                if (unlikely(emulate_hardirq_ctx)) {
-                        local_irq_disable();
-                        restart = fn(timer);
-                        local_irq_enable();
-                } else
-                        restart = fn(timer);
-
-                spin_lock_irq(&cpu_base->lock);
-
-                timer->state &= ~HRTIMER_STATE_CALLBACK;
-                if (restart == HRTIMER_RESTART) {
-                        BUG_ON(hrtimer_active(timer));
-                        /*
-                         * Enqueue the timer, allow reprogramming of the event
-                         * device
-                         */
-                        enqueue_hrtimer(timer, timer->base, 1);
-                } else if (hrtimer_active(timer)) {
-                        /*
-                         * If the timer was rearmed on another CPU, reprogram
-                         * the event device.
-                         */
-                        struct hrtimer_clock_base *base = timer->base;
-
-                        if (base->first == &timer->node &&
-                            hrtimer_reprogram(timer, base)) {
-                                /*
-                                 * Timer is expired. Thus move it from tree to
-                                 * pending list again.
-                                 */
-                                __remove_hrtimer(timer, base,
-                                                 HRTIMER_STATE_PENDING, 0);
-                                list_add_tail(&timer->cb_entry,
-                                              &base->cpu_base->cb_pending);
-                        }
-                }
-        }
-        spin_unlock_irq(&cpu_base->lock);
-}
-
 static void __run_hrtimer(struct hrtimer *timer)
 {
         struct hrtimer_clock_base *base = timer->base;
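Note: everything run_hrtimer_pending() did, dequeue, invoke the callback,
re-enqueue on HRTIMER_RESTART, reprogram when rearmed, is already covered by
__run_hrtimer() and the hardirq expiry path, which is why the whole function
can go. A tiny standalone model of the restart contract that made it
redundant (plain C, made-up names, not the kernel API):

    #include <stdio.h>

    enum restart { NORESTART, RESTART };

    struct model_timer {
        int shots_left;
        enum restart (*fn)(struct model_timer *);
    };

    static enum restart three_shots(struct model_timer *t)
    {
        printf("fire (%d left)\n", t->shots_left);
        return --t->shots_left > 0 ? RESTART : NORESTART;
    }

    /* dequeue, run, re-enqueue on RESTART, as __run_hrtimer() does;
     * re-enqueue plus immediate expiry is collapsed into one loop here */
    static void run_model_timer(struct model_timer *t)
    {
        while (t->fn(t) == RESTART)
            ;
    }

    int main(void)
    {
        struct model_timer t = { .shots_left = 3, .fn = three_shots };
        run_model_timer(&t);
        return 0;
    }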
@@ -1268,25 +1141,21 @@ static void __run_hrtimer(struct hrtimer *timer)
         enum hrtimer_restart (*fn)(struct hrtimer *);
         int restart;
 
+        WARN_ON(!irqs_disabled());
+
         debug_hrtimer_deactivate(timer);
         __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
         timer_stats_account_hrtimer(timer);
-
         fn = timer->function;
-        if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
-            timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
-                /*
-                 * Used for scheduler timers, avoid lock inversion with
-                 * rq->lock and tasklist_lock.
-                 *
-                 * These timers are required to deal with enqueue expiry
-                 * themselves and are not allowed to migrate.
-                 */
-                spin_unlock(&cpu_base->lock);
-                restart = fn(timer);
-                spin_lock(&cpu_base->lock);
-        } else
-                restart = fn(timer);
+
+        /*
+         * Because we run timers from hardirq context, there is no chance
+         * they get migrated to another cpu, therefore its safe to unlock
+         * the timer base.
+         */
+        spin_unlock(&cpu_base->lock);
+        restart = fn(timer);
+        spin_lock(&cpu_base->lock);
 
         /*
          * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
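Note: every callback now runs with the base lock dropped, the behaviour
previously reserved for the IRQSAFE modes. That is only safe because hardirq
context keeps the timer from migrating to another base while the lock is
released. A userspace pthread sketch of the unlock-call-relock pattern, a
model under that single-dispatcher assumption rather than kernel code:

    /* cc -pthread unlock-model.c && ./a.out */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;

    struct model_timer {
        int (*fn)(struct model_timer *);
    };

    /* free to take other locks or re-arm itself: base_lock is not held
     * here, so there is no inversion against e.g. a runqueue-style lock */
    static int wakeup_cb(struct model_timer *t)
    {
        (void)t;
        puts("callback ran without base_lock");
        return 0;
    }

    static void run_model_timer(struct model_timer *t)
    {
        int restart;

        pthread_mutex_lock(&base_lock);     /* spin_lock(&cpu_base->lock) */
        /* ... dequeue under the lock ... */
        pthread_mutex_unlock(&base_lock);   /* drop it across the callback */
        restart = t->fn(t);
        pthread_mutex_lock(&base_lock);     /* re-take before requeueing */
        /* ... re-enqueue if restart asked for it, clear CALLBACK ... */
        (void)restart;
        pthread_mutex_unlock(&base_lock);
    }

    int main(void)
    {
        struct model_timer t = { .fn = wakeup_cb };
        run_model_timer(&t);
        return 0;
    }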
@@ -1311,7 +1180,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
         struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
         struct hrtimer_clock_base *base;
         ktime_t expires_next, now;
-        int i, raise = 0;
+        int i;
 
         BUG_ON(!cpu_base->hres_active);
         cpu_base->nr_events++;
@@ -1360,16 +1229,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
                         break;
                 }
 
-                /* Move softirq callbacks to the pending list */
-                if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-                        __remove_hrtimer(timer, base,
-                                         HRTIMER_STATE_PENDING, 0);
-                        list_add_tail(&timer->cb_entry,
-                                      &base->cpu_base->cb_pending);
-                        raise = 1;
-                        continue;
-                }
-
                 __run_hrtimer(timer);
         }
         spin_unlock(&cpu_base->lock);
@@ -1383,10 +1242,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
                 if (tick_program_event(expires_next, 0))
                         goto retry;
         }
-
-        /* Raise softirq ? */
-        if (raise)
-                raise_softirq(HRTIMER_SOFTIRQ);
 }
 
 /**
@@ -1413,11 +1268,6 @@ void hrtimer_peek_ahead_timers(void)
         local_irq_restore(flags);
 }
 
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
-        run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
-}
-
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
 /*
@@ -1429,8 +1279,6 @@ static void run_hrtimer_softirq(struct softirq_action *h)
  */
 void hrtimer_run_pending(void)
 {
-        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
         if (hrtimer_hres_active())
                 return;
 
@@ -1444,8 +1292,6 @@ void hrtimer_run_pending(void)
          */
         if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
                 hrtimer_switch_to_hres();
-
-        run_hrtimer_pending(cpu_base);
 }
 
 /*
@@ -1482,14 +1328,6 @@ void hrtimer_run_queues(void)
                             hrtimer_get_expires_tv64(timer))
                                 break;
 
-                        if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-                                __remove_hrtimer(timer, base,
-                                                 HRTIMER_STATE_PENDING, 0);
-                                list_add_tail(&timer->cb_entry,
-                                              &base->cpu_base->cb_pending);
-                                continue;
-                        }
-
                         __run_hrtimer(timer);
                 }
                 spin_unlock(&cpu_base->lock);
@@ -1516,9 +1354,6 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 {
         sl->timer.function = hrtimer_wakeup;
         sl->task = task;
-#ifdef CONFIG_HIGH_RES_TIMERS
-        sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
-#endif
 }
 
 static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
@@ -1655,18 +1490,16 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
         for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
                 cpu_base->clock_base[i].cpu_base = cpu_base;
 
-        INIT_LIST_HEAD(&cpu_base->cb_pending);
         hrtimer_init_hres(cpu_base);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-                                struct hrtimer_clock_base *new_base, int dcpu)
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+                                 struct hrtimer_clock_base *new_base, int dcpu)
 {
         struct hrtimer *timer;
         struct rb_node *node;
-        int raise = 0;
 
         while ((node = rb_first(&old_base->active))) {
                 timer = rb_entry(node, struct hrtimer, node);
@@ -1674,18 +1507,6 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
                 debug_hrtimer_deactivate(timer);
 
                 /*
-                 * Should not happen. Per CPU timers should be
-                 * canceled _before_ the migration code is called
-                 */
-                if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
-                        __remove_hrtimer(timer, old_base,
-                                         HRTIMER_STATE_INACTIVE, 0);
-                        WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
-                             timer, timer->function, dcpu);
-                        continue;
-                }
-
-                /*
                  * Mark it as STATE_MIGRATE not INACTIVE otherwise the
                  * timer could be seen as !active and just vanish away
                  * under us on another CPU
@@ -1708,48 +1529,19 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
                  * otherwise we end up with a stale timer.
                  */
                 if (timer->state == HRTIMER_STATE_MIGRATE) {
-                        timer->state = HRTIMER_STATE_PENDING;
-                        list_add_tail(&timer->cb_entry,
-                                      &new_base->cpu_base->cb_pending);
-                        raise = 1;
+                        /* XXX: running on offline cpu */
+                        __run_hrtimer(timer);
                 }
 #endif
                 /* Clear the migration state bit */
                 timer->state &= ~HRTIMER_STATE_MIGRATE;
         }
-        return raise;
 }
 
-#ifdef CONFIG_HIGH_RES_TIMERS
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
-                                   struct hrtimer_cpu_base *new_base)
-{
-        struct hrtimer *timer;
-        int raise = 0;
-
-        while (!list_empty(&old_base->cb_pending)) {
-                timer = list_entry(old_base->cb_pending.next,
-                                   struct hrtimer, cb_entry);
-
-                __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
-                timer->base = &new_base->clock_base[timer->base->index];
-                list_add_tail(&timer->cb_entry, &new_base->cb_pending);
-                raise = 1;
-        }
-        return raise;
-}
-#else
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
-                                   struct hrtimer_cpu_base *new_base)
-{
-        return 0;
-}
-#endif
-
 static void migrate_hrtimers(int cpu)
 {
         struct hrtimer_cpu_base *old_base, *new_base;
-        int i, raise = 0;
+        int i;
 
         BUG_ON(cpu_online(cpu));
         old_base = &per_cpu(hrtimer_bases, cpu);
@@ -1764,20 +1556,13 @@ static void migrate_hrtimers(int cpu)
         spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
         for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-                if (migrate_hrtimer_list(&old_base->clock_base[i],
-                                         &new_base->clock_base[i], cpu))
-                        raise = 1;
+                migrate_hrtimer_list(&old_base->clock_base[i],
+                                     &new_base->clock_base[i], cpu);
         }
 
-        if (migrate_hrtimer_pending(old_base, new_base))
-                raise = 1;
-
         spin_unlock(&old_base->lock);
         spin_unlock_irq(&new_base->lock);
         put_cpu_var(hrtimer_bases);
-
-        if (raise)
-                hrtimer_raise_softirq();
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -1817,9 +1602,6 @@ void __init hrtimers_init(void)
         hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                            (void *)(long)smp_processor_id());
         register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
-        open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
 }
 
 /**