Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--	kernel/hrtimer.c	331
1 file changed, 60 insertions(+), 271 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 0ad3f3d6d10d..eb2bfefa6dcc 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -441,22 +441,6 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
 #endif
 
-/*
- * Check, whether the timer is on the callback pending list
- */
-static inline int hrtimer_cb_pending(const struct hrtimer *timer)
-{
-	return timer->state & HRTIMER_STATE_PENDING;
-}
-
-/*
- * Remove a timer from the callback pending list
- */
-static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
-{
-	list_del_init(&timer->cb_entry);
-}
-
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
@@ -650,6 +634,8 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
 {
 }
 
+static void __run_hrtimer(struct hrtimer *timer);
+
 /*
  * When High resolution timers are active, try to reprogram. Note, that in case
  * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
@@ -660,31 +646,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 					    struct hrtimer_clock_base *base)
 {
 	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-
-		/* Timer is expired, act upon the callback mode */
-		switch(timer->cb_mode) {
-		case HRTIMER_CB_IRQSAFE_PERCPU:
-		case HRTIMER_CB_IRQSAFE_UNLOCKED:
-			/*
-			 * This is solely for the sched tick emulation with
-			 * dynamic tick support to ensure that we do not
-			 * restart the tick right on the edge and end up with
-			 * the tick timer in the softirq ! The calling site
-			 * takes care of this. Also used for hrtimer sleeper !
-			 */
-			debug_hrtimer_deactivate(timer);
-			return 1;
-		case HRTIMER_CB_SOFTIRQ:
-			/*
-			 * Move everything else into the softirq pending list !
-			 */
-			list_add_tail(&timer->cb_entry,
-				      &base->cpu_base->cb_pending);
-			timer->state = HRTIMER_STATE_PENDING;
-			return 1;
-		default:
-			BUG();
-		}
+		/*
+		 * XXX: recursion check?
+		 * hrtimer_forward() should round up with timer granularity
+		 * so that we never get into inf recursion here,
+		 * it doesn't do that though
+		 */
+		__run_hrtimer(timer);
+		return 1;
 	}
 	return 0;
 }
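
Note (not part of the patch): the hunk above replaces the per-callback-mode dispatch with a direct __run_hrtimer() call when a freshly enqueued timer is already expired, and the XXX comment worries about a callback that re-arms without moving its expiry forward. A minimal illustrative sketch, with hypothetical names (example_timer_cb, example_period) not taken from this patch, of the pattern the comment assumes: advancing the expiry with hrtimer_forward() before returning HRTIMER_RESTART keeps each re-arm in the future, which is exactly the rounding guarantee the comment would like hrtimer_forward() to provide.

/* Illustrative sketch only; names are hypothetical. */
static enum hrtimer_restart example_timer_cb(struct hrtimer *timer)
{
	/* assumed example period of 100us */
	ktime_t example_period = ktime_set(0, 100 * NSEC_PER_USEC);

	/* push the expiry past "now" before asking to be restarted */
	hrtimer_forward(timer, hrtimer_cb_get_time(timer), example_period);
	return HRTIMER_RESTART;
}
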
@@ -723,11 +692,6 @@ static int hrtimer_switch_to_hres(void)
 	return 1;
 }
 
-static inline void hrtimer_raise_softirq(void)
-{
-	raise_softirq(HRTIMER_SOFTIRQ);
-}
-
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -746,7 +710,6 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
 {
 	return 0;
 }
-static inline void hrtimer_raise_softirq(void) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -889,10 +852,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
 			     struct hrtimer_clock_base *base,
 			     unsigned long newstate, int reprogram)
 {
-	/* High res. callback list. NOP for !HIGHRES */
-	if (hrtimer_cb_pending(timer))
-		hrtimer_remove_cb_pending(timer);
-	else {
+	if (timer->state & HRTIMER_STATE_ENQUEUED) {
 		/*
 		 * Remove the timer from the rbtree and replace the
 		 * first entry pointer if necessary.
@@ -952,7 +912,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 {
 	struct hrtimer_clock_base *base, *new_base;
 	unsigned long flags;
-	int ret, raise;
+	int ret;
 
 	base = lock_hrtimer_base(timer, &flags);
 
@@ -987,26 +947,8 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 	enqueue_hrtimer(timer, new_base,
 			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
 
-	/*
-	 * The timer may be expired and moved to the cb_pending
-	 * list. We can not raise the softirq with base lock held due
-	 * to a possible deadlock with runqueue lock.
-	 */
-	raise = timer->state == HRTIMER_STATE_PENDING;
-
-	/*
-	 * We use preempt_disable to prevent this task from migrating after
-	 * setting up the softirq and raising it. Otherwise, if me migrate
-	 * we will raise the softirq on the wrong CPU.
-	 */
-	preempt_disable();
-
 	unlock_hrtimer_base(timer, &flags);
 
-	if (raise)
-		hrtimer_raise_softirq();
-	preempt_enable();
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
@@ -1191,75 +1133,6 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 }
 EXPORT_SYMBOL_GPL(hrtimer_get_res);
 
-static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
-{
-	spin_lock_irq(&cpu_base->lock);
-
-	while (!list_empty(&cpu_base->cb_pending)) {
-		enum hrtimer_restart (*fn)(struct hrtimer *);
-		struct hrtimer *timer;
-		int restart;
-		int emulate_hardirq_ctx = 0;
-
-		timer = list_entry(cpu_base->cb_pending.next,
-				   struct hrtimer, cb_entry);
-
-		debug_hrtimer_deactivate(timer);
-		timer_stats_account_hrtimer(timer);
-
-		fn = timer->function;
-		/*
-		 * A timer might have been added to the cb_pending list
-		 * when it was migrated during a cpu-offline operation.
-		 * Emulate hardirq context for such timers.
-		 */
-		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
-		    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
-			emulate_hardirq_ctx = 1;
-
-		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
-		spin_unlock_irq(&cpu_base->lock);
-
-		if (unlikely(emulate_hardirq_ctx)) {
-			local_irq_disable();
-			restart = fn(timer);
-			local_irq_enable();
-		} else
-			restart = fn(timer);
-
-		spin_lock_irq(&cpu_base->lock);
-
-		timer->state &= ~HRTIMER_STATE_CALLBACK;
-		if (restart == HRTIMER_RESTART) {
-			BUG_ON(hrtimer_active(timer));
-			/*
-			 * Enqueue the timer, allow reprogramming of the event
-			 * device
-			 */
-			enqueue_hrtimer(timer, timer->base, 1);
-		} else if (hrtimer_active(timer)) {
-			/*
-			 * If the timer was rearmed on another CPU, reprogram
-			 * the event device.
-			 */
-			struct hrtimer_clock_base *base = timer->base;
-
-			if (base->first == &timer->node &&
-			    hrtimer_reprogram(timer, base)) {
-				/*
-				 * Timer is expired. Thus move it from tree to
-				 * pending list again.
-				 */
-				__remove_hrtimer(timer, base,
-						 HRTIMER_STATE_PENDING, 0);
-				list_add_tail(&timer->cb_entry,
-					      &base->cpu_base->cb_pending);
-			}
-		}
-	}
-	spin_unlock_irq(&cpu_base->lock);
-}
-
 static void __run_hrtimer(struct hrtimer *timer)
 {
 	struct hrtimer_clock_base *base = timer->base;
@@ -1267,25 +1140,21 @@ static void __run_hrtimer(struct hrtimer *timer)
 	enum hrtimer_restart (*fn)(struct hrtimer *);
 	int restart;
 
+	WARN_ON(!irqs_disabled());
+
 	debug_hrtimer_deactivate(timer);
 	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
 	timer_stats_account_hrtimer(timer);
-
 	fn = timer->function;
-	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
-	    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
-		/*
-		 * Used for scheduler timers, avoid lock inversion with
-		 * rq->lock and tasklist_lock.
-		 *
-		 * These timers are required to deal with enqueue expiry
-		 * themselves and are not allowed to migrate.
-		 */
-		spin_unlock(&cpu_base->lock);
-		restart = fn(timer);
-		spin_lock(&cpu_base->lock);
-	} else
-		restart = fn(timer);
+
+	/*
+	 * Because we run timers from hardirq context, there is no chance
+	 * they get migrated to another cpu, therefore its safe to unlock
+	 * the timer base.
+	 */
+	spin_unlock(&cpu_base->lock);
+	restart = fn(timer);
+	spin_lock(&cpu_base->lock);
 
 	/*
 	 * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
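
Note (not part of the patch): after this hunk every callback runs in hardirq context with cpu_base->lock dropped around fn(timer), so a callback can take its own locks without nesting them under the base lock (the removed branch named rq->lock and tasklist_lock as the motivating inversion). A hedged sketch with hypothetical names (example_lock, example_expire) of what any callback may now safely do:

static DEFINE_SPINLOCK(example_lock);	/* hypothetical */

static enum hrtimer_restart example_expire(struct hrtimer *timer)
{
	/*
	 * Runs with interrupts disabled (see the WARN_ON above) but
	 * without cpu_base->lock held, so taking another lock here
	 * cannot create a cpu_base->lock -> example_lock inversion.
	 */
	spin_lock(&example_lock);
	/* ... work that needs example_lock ... */
	spin_unlock(&example_lock);

	return HRTIMER_NORESTART;
}
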
@@ -1310,7 +1179,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	struct hrtimer_clock_base *base;
 	ktime_t expires_next, now;
-	int i, raise = 0;
+	int i;
 
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
@@ -1359,16 +1228,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 				break;
 			}
 
-			/* Move softirq callbacks to the pending list */
-			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-				__remove_hrtimer(timer, base,
-						 HRTIMER_STATE_PENDING, 0);
-				list_add_tail(&timer->cb_entry,
-					      &base->cpu_base->cb_pending);
-				raise = 1;
-				continue;
-			}
-
 			__run_hrtimer(timer);
 		}
 		spin_unlock(&cpu_base->lock);
@@ -1382,10 +1241,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 		if (tick_program_event(expires_next, 0))
 			goto retry;
 	}
-
-	/* Raise softirq ? */
-	if (raise)
-		raise_softirq(HRTIMER_SOFTIRQ);
 }
 
 /**
@@ -1412,11 +1267,6 @@ void hrtimer_peek_ahead_timers(void)
 	local_irq_restore(flags);
 }
 
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
-	run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
-}
-
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
 /*
@@ -1428,8 +1278,6 @@ static void run_hrtimer_softirq(struct softirq_action *h)
  */
 void hrtimer_run_pending(void)
 {
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
 	if (hrtimer_hres_active())
 		return;
 
@@ -1443,8 +1291,6 @@ void hrtimer_run_pending(void)
 	 */
 	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
 		hrtimer_switch_to_hres();
-
-	run_hrtimer_pending(cpu_base);
 }
 
 /*
@@ -1481,14 +1327,6 @@ void hrtimer_run_queues(void)
 					hrtimer_get_expires_tv64(timer))
 				break;
 
-			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-				__remove_hrtimer(timer, base,
-						 HRTIMER_STATE_PENDING, 0);
-				list_add_tail(&timer->cb_entry,
-					      &base->cpu_base->cb_pending);
-				continue;
-			}
-
 			__run_hrtimer(timer);
 		}
 		spin_unlock(&cpu_base->lock);
@@ -1515,9 +1353,6 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 {
 	sl->timer.function = hrtimer_wakeup;
 	sl->task = task;
-#ifdef CONFIG_HIGH_RES_TIMERS
-	sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
-#endif
 }
 
 static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
@@ -1654,18 +1489,16 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
 		cpu_base->clock_base[i].cpu_base = cpu_base;
 
-	INIT_LIST_HEAD(&cpu_base->cb_pending);
 	hrtimer_init_hres(cpu_base);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-				struct hrtimer_clock_base *new_base, int dcpu)
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+				 struct hrtimer_clock_base *new_base)
 {
 	struct hrtimer *timer;
 	struct rb_node *node;
-	int raise = 0;
 
 	while ((node = rb_first(&old_base->active))) {
 		timer = rb_entry(node, struct hrtimer, node);
@@ -1673,18 +1506,6 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 		debug_hrtimer_deactivate(timer);
 
 		/*
-		 * Should not happen. Per CPU timers should be
-		 * canceled _before_ the migration code is called
-		 */
-		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
-			__remove_hrtimer(timer, old_base,
-					 HRTIMER_STATE_INACTIVE, 0);
-			WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
-			     timer, timer->function, dcpu);
-			continue;
-		}
-
-		/*
 		 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
 		 * timer could be seen as !active and just vanish away
 		 * under us on another CPU
@@ -1692,69 +1513,34 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
 		timer->base = new_base;
 		/*
-		 * Enqueue the timer. Allow reprogramming of the event device
+		 * Enqueue the timers on the new cpu, but do not reprogram
+		 * the timer as that would enable a deadlock between
+		 * hrtimer_enqueue_reprogramm() running the timer and us still
+		 * holding a nested base lock.
+		 *
+		 * Instead we tickle the hrtimer interrupt after the migration
+		 * is done, which will run all expired timers and re-programm
+		 * the timer device.
 		 */
-		enqueue_hrtimer(timer, new_base, 1);
+		enqueue_hrtimer(timer, new_base, 0);
 
-#ifdef CONFIG_HIGH_RES_TIMERS
-		/*
-		 * Happens with high res enabled when the timer was
-		 * already expired and the callback mode is
-		 * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
-		 * enqueue code does not move them to the soft irq
-		 * pending list for performance/latency reasons, but
-		 * in the migration state, we need to do that
-		 * otherwise we end up with a stale timer.
-		 */
-		if (timer->state == HRTIMER_STATE_MIGRATE) {
-			timer->state = HRTIMER_STATE_PENDING;
-			list_add_tail(&timer->cb_entry,
-				      &new_base->cpu_base->cb_pending);
-			raise = 1;
-		}
-#endif
 		/* Clear the migration state bit */
 		timer->state &= ~HRTIMER_STATE_MIGRATE;
 	}
-	return raise;
-}
-
-#ifdef CONFIG_HIGH_RES_TIMERS
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
-				   struct hrtimer_cpu_base *new_base)
-{
-	struct hrtimer *timer;
-	int raise = 0;
-
-	while (!list_empty(&old_base->cb_pending)) {
-		timer = list_entry(old_base->cb_pending.next,
-				   struct hrtimer, cb_entry);
-
-		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
-		timer->base = &new_base->clock_base[timer->base->index];
-		list_add_tail(&timer->cb_entry, &new_base->cb_pending);
-		raise = 1;
-	}
-	return raise;
-}
-#else
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
-				   struct hrtimer_cpu_base *new_base)
-{
-	return 0;
 }
-#endif
 
-static void migrate_hrtimers(int cpu)
+static int migrate_hrtimers(int scpu)
 {
 	struct hrtimer_cpu_base *old_base, *new_base;
-	int i, raise = 0;
+	int dcpu, i;
 
-	BUG_ON(cpu_online(cpu));
-	old_base = &per_cpu(hrtimer_bases, cpu);
+	BUG_ON(cpu_online(scpu));
+	old_base = &per_cpu(hrtimer_bases, scpu);
 	new_base = &get_cpu_var(hrtimer_bases);
 
-	tick_cancel_sched_timer(cpu);
+	dcpu = smp_processor_id();
+
+	tick_cancel_sched_timer(scpu);
 	/*
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
@@ -1763,41 +1549,47 @@ static void migrate_hrtimers(int cpu)
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-		if (migrate_hrtimer_list(&old_base->clock_base[i],
-					 &new_base->clock_base[i], cpu))
-			raise = 1;
+		migrate_hrtimer_list(&old_base->clock_base[i],
+				     &new_base->clock_base[i]);
 	}
 
-	if (migrate_hrtimer_pending(old_base, new_base))
-		raise = 1;
-
 	spin_unlock(&old_base->lock);
 	spin_unlock_irq(&new_base->lock);
 	put_cpu_var(hrtimer_bases);
 
-	if (raise)
-		hrtimer_raise_softirq();
+	return dcpu;
+}
+
+static void tickle_timers(void *arg)
+{
+	hrtimer_peek_ahead_timers();
 }
+
 #endif /* CONFIG_HOTPLUG_CPU */
 
 static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 					unsigned long action, void *hcpu)
 {
-	unsigned int cpu = (long)hcpu;
+	int scpu = (long)hcpu;
 
 	switch (action) {
 
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		init_hrtimers_cpu(cpu);
+		init_hrtimers_cpu(scpu);
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
-		migrate_hrtimers(cpu);
+	{
+		int dcpu;
+
+		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
+		dcpu = migrate_hrtimers(scpu);
+		smp_call_function_single(dcpu, tickle_timers, NULL, 0);
 		break;
+	}
 #endif
 
 	default:
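
Note (not part of the patch): the migration path above deliberately enqueues timers on the destination CPU without reprogramming the clock event device (nested base locks are held at that point) and instead pokes that CPU afterwards so hrtimer_interrupt() runs any already-expired timers. A minimal sketch of that tickle pattern, with hypothetical names (example_tickle, example_poke); the final 0 passed to smp_call_function_single() means the caller does not wait for the IPI handler to complete:

static void example_tickle(void *unused)
{
	/* runs on the target CPU, in interrupt context */
	hrtimer_peek_ahead_timers();
}

static void example_poke(int target_cpu)
{
	smp_call_function_single(target_cpu, example_tickle, NULL, 0);
}
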
@@ -1816,9 +1608,6 @@ void __init hrtimers_init(void)
 	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
 			  (void *)(long)smp_processor_id());
 	register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
-	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
 }
 
 /**