author    David Vrabel <david.vrabel@csr.com>  2009-01-02 08:17:13 -0500
committer David Vrabel <david.vrabel@csr.com>  2009-01-02 08:17:13 -0500
commit    b21a207141d83a06abc5f492b80204602e02ca44 (patch)
tree      f0152cde543008c72d7eb5c12c18095ad92785e6 /kernel/hrtimer.c
parent    3af373021fa32f8f787bfbdcc1a9277a287bde4e (diff)
parent    b58602a4bac012b5f4fc12fe6b46ab237b610d5d (diff)

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-upstream

Conflicts:
	drivers/uwb/wlp/eda.c
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c  332
1 file changed, 60 insertions(+), 272 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 47e63349d1b2..eb2bfefa6dcc 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -32,7 +32,6 @@
  */
 
 #include <linux/cpu.h>
-#include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
@@ -442,22 +441,6 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
 #endif
 
-/*
- * Check, whether the timer is on the callback pending list
- */
-static inline int hrtimer_cb_pending(const struct hrtimer *timer)
-{
-	return timer->state & HRTIMER_STATE_PENDING;
-}
-
-/*
- * Remove a timer from the callback pending list
- */
-static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
-{
-	list_del_init(&timer->cb_entry);
-}
-
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
@@ -651,6 +634,8 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
 {
 }
 
+static void __run_hrtimer(struct hrtimer *timer);
+
 /*
  * When High resolution timers are active, try to reprogram. Note, that in case
  * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
@@ -661,31 +646,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 				    struct hrtimer_clock_base *base)
 {
 	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-
-		/* Timer is expired, act upon the callback mode */
-		switch(timer->cb_mode) {
-		case HRTIMER_CB_IRQSAFE_PERCPU:
-		case HRTIMER_CB_IRQSAFE_UNLOCKED:
-			/*
-			 * This is solely for the sched tick emulation with
-			 * dynamic tick support to ensure that we do not
-			 * restart the tick right on the edge and end up with
-			 * the tick timer in the softirq ! The calling site
-			 * takes care of this. Also used for hrtimer sleeper !
-			 */
-			debug_hrtimer_deactivate(timer);
-			return 1;
-		case HRTIMER_CB_SOFTIRQ:
-			/*
-			 * Move everything else into the softirq pending list !
-			 */
-			list_add_tail(&timer->cb_entry,
-				      &base->cpu_base->cb_pending);
-			timer->state = HRTIMER_STATE_PENDING;
-			return 1;
-		default:
-			BUG();
-		}
+		/*
+		 * XXX: recursion check?
+		 * hrtimer_forward() should round up with timer granularity
+		 * so that we never get into inf recursion here,
+		 * it doesn't do that though
+		 */
+		__run_hrtimer(timer);
+		return 1;
 	}
 	return 0;
 }
@@ -724,11 +692,6 @@ static int hrtimer_switch_to_hres(void)
 	return 1;
 }
 
-static inline void hrtimer_raise_softirq(void)
-{
-	raise_softirq(HRTIMER_SOFTIRQ);
-}
-
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -747,7 +710,6 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
 {
 	return 0;
 }
-static inline void hrtimer_raise_softirq(void) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -890,10 +852,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
 			     struct hrtimer_clock_base *base,
 			     unsigned long newstate, int reprogram)
 {
-	/* High res. callback list. NOP for !HIGHRES */
-	if (hrtimer_cb_pending(timer))
-		hrtimer_remove_cb_pending(timer);
-	else {
+	if (timer->state & HRTIMER_STATE_ENQUEUED) {
 		/*
 		 * Remove the timer from the rbtree and replace the
 		 * first entry pointer if necessary.
@@ -953,7 +912,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 {
 	struct hrtimer_clock_base *base, *new_base;
 	unsigned long flags;
-	int ret, raise;
+	int ret;
 
 	base = lock_hrtimer_base(timer, &flags);
 
@@ -988,26 +947,8 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 	enqueue_hrtimer(timer, new_base,
 			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
 
-	/*
-	 * The timer may be expired and moved to the cb_pending
-	 * list. We can not raise the softirq with base lock held due
-	 * to a possible deadlock with runqueue lock.
-	 */
-	raise = timer->state == HRTIMER_STATE_PENDING;
-
-	/*
-	 * We use preempt_disable to prevent this task from migrating after
-	 * setting up the softirq and raising it. Otherwise, if me migrate
-	 * we will raise the softirq on the wrong CPU.
-	 */
-	preempt_disable();
-
 	unlock_hrtimer_base(timer, &flags);
 
-	if (raise)
-		hrtimer_raise_softirq();
-	preempt_enable();
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
@@ -1192,75 +1133,6 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 }
 EXPORT_SYMBOL_GPL(hrtimer_get_res);
 
-static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
-{
-	spin_lock_irq(&cpu_base->lock);
-
-	while (!list_empty(&cpu_base->cb_pending)) {
-		enum hrtimer_restart (*fn)(struct hrtimer *);
-		struct hrtimer *timer;
-		int restart;
-		int emulate_hardirq_ctx = 0;
-
-		timer = list_entry(cpu_base->cb_pending.next,
-				   struct hrtimer, cb_entry);
-
-		debug_hrtimer_deactivate(timer);
-		timer_stats_account_hrtimer(timer);
-
-		fn = timer->function;
-		/*
-		 * A timer might have been added to the cb_pending list
-		 * when it was migrated during a cpu-offline operation.
-		 * Emulate hardirq context for such timers.
-		 */
-		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
-		    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
-			emulate_hardirq_ctx = 1;
-
-		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
-		spin_unlock_irq(&cpu_base->lock);
-
-		if (unlikely(emulate_hardirq_ctx)) {
-			local_irq_disable();
-			restart = fn(timer);
-			local_irq_enable();
-		} else
-			restart = fn(timer);
-
-		spin_lock_irq(&cpu_base->lock);
-
-		timer->state &= ~HRTIMER_STATE_CALLBACK;
-		if (restart == HRTIMER_RESTART) {
-			BUG_ON(hrtimer_active(timer));
-			/*
-			 * Enqueue the timer, allow reprogramming of the event
-			 * device
-			 */
-			enqueue_hrtimer(timer, timer->base, 1);
-		} else if (hrtimer_active(timer)) {
-			/*
-			 * If the timer was rearmed on another CPU, reprogram
-			 * the event device.
-			 */
-			struct hrtimer_clock_base *base = timer->base;
-
-			if (base->first == &timer->node &&
-			    hrtimer_reprogram(timer, base)) {
-				/*
-				 * Timer is expired. Thus move it from tree to
-				 * pending list again.
-				 */
-				__remove_hrtimer(timer, base,
-						 HRTIMER_STATE_PENDING, 0);
-				list_add_tail(&timer->cb_entry,
-					      &base->cpu_base->cb_pending);
-			}
-		}
-	}
-	spin_unlock_irq(&cpu_base->lock);
-}
-
 static void __run_hrtimer(struct hrtimer *timer)
 {
 	struct hrtimer_clock_base *base = timer->base;
@@ -1268,25 +1140,21 @@ static void __run_hrtimer(struct hrtimer *timer)
 	enum hrtimer_restart (*fn)(struct hrtimer *);
 	int restart;
 
+	WARN_ON(!irqs_disabled());
+
 	debug_hrtimer_deactivate(timer);
 	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
 	timer_stats_account_hrtimer(timer);
-
 	fn = timer->function;
-	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
-	    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
-		/*
-		 * Used for scheduler timers, avoid lock inversion with
-		 * rq->lock and tasklist_lock.
-		 *
-		 * These timers are required to deal with enqueue expiry
-		 * themselves and are not allowed to migrate.
-		 */
-		spin_unlock(&cpu_base->lock);
-		restart = fn(timer);
-		spin_lock(&cpu_base->lock);
-	} else
-		restart = fn(timer);
+
+	/*
+	 * Because we run timers from hardirq context, there is no chance
+	 * they get migrated to another cpu, therefore its safe to unlock
+	 * the timer base.
+	 */
+	spin_unlock(&cpu_base->lock);
+	restart = fn(timer);
+	spin_lock(&cpu_base->lock);
 
 	/*
 	 * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
@@ -1311,7 +1179,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	struct hrtimer_clock_base *base;
 	ktime_t expires_next, now;
-	int i, raise = 0;
+	int i;
 
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
@@ -1360,16 +1228,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 				break;
 			}
 
-			/* Move softirq callbacks to the pending list */
-			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-				__remove_hrtimer(timer, base,
-						 HRTIMER_STATE_PENDING, 0);
-				list_add_tail(&timer->cb_entry,
-					      &base->cpu_base->cb_pending);
-				raise = 1;
-				continue;
-			}
-
 			__run_hrtimer(timer);
 		}
 		spin_unlock(&cpu_base->lock);
@@ -1383,10 +1241,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 		if (tick_program_event(expires_next, 0))
 			goto retry;
 	}
-
-	/* Raise softirq ? */
-	if (raise)
-		raise_softirq(HRTIMER_SOFTIRQ);
 }
 
 /**
@@ -1413,11 +1267,6 @@ void hrtimer_peek_ahead_timers(void)
 	local_irq_restore(flags);
 }
 
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
-	run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
-}
-
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
 /*
@@ -1429,8 +1278,6 @@ static void run_hrtimer_softirq(struct softirq_action *h)
  */
 void hrtimer_run_pending(void)
 {
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
 	if (hrtimer_hres_active())
 		return;
 
@@ -1444,8 +1291,6 @@ void hrtimer_run_pending(void)
 	 */
 	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
 		hrtimer_switch_to_hres();
-
-	run_hrtimer_pending(cpu_base);
 }
 
 /*
@@ -1482,14 +1327,6 @@ void hrtimer_run_queues(void)
 					hrtimer_get_expires_tv64(timer))
 				break;
 
-			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-				__remove_hrtimer(timer, base,
-						 HRTIMER_STATE_PENDING, 0);
-				list_add_tail(&timer->cb_entry,
-					      &base->cpu_base->cb_pending);
-				continue;
-			}
-
 			__run_hrtimer(timer);
 		}
 		spin_unlock(&cpu_base->lock);
@@ -1516,9 +1353,6 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 {
 	sl->timer.function = hrtimer_wakeup;
 	sl->task = task;
-#ifdef CONFIG_HIGH_RES_TIMERS
-	sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
-#endif
 }
 
 static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
@@ -1655,18 +1489,16 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
 		cpu_base->clock_base[i].cpu_base = cpu_base;
 
-	INIT_LIST_HEAD(&cpu_base->cb_pending);
 	hrtimer_init_hres(cpu_base);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-				struct hrtimer_clock_base *new_base, int dcpu)
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+				 struct hrtimer_clock_base *new_base)
 {
 	struct hrtimer *timer;
 	struct rb_node *node;
-	int raise = 0;
 
 	while ((node = rb_first(&old_base->active))) {
 		timer = rb_entry(node, struct hrtimer, node);
@@ -1674,18 +1506,6 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 		debug_hrtimer_deactivate(timer);
 
 		/*
-		 * Should not happen. Per CPU timers should be
-		 * canceled _before_ the migration code is called
-		 */
-		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
-			__remove_hrtimer(timer, old_base,
-					 HRTIMER_STATE_INACTIVE, 0);
-			WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
-			     timer, timer->function, dcpu);
-			continue;
-		}
-
-		/*
 		 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
 		 * timer could be seen as !active and just vanish away
 		 * under us on another CPU
@@ -1693,69 +1513,34 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
 		timer->base = new_base;
 		/*
-		 * Enqueue the timer. Allow reprogramming of the event device
+		 * Enqueue the timers on the new cpu, but do not reprogram
+		 * the timer as that would enable a deadlock between
+		 * hrtimer_enqueue_reprogramm() running the timer and us still
+		 * holding a nested base lock.
+		 *
+		 * Instead we tickle the hrtimer interrupt after the migration
+		 * is done, which will run all expired timers and re-programm
+		 * the timer device.
 		 */
-		enqueue_hrtimer(timer, new_base, 1);
+		enqueue_hrtimer(timer, new_base, 0);
 
-#ifdef CONFIG_HIGH_RES_TIMERS
-		/*
-		 * Happens with high res enabled when the timer was
-		 * already expired and the callback mode is
-		 * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
-		 * enqueue code does not move them to the soft irq
-		 * pending list for performance/latency reasons, but
-		 * in the migration state, we need to do that
-		 * otherwise we end up with a stale timer.
-		 */
-		if (timer->state == HRTIMER_STATE_MIGRATE) {
-			timer->state = HRTIMER_STATE_PENDING;
-			list_add_tail(&timer->cb_entry,
-				      &new_base->cpu_base->cb_pending);
-			raise = 1;
-		}
-#endif
 		/* Clear the migration state bit */
 		timer->state &= ~HRTIMER_STATE_MIGRATE;
 	}
-	return raise;
-}
-
-#ifdef CONFIG_HIGH_RES_TIMERS
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
-				   struct hrtimer_cpu_base *new_base)
-{
-	struct hrtimer *timer;
-	int raise = 0;
-
-	while (!list_empty(&old_base->cb_pending)) {
-		timer = list_entry(old_base->cb_pending.next,
-				   struct hrtimer, cb_entry);
-
-		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
-		timer->base = &new_base->clock_base[timer->base->index];
-		list_add_tail(&timer->cb_entry, &new_base->cb_pending);
-		raise = 1;
-	}
-	return raise;
-}
-#else
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
-				   struct hrtimer_cpu_base *new_base)
-{
-	return 0;
 }
-#endif
 
-static void migrate_hrtimers(int cpu)
+static int migrate_hrtimers(int scpu)
 {
 	struct hrtimer_cpu_base *old_base, *new_base;
-	int i, raise = 0;
+	int dcpu, i;
 
-	BUG_ON(cpu_online(cpu));
-	old_base = &per_cpu(hrtimer_bases, cpu);
+	BUG_ON(cpu_online(scpu));
+	old_base = &per_cpu(hrtimer_bases, scpu);
 	new_base = &get_cpu_var(hrtimer_bases);
 
-	tick_cancel_sched_timer(cpu);
+	dcpu = smp_processor_id();
+
+	tick_cancel_sched_timer(scpu);
 	/*
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
@@ -1764,41 +1549,47 @@ static void migrate_hrtimers(int cpu)
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-		if (migrate_hrtimer_list(&old_base->clock_base[i],
-					 &new_base->clock_base[i], cpu))
-			raise = 1;
+		migrate_hrtimer_list(&old_base->clock_base[i],
+				     &new_base->clock_base[i]);
 	}
 
-	if (migrate_hrtimer_pending(old_base, new_base))
-		raise = 1;
-
 	spin_unlock(&old_base->lock);
 	spin_unlock_irq(&new_base->lock);
 	put_cpu_var(hrtimer_bases);
 
-	if (raise)
-		hrtimer_raise_softirq();
+	return dcpu;
+}
+
+static void tickle_timers(void *arg)
+{
+	hrtimer_peek_ahead_timers();
 }
+
 #endif /* CONFIG_HOTPLUG_CPU */
 
 static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 					unsigned long action, void *hcpu)
 {
-	unsigned int cpu = (long)hcpu;
+	int scpu = (long)hcpu;
 
 	switch (action) {
 
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		init_hrtimers_cpu(cpu);
+		init_hrtimers_cpu(scpu);
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
-		migrate_hrtimers(cpu);
+	{
+		int dcpu;
+
+		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
+		dcpu = migrate_hrtimers(scpu);
+		smp_call_function_single(dcpu, tickle_timers, NULL, 0);
 		break;
+	}
 #endif
 
 	default:
@@ -1817,9 +1608,6 @@ void __init hrtimers_init(void)
 	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
 			   (void *)(long)smp_processor_id());
 	register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
-	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
 }
 
 /**