Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--	kernel/hrtimer.c | 95
1 file changed, 85 insertions(+), 10 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 4d761d50c52..95978f48e03 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -672,13 +672,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 			 */
 			BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
 			return 1;
-		case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
+		case HRTIMER_CB_IRQSAFE_PERCPU:
+		case HRTIMER_CB_IRQSAFE_UNLOCKED:
 			/*
 			 * This is solely for the sched tick emulation with
 			 * dynamic tick support to ensure that we do not
 			 * restart the tick right on the edge and end up with
 			 * the tick timer in the softirq ! The calling site
-			 * takes care of this.
+			 * takes care of this. Also used for hrtimer sleeper !
 			 */
 			debug_hrtimer_deactivate(timer);
 			return 1;
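
The hunk above splits the old HRTIMER_CB_IRQSAFE_NO_SOFTIRQ callback mode in two: HRTIMER_CB_IRQSAFE_PERCPU for timers that are strictly per CPU and must never be migrated (the sched tick emulation), and HRTIMER_CB_IRQSAFE_UNLOCKED for timers whose callback merely runs without the base lock held and which may move to another CPU (the hrtimer sleeper). The enum itself lives in include/linux/hrtimer.h, which this diffstat does not cover; after the split it presumably reads roughly:

enum hrtimer_cb_mode {
	HRTIMER_CB_SOFTIRQ,
	HRTIMER_CB_IRQSAFE,
	HRTIMER_CB_IRQSAFE_NO_RESTART,
	HRTIMER_CB_IRQSAFE_PERCPU,	/* strictly per CPU, never migrated */
	HRTIMER_CB_IRQSAFE_UNLOCKED,	/* runs unlocked, may be migrated */
};
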
@@ -1245,7 +1246,8 @@ static void __run_hrtimer(struct hrtimer *timer)
 	timer_stats_account_hrtimer(timer);
 
 	fn = timer->function;
-	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) {
+	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
+	    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
 		/*
 		 * Used for scheduler timers, avoid lock inversion with
 		 * rq->lock and tasklist_lock.
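
Both new modes take the branch that used to be exclusive to HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: the callback is invoked with the per-CPU base lock dropped, so it may take rq->lock or tasklist_lock without inverting the lock order. A minimal sketch of the surrounding logic in __run_hrtimer(), assuming the unlock/call/relock dance is unchanged by this patch:

	fn = timer->function;
	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
	    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
		/*
		 * Run the callback without the base lock held to
		 * avoid inversion against rq->lock and tasklist_lock.
		 */
		spin_unlock(&cpu_base->lock);
		restart = fn(timer);
		spin_lock(&cpu_base->lock);
	} else {
		restart = fn(timer);
	}
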
@@ -1450,7 +1452,7 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 	sl->timer.function = hrtimer_wakeup;
 	sl->task = task;
 #ifdef CONFIG_HIGH_RES_TIMERS
-	sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+	sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
 #endif
 }
 
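Moving the sleeper from the old NO_SOFTIRQ mode to HRTIMER_CB_IRQSAFE_UNLOCKED is the point of the split: a sleeper only wakes a task, so it does not care which CPU it fires on, and the hotplug code below is now free to migrate it. For context, a sleeper is typically driven like the nanosleep path; a condensed sketch of that pattern (not part of this patch):

	struct hrtimer_sleeper t;

	hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_init_sleeper(&t, current);

	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_start(&t.timer, ktime_set(1, 0), HRTIMER_MODE_REL);
	if (t.task)		/* cleared by hrtimer_wakeup() */
		schedule();
	hrtimer_cancel(&t.timer);
	__set_current_state(TASK_RUNNING);
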
@@ -1589,29 +1591,95 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-				 struct hrtimer_clock_base *new_base)
+static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+				struct hrtimer_clock_base *new_base, int dcpu)
 {
 	struct hrtimer *timer;
 	struct rb_node *node;
+	int raise = 0;
 
 	while ((node = rb_first(&old_base->active))) {
 		timer = rb_entry(node, struct hrtimer, node);
 		BUG_ON(hrtimer_callback_running(timer));
 		debug_hrtimer_deactivate(timer);
-		__remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
+
+		/*
+		 * Should not happen. Per CPU timers should be
+		 * canceled _before_ the migration code is called
+		 */
+		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
+			__remove_hrtimer(timer, old_base,
+					 HRTIMER_STATE_INACTIVE, 0);
+			WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
+			     timer, timer->function, dcpu);
+			continue;
+		}
+
+		/*
+		 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
+		 * timer could be seen as !active and just vanish away
+		 * under us on another CPU
+		 */
+		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
 		timer->base = new_base;
 		/*
 		 * Enqueue the timer. Allow reprogramming of the event device
 		 */
 		enqueue_hrtimer(timer, new_base, 1);
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+		/*
+		 * Happens with high res enabled when the timer was
+		 * already expired and the callback mode is
+		 * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
+		 * enqueue code does not move them to the soft irq
+		 * pending list for performance/latency reasons, but
+		 * in the migration state, we need to do that
+		 * otherwise we end up with a stale timer.
+		 */
+		if (timer->state == HRTIMER_STATE_MIGRATE) {
+			timer->state = HRTIMER_STATE_PENDING;
+			list_add_tail(&timer->cb_entry,
+				      &new_base->cpu_base->cb_pending);
+			raise = 1;
+		}
+#endif
+		/* Clear the migration state bit */
+		timer->state &= ~HRTIMER_STATE_MIGRATE;
+	}
+	return raise;
+}
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
+				   struct hrtimer_cpu_base *new_base)
+{
+	struct hrtimer *timer;
+	int raise = 0;
+
+	while (!list_empty(&old_base->cb_pending)) {
+		timer = list_entry(old_base->cb_pending.next,
+				   struct hrtimer, cb_entry);
+
+		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
+		timer->base = &new_base->clock_base[timer->base->index];
+		list_add_tail(&timer->cb_entry, &new_base->cb_pending);
+		raise = 1;
 	}
+	return raise;
+}
+#else
+static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
+				   struct hrtimer_cpu_base *new_base)
+{
+	return 0;
 }
+#endif
 
 static void migrate_hrtimers(int cpu)
 {
 	struct hrtimer_cpu_base *old_base, *new_base;
-	int i;
+	int i, raise = 0;
 
 	BUG_ON(cpu_online(cpu));
 	old_base = &per_cpu(hrtimer_bases, cpu);
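
The new HRTIMER_STATE_MIGRATE bit exists so that hrtimer_active(), which reports any state other than HRTIMER_STATE_INACTIVE as active, keeps the timer visibly active while it is in flight between bases. The state bits are defined in include/linux/hrtimer.h (outside this diffstat); after this patch they presumably look like:

#define HRTIMER_STATE_INACTIVE	0x00	/* not enqueued anywhere */
#define HRTIMER_STATE_ENQUEUED	0x01	/* queued on a clock base rbtree */
#define HRTIMER_STATE_CALLBACK	0x02	/* callback is running */
#define HRTIMER_STATE_PENDING	0x04	/* on the softirq cb_pending list */
#define HRTIMER_STATE_MIGRATE	0x08	/* moving to another CPU (new here) */
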
@@ -1626,13 +1694,20 @@ static void migrate_hrtimers(int cpu)
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-		migrate_hrtimer_list(&old_base->clock_base[i],
-				     &new_base->clock_base[i]);
+		if (migrate_hrtimer_list(&old_base->clock_base[i],
+					 &new_base->clock_base[i], cpu))
+			raise = 1;
 	}
 
+	if (migrate_hrtimer_pending(old_base, new_base))
+		raise = 1;
+
 	spin_unlock(&old_base->lock);
 	spin_unlock_irq(&new_base->lock);
 	put_cpu_var(hrtimer_bases);
+
+	if (raise)
+		hrtimer_raise_softirq();
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
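Note that the softirq is raised only after both base locks are dropped, presumably because raising it can end up waking ksoftirqd, and that wakeup takes rq->lock, which must not nest inside the hrtimer base locks. For context, migrate_hrtimers() runs from the CPU hotplug notifier once the dead CPU is gone; a condensed sketch of the CPU_DEAD leg (not part of this diff):

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
		migrate_hrtimers(cpu);	/* the function patched above */
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}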