Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c | 110
1 file changed, 92 insertions(+), 18 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index b8e4dce80a7..95978f48e03 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -672,13 +672,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 			 */
 			BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
 			return 1;
-		case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
+		case HRTIMER_CB_IRQSAFE_PERCPU:
+		case HRTIMER_CB_IRQSAFE_UNLOCKED:
 			/*
 			 * This is solely for the sched tick emulation with
 			 * dynamic tick support to ensure that we do not
 			 * restart the tick right on the edge and end up with
 			 * the tick timer in the softirq ! The calling site
-			 * takes care of this.
+			 * takes care of this. Also used for hrtimer sleeper !
 			 */
 			debug_hrtimer_deactivate(timer);
 			return 1;
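
This hunk splits the old HRTIMER_CB_IRQSAFE_NO_SOFTIRQ mode in two: HRTIMER_CB_IRQSAFE_PERCPU for timers that are strictly per CPU and must be canceled before that CPU goes down (the sched tick emulation), and HRTIMER_CB_IRQSAFE_UNLOCKED for timers that run unlocked from hard irq context but may be migrated (the hrtimer sleeper). A minimal sketch of how a caller would pick each mode, assuming a 2.6.27-era tree where struct hrtimer still has a cb_mode field (the function name below is illustrative, not from the patch):

#include <linux/hrtimer.h>

static struct hrtimer tick_emu_timer;	/* per-CPU sched tick emulation */
static struct hrtimer wakeup_timer;	/* hrtimer_sleeper style wakeup */

static void pick_cb_modes(void)
{
	hrtimer_init(&tick_emu_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* strictly per CPU: must be canceled before the CPU is unplugged */
	tick_emu_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;

	hrtimer_init(&wakeup_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	/* runs unlocked from hard irq context, but is safe to migrate */
	wakeup_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
}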
@@ -1245,7 +1246,8 @@ static void __run_hrtimer(struct hrtimer *timer)
 	timer_stats_account_hrtimer(timer);
 
 	fn = timer->function;
-	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) {
+	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
+	    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
 		/*
 		 * Used for scheduler timers, avoid lock inversion with
 		 * rq->lock and tasklist_lock.
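
Both new modes keep the old NO_SOFTIRQ behaviour in __run_hrtimer: the callback runs with the per-CPU base lock dropped, so a handler that takes rq->lock cannot invert against code that holds rq->lock while arming a timer. A hedged sketch of that dispatch (the helper name is hypothetical; the real function also handles removal, requeueing and the state bits):

static enum hrtimer_restart
run_unlocked_cb(struct hrtimer_cpu_base *cpu_base, struct hrtimer *timer)
{
	enum hrtimer_restart (*fn)(struct hrtimer *) = timer->function;
	enum hrtimer_restart restart;

	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
	    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
		/* drop the base lock so fn() may take rq->lock etc. */
		spin_unlock(&cpu_base->lock);
		restart = fn(timer);
		spin_lock(&cpu_base->lock);
	} else {
		/* ordinary softirq callbacks run under cpu_base->lock */
		restart = fn(timer);
	}
	return restart;
}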
@@ -1401,9 +1403,7 @@ void hrtimer_run_queues(void)
 		if (!base->first)
 			continue;
 
-		if (base->get_softirq_time)
-			base->softirq_time = base->get_softirq_time();
-		else if (gettime) {
+		if (gettime) {
 			hrtimer_get_softirq_time(cpu_base);
 			gettime = 0;
 		}
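
With the per-base get_softirq_time() hook gone, hrtimer_run_queues() samples the base time at most once per invocation and shares it across all clock bases via the gettime flag. A sketch of the resulting loop shape, simplified from the function this hunk patches:

static void hrtimer_run_queues_sketch(struct hrtimer_cpu_base *cpu_base)
{
	int i, gettime = 1;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		struct hrtimer_clock_base *base = &cpu_base->clock_base[i];

		if (!base->first)
			continue;		/* no timers queued here */

		if (gettime) {
			/* one xtime/monotonic read serves every base */
			hrtimer_get_softirq_time(cpu_base);
			gettime = 0;
		}
		/* ... expire timers against base->softirq_time ... */
	}
}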
@@ -1452,7 +1452,7 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 	sl->timer.function = hrtimer_wakeup;
 	sl->task = task;
 #ifdef CONFIG_HIGH_RES_TIMERS
-	sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+	sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
 #endif
 }
 
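hrtimer sleepers get HRTIMER_CB_IRQSAFE_UNLOCKED because their callback, hrtimer_wakeup(), only wakes the task recorded in the sleeper; nothing in it is tied to the dying CPU, so the migration path may safely move it. A hedged usage sketch, modeled loosely on do_nanosleep() of this era (the function name below is illustrative):

/* returns nonzero if the timer fired, 0 if we were woken early */
static int sleep_rel(ktime_t rel)
{
	struct hrtimer_sleeper t;

	hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_init_sleeper(&t, current);	/* sets cb_mode = ..._UNLOCKED */

	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_start(&t.timer, rel, HRTIMER_MODE_REL);

	if (t.task)			/* hrtimer_wakeup() clears t.task */
		schedule();

	hrtimer_cancel(&t.timer);
	__set_current_state(TASK_RUNNING);

	return t.task == NULL;
}
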
@@ -1591,49 +1591,123 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-				struct hrtimer_clock_base *new_base)
+static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+				struct hrtimer_clock_base *new_base, int dcpu)
 {
 	struct hrtimer *timer;
 	struct rb_node *node;
+	int raise = 0;
 
 	while ((node = rb_first(&old_base->active))) {
 		timer = rb_entry(node, struct hrtimer, node);
 		BUG_ON(hrtimer_callback_running(timer));
 		debug_hrtimer_deactivate(timer);
-		__remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
+
+		/*
+		 * Should not happen. Per CPU timers should be
+		 * canceled _before_ the migration code is called
+		 */
+		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
+			__remove_hrtimer(timer, old_base,
+					 HRTIMER_STATE_INACTIVE, 0);
+			WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
+			     timer, timer->function, dcpu);
+			continue;
+		}
+
+		/*
+		 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
+		 * timer could be seen as !active and just vanish away
+		 * under us on another CPU
+		 */
+		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
 		timer->base = new_base;
 		/*
 		 * Enqueue the timer. Allow reprogramming of the event device
 		 */
 		enqueue_hrtimer(timer, new_base, 1);
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+		/*
+		 * Happens with high res enabled when the timer was
+		 * already expired and the callback mode is
+		 * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
+		 * enqueue code does not move them to the soft irq
+		 * pending list for performance/latency reasons, but
+		 * in the migration state, we need to do that
+		 * otherwise we end up with a stale timer.
+		 */
+		if (timer->state == HRTIMER_STATE_MIGRATE) {
+			timer->state = HRTIMER_STATE_PENDING;
+			list_add_tail(&timer->cb_entry,
+				      &new_base->cpu_base->cb_pending);
+			raise = 1;
+		}
+#endif
+		/* Clear the migration state bit */
+		timer->state &= ~HRTIMER_STATE_MIGRATE;
 	}
+	return raise;
 }
 
+#ifdef CONFIG_HIGH_RES_TIMERS
+static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
+				   struct hrtimer_cpu_base *new_base)
+{
+	struct hrtimer *timer;
+	int raise = 0;
+
+	while (!list_empty(&old_base->cb_pending)) {
+		timer = list_entry(old_base->cb_pending.next,
+				   struct hrtimer, cb_entry);
+
+		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
+		timer->base = &new_base->clock_base[timer->base->index];
+		list_add_tail(&timer->cb_entry, &new_base->cb_pending);
+		raise = 1;
+	}
+	return raise;
+}
+#else
+static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
+				   struct hrtimer_cpu_base *new_base)
+{
+	return 0;
+}
+#endif
+
 static void migrate_hrtimers(int cpu)
 {
 	struct hrtimer_cpu_base *old_base, *new_base;
-	int i;
+	int i, raise = 0;
 
 	BUG_ON(cpu_online(cpu));
 	old_base = &per_cpu(hrtimer_bases, cpu);
 	new_base = &get_cpu_var(hrtimer_bases);
 
 	tick_cancel_sched_timer(cpu);
-
-	local_irq_disable();
-	spin_lock(&new_base->lock);
+	/*
+	 * The caller is globally serialized and nobody else
+	 * takes two locks at once, deadlock is not possible.
+	 */
+	spin_lock_irq(&new_base->lock);
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-		migrate_hrtimer_list(&old_base->clock_base[i],
-				     &new_base->clock_base[i]);
+		if (migrate_hrtimer_list(&old_base->clock_base[i],
+					 &new_base->clock_base[i], cpu))
+			raise = 1;
 	}
 
+	if (migrate_hrtimer_pending(old_base, new_base))
+		raise = 1;
+
 	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
-	local_irq_enable();
+	spin_unlock_irq(&new_base->lock);
 	put_cpu_var(hrtimer_bases);
+
+	if (raise)
+		hrtimer_raise_softirq();
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
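The migration rework does three things: per-CPU timers still active when their CPU dies are removed with a WARN instead of being silently moved; ordinary timers are carried over under the new HRTIMER_STATE_MIGRATE state; and expired HRTIMER_CB_IRQSAFE_UNLOCKED timers, plus the old CPU's cb_pending list, are appended to the destination CPU's pending list, with the softirq raised once at the end. The MIGRATE state matters because the hrtimer state field is a bit mask and "active" simply means "state != INACTIVE"; marking a timer INACTIVE mid-move would let an observer on another CPU see it as dead and recycle it. A sketch of the relevant definitions, from memory of the 2.6.27-era include/linux/hrtimer.h (the exact bit values are illustrative):

/*
 * Timer states are a bit field; a timer is "active" whenever any
 * bit is set. HRTIMER_STATE_MIGRATE is the bit this patch adds.
 */
#define HRTIMER_STATE_INACTIVE	0x00	/* not queued anywhere */
#define HRTIMER_STATE_ENQUEUED	0x01	/* queued on a clock base rbtree */
#define HRTIMER_STATE_CALLBACK	0x02	/* callback running */
#define HRTIMER_STATE_PENDING	0x04	/* on the cb_pending softirq list */
#define HRTIMER_STATE_MIGRATE	0x08	/* in flight between CPU bases */

/* why INACTIVE would be wrong during the move: */
static inline int hrtimer_active(const struct hrtimer *timer)
{
	return timer->state != HRTIMER_STATE_INACTIVE;
}

With MIGRATE set, hrtimer_active() keeps returning true for the whole window between __remove_hrtimer() on the dead base and enqueue_hrtimer() on the new one, so a waiter that polls hrtimer_active() before freeing the timer cannot race with the move.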