path: root/kernel/hrtimer.c
author		Arjan van de Ven <arjan@linux.intel.com>	2008-10-17 12:20:26 -0400
committer	Arjan van de Ven <arjan@linux.intel.com>	2008-10-17 12:20:26 -0400
commit		651dab4264e4ba0e563f5ff56f748127246e9065 (patch)
tree		016630974bdcb00fe529b673f96d389e0fd6dc94 /kernel/hrtimer.c
parent		40b8606253552109815786e5d4b0de98782d31f5 (diff)
parent		2e532d68a2b3e2aa6b19731501222069735c741c (diff)
Merge commit 'linus/master' into merge-linus
Conflicts:
        arch/x86/kvm/i8254.c
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--	kernel/hrtimer.c	95
1 file changed, 85 insertions(+), 10 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 2bd230be1cb5..51ee90bca2de 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -672,13 +672,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                  */
                 BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
                 return 1;
-        case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
+        case HRTIMER_CB_IRQSAFE_PERCPU:
+        case HRTIMER_CB_IRQSAFE_UNLOCKED:
                 /*
                  * This is solely for the sched tick emulation with
                  * dynamic tick support to ensure that we do not
                  * restart the tick right on the edge and end up with
                  * the tick timer in the softirq ! The calling site
-                 * takes care of this.
+                 * takes care of this. Also used for hrtimer sleeper !
                  */
                 debug_hrtimer_deactivate(timer);
                 return 1;
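The split of HRTIMER_CB_IRQSAFE_NO_SOFTIRQ into two modes is the core of this merge: PERCPU marks callbacks strictly bound to their CPU (the sched tick), while UNLOCKED marks callbacks that merely run without the base lock and may migrate (the hrtimer sleeper). A sketch of the resulting enum; the pre-existing values are elided and the comments are mine, not the kernel's:

enum hrtimer_cb_mode {
        /* ... pre-existing softirq/irqsafe modes elided ... */
        HRTIMER_CB_IRQSAFE_PERCPU,      /* bound to one CPU, e.g. sched tick */
        HRTIMER_CB_IRQSAFE_UNLOCKED,    /* may run on any CPU, e.g. sleeper */
};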
@@ -1266,7 +1267,8 @@ static void __run_hrtimer(struct hrtimer *timer)
         timer_stats_account_hrtimer(timer);
 
         fn = timer->function;
-        if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) {
+        if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
+            timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
                 /*
                  * Used for scheduler timers, avoid lock inversion with
                  * rq->lock and tasklist_lock.
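Per the comment, both modes are invoked with the per-CPU base lock dropped, so the callback may take rq->lock without inverting the lock order against a CPU that holds rq->lock and then wants the base lock. A minimal sketch of the surrounding pattern, assuming the names cpu_base, fn, and restart as they appear in __run_hrtimer of this era; not the kernel's verbatim code:

#include <linux/hrtimer.h>

static void run_unlocked_callback(struct hrtimer_cpu_base *cpu_base,
                                  struct hrtimer *timer,
                                  enum hrtimer_restart (*fn)(struct hrtimer *))
{
        int restart;

        if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
            timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
                /* Drop the per-CPU base lock so fn() may take rq->lock */
                spin_unlock(&cpu_base->lock);
                restart = fn(timer);
                spin_lock(&cpu_base->lock);
        } else {
                restart = fn(timer);
        }
        (void)restart;  /* the real code re-enqueues on HRTIMER_RESTART */
}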
@@ -1517,7 +1519,7 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
         sl->timer.function = hrtimer_wakeup;
         sl->task = task;
 #ifdef CONFIG_HIGH_RES_TIMERS
-        sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+        sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
 #endif
 }
 
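For context, a sleeper initialized this way is consumed by starting its timer and scheduling until hrtimer_wakeup() clears sl->task. The sketch below is closely modeled on this era's do_nanosleep(), simplified and not verbatim:

#include <linux/hrtimer.h>
#include <linux/sched.h>

static int sleeper_wait(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
        hrtimer_init_sleeper(t, current);

        do {
                set_current_state(TASK_INTERRUPTIBLE);
                hrtimer_start(&t->timer, t->timer.expires, mode);

                if (likely(t->task))
                        schedule();     /* hrtimer_wakeup() clears t->task */

                hrtimer_cancel(&t->timer);
                mode = HRTIMER_MODE_ABS;
        } while (t->task && !signal_pending(current));

        __set_current_state(TASK_RUNNING);

        return t->task == NULL; /* 1: timeout expired, 0: interrupted */
}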
@@ -1661,29 +1663,95 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-                                 struct hrtimer_clock_base *new_base)
+static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+                                struct hrtimer_clock_base *new_base, int dcpu)
 {
         struct hrtimer *timer;
         struct rb_node *node;
+        int raise = 0;
 
         while ((node = rb_first(&old_base->active))) {
                 timer = rb_entry(node, struct hrtimer, node);
                 BUG_ON(hrtimer_callback_running(timer));
                 debug_hrtimer_deactivate(timer);
-                __remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
+
+                /*
+                 * Should not happen. Per CPU timers should be
+                 * canceled _before_ the migration code is called
+                 */
+                if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
+                        __remove_hrtimer(timer, old_base,
+                                         HRTIMER_STATE_INACTIVE, 0);
+                        WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
+                             timer, timer->function, dcpu);
+                        continue;
+                }
+
+                /*
+                 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
+                 * timer could be seen as !active and just vanish away
+                 * under us on another CPU
+                 */
+                __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
                 timer->base = new_base;
                 /*
                  * Enqueue the timer. Allow reprogramming of the event device
                  */
                 enqueue_hrtimer(timer, new_base, 1);
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+                /*
+                 * Happens with high res enabled when the timer was
+                 * already expired and the callback mode is
+                 * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
+                 * enqueue code does not move them to the soft irq
+                 * pending list for performance/latency reasons, but
+                 * in the migration state, we need to do that
+                 * otherwise we end up with a stale timer.
+                 */
+                if (timer->state == HRTIMER_STATE_MIGRATE) {
+                        timer->state = HRTIMER_STATE_PENDING;
+                        list_add_tail(&timer->cb_entry,
+                                      &new_base->cpu_base->cb_pending);
+                        raise = 1;
+                }
+#endif
+                /* Clear the migration state bit */
+                timer->state &= ~HRTIMER_STATE_MIGRATE;
+        }
+        return raise;
+}
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
+                                   struct hrtimer_cpu_base *new_base)
+{
+        struct hrtimer *timer;
+        int raise = 0;
+
+        while (!list_empty(&old_base->cb_pending)) {
+                timer = list_entry(old_base->cb_pending.next,
+                                   struct hrtimer, cb_entry);
+
+                __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
+                timer->base = &new_base->clock_base[timer->base->index];
+                list_add_tail(&timer->cb_entry, &new_base->cb_pending);
+                raise = 1;
         }
+        return raise;
+}
+#else
+static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
+                                   struct hrtimer_cpu_base *new_base)
+{
+        return 0;
 }
+#endif
 
 static void migrate_hrtimers(int cpu)
 {
         struct hrtimer_cpu_base *old_base, *new_base;
-        int i;
+        int i, raise = 0;
 
         BUG_ON(cpu_online(cpu));
         old_base = &per_cpu(hrtimer_bases, cpu);
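The HRTIMER_STATE_MIGRATE trick above works because hrtimer_active() treats any nonzero state as active; keeping a state bit set across the remove/re-enqueue prevents another CPU from observing the timer as momentarily gone. The helper, as it reads in this era's include/linux/hrtimer.h:

static inline int hrtimer_active(const struct hrtimer *timer)
{
        return timer->state != HRTIMER_STATE_INACTIVE;
}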
@@ -1696,14 +1764,21 @@ static void migrate_hrtimers(int cpu)
         spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
         for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-                migrate_hrtimer_list(&old_base->clock_base[i],
-                                     &new_base->clock_base[i]);
+                if (migrate_hrtimer_list(&old_base->clock_base[i],
+                                         &new_base->clock_base[i], cpu))
+                        raise = 1;
         }
 
+        if (migrate_hrtimer_pending(old_base, new_base))
+                raise = 1;
+
         spin_unlock(&old_base->lock);
         spin_unlock(&new_base->lock);
         local_irq_enable();
         put_cpu_var(hrtimer_bases);
+
+        if (raise)
+                hrtimer_raise_softirq();
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
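The raise is deliberately done last, once both base locks are released and interrupts are enabled again, so the softirq can run immediately and process the freshly migrated cb_pending list. hrtimer_raise_softirq() itself is a thin wrapper defined earlier in kernel/hrtimer.c; sketched from memory:

static void hrtimer_raise_softirq(void)
{
        raise_softirq(HRTIMER_SOFTIRQ);
}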