Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c  143
1 file changed, 70 insertions(+), 73 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 61cb933395ba..77aa33bb877c 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -32,7 +32,6 @@
  */
 
 #include <linux/cpu.h>
-#include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
@@ -635,7 +634,6 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
 {
 }
 
-static void __run_hrtimer(struct hrtimer *timer);
 
 /*
  * When High resolution timers are active, try to reprogram. Note, that in case
@@ -647,13 +645,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                                             struct hrtimer_clock_base *base)
 {
         if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-                /*
-                 * XXX: recursion check?
-                 * hrtimer_forward() should round up with timer granularity
-                 * so that we never get into inf recursion here,
-                 * it doesn't do that though
-                 */
-                __run_hrtimer(timer);
+                spin_unlock(&base->cpu_base->lock);
+                raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+                spin_lock(&base->cpu_base->lock);
                 return 1;
         }
         return 0;
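
With this hunk, a timer that is already expired when it is armed is no longer
run inline from the enqueue path (the removed XXX comment noted that nothing
bounded the recursion there); it is deferred to HRTIMER_SOFTIRQ instead.
Reconstructed from the context and added lines, the helper now reads roughly
as follows; the base lock is held on entry, as the surrounding code requires:

    static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                                                struct hrtimer_clock_base *base)
    {
            if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
                    /*
                     * Defer the expired timer to the softirq instead of
                     * running its callback inline: drop the base lock,
                     * kick HRTIMER_SOFTIRQ, retake the lock.
                     */
                    spin_unlock(&base->cpu_base->lock);
                    raise_softirq_irqoff(HRTIMER_SOFTIRQ);
                    spin_lock(&base->cpu_base->lock);
                    return 1;
            }
            return 0;
    }

The matching softirq handler is registered in hrtimers_init() at the end of
this patch.
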
@@ -706,11 +700,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 }
 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
 static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
-static inline int hrtimer_reprogram(struct hrtimer *timer,
-                                    struct hrtimer_clock_base *base)
-{
-        return 0;
-}
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -781,9 +770,11 @@ EXPORT_SYMBOL_GPL(hrtimer_forward);
  *
  * The timer is inserted in expiry order. Insertion into the
  * red black tree is O(log(n)). Must hold the base lock.
+ *
+ * Returns 1 when the new timer is the leftmost timer in the tree.
  */
-static void enqueue_hrtimer(struct hrtimer *timer,
-                            struct hrtimer_clock_base *base, int reprogram)
+static int enqueue_hrtimer(struct hrtimer *timer,
+                           struct hrtimer_clock_base *base)
 {
         struct rb_node **link = &base->active.rb_node;
         struct rb_node *parent = NULL;
@@ -815,20 +806,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
          * Insert the timer to the rbtree and check whether it
          * replaces the first pending timer
          */
-        if (leftmost) {
-                /*
-                 * Reprogram the clock event device. When the timer is already
-                 * expired hrtimer_enqueue_reprogram has either called the
-                 * callback or added it to the pending list and raised the
-                 * softirq.
-                 *
-                 * This is a NOP for !HIGHRES
-                 */
-                if (reprogram && hrtimer_enqueue_reprogram(timer, base))
-                        return;
-
+        if (leftmost)
                 base->first = &timer->node;
-        }
 
         rb_link_node(&timer->node, parent, link);
         rb_insert_color(&timer->node, &base->active);
@@ -837,6 +816,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
          * state of a possibly running callback.
          */
         timer->state |= HRTIMER_STATE_ENQUEUED;
+
+        return leftmost;
 }
 
 /*
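
enqueue_hrtimer() is now a pure insert: it reports whether the new timer
became the leftmost, i.e. earliest-expiring, node, and the decision to
reprogram the event device moves to the callers. The hrtimer_start_range_ns()
hunk below consumes the return value like this (excerpted from that hunk):

    leftmost = enqueue_hrtimer(timer, new_base);

    /* Only allow reprogramming if the new base is on this CPU */
    if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
            hrtimer_enqueue_reprogram(timer, new_base);
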
@@ -913,7 +894,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 {
         struct hrtimer_clock_base *base, *new_base;
         unsigned long flags;
-        int ret;
+        int ret, leftmost;
 
         base = lock_hrtimer_base(timer, &flags);
 
@@ -941,12 +922,16 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 
         timer_stats_hrtimer_set_start_info(timer);
 
+        leftmost = enqueue_hrtimer(timer, new_base);
+
         /*
          * Only allow reprogramming if the new base is on this CPU.
          * (it might still be on another CPU if the timer was pending)
+         *
+         * XXX send_remote_softirq() ?
          */
-        enqueue_hrtimer(timer, new_base,
-                        new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
+        if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
+                hrtimer_enqueue_reprogram(timer, new_base);
 
         unlock_hrtimer_base(timer, &flags);
 
@@ -1158,13 +1143,13 @@ static void __run_hrtimer(struct hrtimer *timer)
         spin_lock(&cpu_base->lock);
 
         /*
-         * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
-         * reprogramming of the event hardware. This happens at the end of this
-         * function anyway.
+         * Note: We clear the CALLBACK bit after enqueue_hrtimer and
+         * we do not reprogramm the event hardware. Happens either in
+         * hrtimer_start_range_ns() or in hrtimer_interrupt()
          */
         if (restart != HRTIMER_NORESTART) {
                 BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
-                enqueue_hrtimer(timer, base, 0);
+                enqueue_hrtimer(timer, base);
         }
         timer->state &= ~HRTIMER_STATE_CALLBACK;
 }
@@ -1272,6 +1257,22 @@ void hrtimer_interrupt(struct clock_event_device *dev)
         }
 }
 
+/*
+ * local version of hrtimer_peek_ahead_timers() called with interrupts
+ * disabled.
+ */
+static void __hrtimer_peek_ahead_timers(void)
+{
+        struct tick_device *td;
+
+        if (!hrtimer_hres_active())
+                return;
+
+        td = &__get_cpu_var(tick_cpu_device);
+        if (td && td->evtdev)
+                hrtimer_interrupt(td->evtdev);
+}
+
 /**
  * hrtimer_peek_ahead_timers -- run soft-expired timers now
  *
@@ -1283,20 +1284,23 @@ void hrtimer_interrupt(struct clock_event_device *dev)
  */
 void hrtimer_peek_ahead_timers(void)
 {
-        struct tick_device *td;
         unsigned long flags;
 
-        if (!hrtimer_hres_active())
-                return;
-
         local_irq_save(flags);
-        td = &__get_cpu_var(tick_cpu_device);
-        if (td && td->evtdev)
-                hrtimer_interrupt(td->evtdev);
+        __hrtimer_peek_ahead_timers();
         local_irq_restore(flags);
 }
 
-#endif /* CONFIG_HIGH_RES_TIMERS */
+static void run_hrtimer_softirq(struct softirq_action *h)
+{
+        hrtimer_peek_ahead_timers();
+}
+
+#else /* CONFIG_HIGH_RES_TIMERS */
+
+static inline void __hrtimer_peek_ahead_timers(void) { }
+
+#endif /* !CONFIG_HIGH_RES_TIMERS */
 
 /*
  * Called from timer softirq every jiffy, expire hrtimers:
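
The peek-ahead logic is split in two here: __hrtimer_peek_ahead_timers()
does the work and expects interrupts to be disabled by the caller, while the
exported hrtimer_peek_ahead_timers() is just an irq-save wrapper around it;
the !CONFIG_HIGH_RES_TIMERS stub lets callers such as migrate_hrtimers()
below use the local version unconditionally. The new softirq handler funnels
into the same path:

    run_hrtimer_softirq(h)                    /* HRTIMER_SOFTIRQ handler */
      -> hrtimer_peek_ahead_timers()          /* disables interrupts */
         -> __hrtimer_peek_ahead_timers()     /* bails out unless hres_active */
            -> hrtimer_interrupt(td->evtdev)  /* runs soft-expired timers */
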
@@ -1542,39 +1546,36 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
                 __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
                 timer->base = new_base;
                 /*
-                 * Enqueue the timers on the new cpu, but do not reprogram
-                 * the timer as that would enable a deadlock between
-                 * hrtimer_enqueue_reprogramm() running the timer and us still
-                 * holding a nested base lock.
-                 *
-                 * Instead we tickle the hrtimer interrupt after the migration
-                 * is done, which will run all expired timers and re-programm
-                 * the timer device.
+                 * Enqueue the timers on the new cpu. This does not
+                 * reprogram the event device in case the timer
+                 * expires before the earliest on this CPU, but we run
+                 * hrtimer_interrupt after we migrated everything to
+                 * sort out already expired timers and reprogram the
+                 * event device.
                  */
-                enqueue_hrtimer(timer, new_base, 0);
+                enqueue_hrtimer(timer, new_base);
 
                 /* Clear the migration state bit */
                 timer->state &= ~HRTIMER_STATE_MIGRATE;
         }
 }
 
-static int migrate_hrtimers(int scpu)
+static void migrate_hrtimers(int scpu)
 {
         struct hrtimer_cpu_base *old_base, *new_base;
-        int dcpu, i;
+        int i;
 
         BUG_ON(cpu_online(scpu));
-        old_base = &per_cpu(hrtimer_bases, scpu);
-        new_base = &get_cpu_var(hrtimer_bases);
-
-        dcpu = smp_processor_id();
-
         tick_cancel_sched_timer(scpu);
+
+        local_irq_disable();
+        old_base = &per_cpu(hrtimer_bases, scpu);
+        new_base = &__get_cpu_var(hrtimer_bases);
         /*
          * The caller is globally serialized and nobody else
          * takes two locks at once, deadlock is not possible.
          */
-        spin_lock_irq(&new_base->lock);
+        spin_lock(&new_base->lock);
         spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
         for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
@@ -1583,15 +1584,11 @@ static int migrate_hrtimers(int scpu)
         }
 
         spin_unlock(&old_base->lock);
-        spin_unlock_irq(&new_base->lock);
-        put_cpu_var(hrtimer_bases);
+        spin_unlock(&new_base->lock);
 
-        return dcpu;
-}
-
-static void tickle_timers(void *arg)
-{
-        hrtimer_peek_ahead_timers();
+        /* Check, if we got expired work to do */
+        __hrtimer_peek_ahead_timers();
+        local_irq_enable();
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
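
migrate_hrtimers() now performs the whole migration with interrupts disabled
locally and plain spin_lock()/spin_unlock() on the base locks, and it peeks
ahead itself instead of returning the destination CPU for a later
tickle_timers() IPI. Abridged from the two hunks above, the resulting control
flow is as follows; the loop body is not part of this diff and is sketched
from the migrate_hrtimer_list() context above:

    static void migrate_hrtimers(int scpu)
    {
            struct hrtimer_cpu_base *old_base, *new_base;
            int i;

            BUG_ON(cpu_online(scpu));
            tick_cancel_sched_timer(scpu);

            local_irq_disable();
            old_base = &per_cpu(hrtimer_bases, scpu);
            new_base = &__get_cpu_var(hrtimer_bases);

            spin_lock(&new_base->lock);
            spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

            for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                    /* per-base list migration, elided in this diff */
                    migrate_hrtimer_list(&old_base->clock_base[i],
                                         &new_base->clock_base[i]);
            }

            spin_unlock(&old_base->lock);
            spin_unlock(&new_base->lock);

            /* Check, if we got expired work to do */
            __hrtimer_peek_ahead_timers();
            local_irq_enable();
    }
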
@@ -1616,11 +1613,8 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
         {
-                int dcpu;
-
                 clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
-                dcpu = migrate_hrtimers(scpu);
-                smp_call_function_single(dcpu, tickle_timers, NULL, 0);
+                migrate_hrtimers(scpu);
                 break;
         }
 #endif
@@ -1641,6 +1635,9 @@ void __init hrtimers_init(void)
         hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                           (void *)(long)smp_processor_id());
         register_cpu_notifier(&hrtimers_nb);
+#ifdef CONFIG_HIGH_RES_TIMERS
+        open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
+#endif
 }
 
 /**
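
The open_softirq() call closes the loop with the
raise_softirq_irqoff(HRTIMER_SOFTIRQ) added to hrtimer_enqueue_reprogram()
above:

    raise_softirq_irqoff(HRTIMER_SOFTIRQ);                /* enqueue path */
    open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);   /* init time */

Note that this diffstat is limited to kernel/hrtimer.c; the HRTIMER_SOFTIRQ
entry in the softirq vector itself lives in include/linux/interrupt.h and is
presumably either already present or provided by the rest of the commit.
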