author     Linus Torvalds <torvalds@linux-foundation.org>   2009-01-06 20:10:53 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-01-06 20:10:53 -0500
commit     3610639d1fceb09cb418c65fcbe9136c31eee03a (patch)
tree       78aa6de9e9495c39f8671aed927fece5adff8d24 /kernel
parent     cfa97f993c275d193fe82c22511dfb5f1e51b661 (diff)
parent     82c5b7b527ccc4b5d3cf832437e842f9d2920a79 (diff)
Merge branch 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  hrtimer: splitout peek ahead functionality, fix
  hrtimer: fixup comments
  hrtimer: fix recursion deadlock by re-introducing the softirq
  hrtimer: simplify hotplug migration
  hrtimer: fix HOTPLUG_CPU=n compile warning
  hrtimer: splitout peek ahead functionality
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/hrtimer.c   142
1 files changed, 70 insertions, 72 deletions
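
The central change in this series is the recursion-deadlock fix: when a newly started timer is already expired, the enqueue/reprogram path no longer runs the callback inline (which could recurse while the base lock is held) but raises HRTIMER_SOFTIRQ and lets softirq context pick the work up through hrtimer_peek_ahead_timers(). The following is a condensed sketch of that pattern, reconstructed from the hunks below; it is kernel-internal code and not a standalone, compilable example.

/* Sketch reconstructed from this diff; relies on kernel internals. */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
		/*
		 * The timer is already expired: instead of calling the
		 * callback here (risking recursion on the base lock),
		 * hand the work to softirq context.
		 */
		spin_unlock(&base->cpu_base->lock);
		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
		spin_lock(&base->cpu_base->lock);
		return 1;
	}
	return 0;
}

/* Softirq handler: runs soft-expired timers via hrtimer_interrupt(). */
static void run_hrtimer_softirq(struct softirq_action *h)
{
	hrtimer_peek_ahead_timers();
}
/* Registered at init, under CONFIG_HIGH_RES_TIMERS:
 *	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
 */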
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index eb2bfefa6dcc..1455b7651b6b 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -634,7 +634,6 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
 {
 }
 
-static void __run_hrtimer(struct hrtimer *timer);
 
 /*
  * When High resolution timers are active, try to reprogram. Note, that in case
@@ -646,13 +645,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 				    struct hrtimer_clock_base *base)
 {
 	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-		/*
-		 * XXX: recursion check?
-		 * hrtimer_forward() should round up with timer granularity
-		 * so that we never get into inf recursion here,
-		 * it doesn't do that though
-		 */
-		__run_hrtimer(timer);
+		spin_unlock(&base->cpu_base->lock);
+		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+		spin_lock(&base->cpu_base->lock);
 		return 1;
 	}
 	return 0;
@@ -705,11 +700,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 }
 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
 static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
-static inline int hrtimer_reprogram(struct hrtimer *timer,
-				    struct hrtimer_clock_base *base)
-{
-	return 0;
-}
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -780,9 +770,11 @@ EXPORT_SYMBOL_GPL(hrtimer_forward);
  *
  * The timer is inserted in expiry order. Insertion into the
  * red black tree is O(log(n)). Must hold the base lock.
+ *
+ * Returns 1 when the new timer is the leftmost timer in the tree.
  */
-static void enqueue_hrtimer(struct hrtimer *timer,
-			    struct hrtimer_clock_base *base, int reprogram)
+static int enqueue_hrtimer(struct hrtimer *timer,
+			   struct hrtimer_clock_base *base)
 {
 	struct rb_node **link = &base->active.rb_node;
 	struct rb_node *parent = NULL;
@@ -814,20 +806,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
 	 * Insert the timer to the rbtree and check whether it
 	 * replaces the first pending timer
 	 */
-	if (leftmost) {
-		/*
-		 * Reprogram the clock event device. When the timer is already
-		 * expired hrtimer_enqueue_reprogram has either called the
-		 * callback or added it to the pending list and raised the
-		 * softirq.
-		 *
-		 * This is a NOP for !HIGHRES
-		 */
-		if (reprogram && hrtimer_enqueue_reprogram(timer, base))
-			return;
-
+	if (leftmost)
 		base->first = &timer->node;
-	}
 
 	rb_link_node(&timer->node, parent, link);
 	rb_insert_color(&timer->node, &base->active);
@@ -836,6 +816,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
 	 * state of a possibly running callback.
 	 */
 	timer->state |= HRTIMER_STATE_ENQUEUED;
+
+	return leftmost;
 }
 
 /*
@@ -912,7 +894,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 {
 	struct hrtimer_clock_base *base, *new_base;
 	unsigned long flags;
-	int ret;
+	int ret, leftmost;
 
 	base = lock_hrtimer_base(timer, &flags);
 
@@ -940,12 +922,16 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 
 	timer_stats_hrtimer_set_start_info(timer);
 
+	leftmost = enqueue_hrtimer(timer, new_base);
+
 	/*
 	 * Only allow reprogramming if the new base is on this CPU.
 	 * (it might still be on another CPU if the timer was pending)
+	 *
+	 * XXX send_remote_softirq() ?
 	 */
-	enqueue_hrtimer(timer, new_base,
-			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
+	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
+		hrtimer_enqueue_reprogram(timer, new_base);
 
 	unlock_hrtimer_base(timer, &flags);
 
@@ -1157,13 +1143,13 @@ static void __run_hrtimer(struct hrtimer *timer)
 	spin_lock(&cpu_base->lock);
 
 	/*
-	 * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
-	 * reprogramming of the event hardware. This happens at the end of this
-	 * function anyway.
+	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
+	 * we do not reprogramm the event hardware. Happens either in
+	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
 	 */
 	if (restart != HRTIMER_NORESTART) {
 		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
-		enqueue_hrtimer(timer, base, 0);
+		enqueue_hrtimer(timer, base);
 	}
 	timer->state &= ~HRTIMER_STATE_CALLBACK;
 }
@@ -1243,6 +1229,22 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	}
 }
 
+/*
+ * local version of hrtimer_peek_ahead_timers() called with interrupts
+ * disabled.
+ */
+static void __hrtimer_peek_ahead_timers(void)
+{
+	struct tick_device *td;
+
+	if (!hrtimer_hres_active())
+		return;
+
+	td = &__get_cpu_var(tick_cpu_device);
+	if (td && td->evtdev)
+		hrtimer_interrupt(td->evtdev);
+}
+
 /**
  * hrtimer_peek_ahead_timers -- run soft-expired timers now
  *
@@ -1254,20 +1256,23 @@ void hrtimer_interrupt(struct clock_event_device *dev)
  */
 void hrtimer_peek_ahead_timers(void)
 {
-	struct tick_device *td;
 	unsigned long flags;
 
-	if (!hrtimer_hres_active())
-		return;
-
 	local_irq_save(flags);
-	td = &__get_cpu_var(tick_cpu_device);
-	if (td && td->evtdev)
-		hrtimer_interrupt(td->evtdev);
+	__hrtimer_peek_ahead_timers();
 	local_irq_restore(flags);
 }
 
-#endif /* CONFIG_HIGH_RES_TIMERS */
+static void run_hrtimer_softirq(struct softirq_action *h)
+{
+	hrtimer_peek_ahead_timers();
+}
+
+#else /* CONFIG_HIGH_RES_TIMERS */
+
+static inline void __hrtimer_peek_ahead_timers(void) { }
+
+#endif /* !CONFIG_HIGH_RES_TIMERS */
 
 /*
  * Called from timer softirq every jiffy, expire hrtimers:
@@ -1513,39 +1518,36 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
 		timer->base = new_base;
 		/*
-		 * Enqueue the timers on the new cpu, but do not reprogram
-		 * the timer as that would enable a deadlock between
-		 * hrtimer_enqueue_reprogramm() running the timer and us still
-		 * holding a nested base lock.
-		 *
-		 * Instead we tickle the hrtimer interrupt after the migration
-		 * is done, which will run all expired timers and re-programm
-		 * the timer device.
+		 * Enqueue the timers on the new cpu. This does not
+		 * reprogram the event device in case the timer
+		 * expires before the earliest on this CPU, but we run
+		 * hrtimer_interrupt after we migrated everything to
+		 * sort out already expired timers and reprogram the
+		 * event device.
 		 */
-		enqueue_hrtimer(timer, new_base, 0);
+		enqueue_hrtimer(timer, new_base);
 
 		/* Clear the migration state bit */
 		timer->state &= ~HRTIMER_STATE_MIGRATE;
 	}
 }
 
-static int migrate_hrtimers(int scpu)
+static void migrate_hrtimers(int scpu)
 {
 	struct hrtimer_cpu_base *old_base, *new_base;
-	int dcpu, i;
+	int i;
 
 	BUG_ON(cpu_online(scpu));
-	old_base = &per_cpu(hrtimer_bases, scpu);
-	new_base = &get_cpu_var(hrtimer_bases);
-
-	dcpu = smp_processor_id();
-
 	tick_cancel_sched_timer(scpu);
+
+	local_irq_disable();
+	old_base = &per_cpu(hrtimer_bases, scpu);
+	new_base = &__get_cpu_var(hrtimer_bases);
 	/*
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
 	 */
-	spin_lock_irq(&new_base->lock);
+	spin_lock(&new_base->lock);
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
@@ -1554,15 +1556,11 @@ static int migrate_hrtimers(int scpu)
 	}
 
 	spin_unlock(&old_base->lock);
-	spin_unlock_irq(&new_base->lock);
-	put_cpu_var(hrtimer_bases);
+	spin_unlock(&new_base->lock);
 
-	return dcpu;
-}
-
-static void tickle_timers(void *arg)
-{
-	hrtimer_peek_ahead_timers();
+	/* Check, if we got expired work to do */
+	__hrtimer_peek_ahead_timers();
+	local_irq_enable();
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -1583,11 +1581,8 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	{
-		int dcpu;
-
 		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
-		dcpu = migrate_hrtimers(scpu);
-		smp_call_function_single(dcpu, tickle_timers, NULL, 0);
+		migrate_hrtimers(scpu);
 		break;
 	}
 #endif
@@ -1608,6 +1603,9 @@ void __init hrtimers_init(void)
 	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
 			  (void *)(long)smp_processor_id());
 	register_cpu_notifier(&hrtimers_nb);
+#ifdef CONFIG_HIGH_RES_TIMERS
+	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
+#endif
 }
 
 /**
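
The hotplug simplification in the last hunks removes the returned destination CPU and the tickle_timers() IPI: migrate_hrtimers() now disables interrupts locally, moves the dead CPU's timers under both base locks, and calls __hrtimer_peek_ahead_timers() itself before re-enabling interrupts. Pulled together from the hunks above as a rough sketch of the new control flow (kernel-internal code; the per-clock-base loop body is not shown in this diff and is abbreviated here):

/* Sketch of the simplified hotplug path, reconstructed from this diff. */
static void migrate_hrtimers(int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = &__get_cpu_var(hrtimer_bases);

	/* Caller is globally serialized; nested locking cannot deadlock. */
	spin_lock(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		/* migrate_hrtimer_list() for each clock base (body elided). */
	}

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);

	/* Run anything that already expired right here; no IPI needed. */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
}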