author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-01-05 05:28:22 -0500
committer Ingo Molnar <mingo@elte.hu>    2009-01-05 07:14:33 -0500
commit    a6037b61c2f5fc99c57c15b26d7cfa58bbb34008 (patch)
tree      b076de5cc8a75e4227f96dda31a0daa6e895579b /kernel/hrtimer.c
parent    731a55ba0f17064f85903b7bf8e24849ec6cfa20 (diff)
hrtimer: fix recursion deadlock by re-introducing the softirq
Impact: fix rare runtime deadlock

There are a few sites that do:

   spin_lock_irq(&foo)
   hrtimer_start(&bar)
     __run_hrtimer(&bar)
       func()
         spin_lock(&foo)

which obviously deadlocks. In order to avoid this, never call
__run_hrtimer() from hrtimer_start*() context, but instead defer this to
softirq context.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
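To make the failure mode concrete, below is a minimal userspace C analogue of the pre-patch behaviour, where hrtimer_start*() on an already-expired timer ran the callback inline in the caller's context. This is a sketch only: foo stands in for the spinlock in the call chain above, and timer_fn()/start_timer_inline() are illustrative names, not kernel API.

        /* Hypothetical userspace analogue -- not kernel code. */
        #include <pthread.h>
        #include <stdio.h>

        static pthread_mutex_t foo = PTHREAD_MUTEX_INITIALIZER;

        /* Timer callback: needs the same lock its starter already holds. */
        static void timer_fn(void)
        {
                /* A real spinlock would hang here; trylock lets the demo exit. */
                if (pthread_mutex_trylock(&foo) != 0) {
                        printf("deadlock: callback wants a lock the starter holds\n");
                        return;
                }
                pthread_mutex_unlock(&foo);
        }

        /* Pre-patch behaviour: an expired timer's callback runs immediately,
         * in the context of whoever called hrtimer_start*(). */
        static void start_timer_inline(void)
        {
                timer_fn();
        }

        int main(void)
        {
                pthread_mutex_lock(&foo);       /* spin_lock_irq(&foo)  */
                start_timer_inline();           /* hrtimer_start(&bar)  */
                pthread_mutex_unlock(&foo);
                return 0;
        }

Built with cc demo.c -lpthread, the trylock reports the self-deadlock that a real non-recursive spinlock would turn into a hang.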
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c  60
1 file changed, 27 insertions(+), 33 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 8010a67cead..b68e98f4e4c 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -634,7 +634,6 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
 {
 }
 
-static void __run_hrtimer(struct hrtimer *timer);
 
 /*
  * When High resolution timers are active, try to reprogram. Note, that in case
@@ -646,13 +645,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                                             struct hrtimer_clock_base *base)
 {
        if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-               /*
-                * XXX: recursion check?
-                * hrtimer_forward() should round up with timer granularity
-                * so that we never get into inf recursion here,
-                * it doesn't do that though
-                */
-               __run_hrtimer(timer);
+               spin_unlock(&base->cpu_base->lock);
+               raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+               spin_lock(&base->cpu_base->lock);
                return 1;
        }
        return 0;
@@ -705,11 +700,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 }
 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
 static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
-static inline int hrtimer_reprogram(struct hrtimer *timer,
-                                   struct hrtimer_clock_base *base)
-{
-       return 0;
-}
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -780,9 +770,11 @@ EXPORT_SYMBOL_GPL(hrtimer_forward);
  *
  * The timer is inserted in expiry order. Insertion into the
  * red black tree is O(log(n)). Must hold the base lock.
+ *
+ * Returns 1 when the new timer is the leftmost timer in the tree.
  */
-static void enqueue_hrtimer(struct hrtimer *timer,
-                           struct hrtimer_clock_base *base, int reprogram)
+static int enqueue_hrtimer(struct hrtimer *timer,
+                          struct hrtimer_clock_base *base)
 {
        struct rb_node **link = &base->active.rb_node;
        struct rb_node *parent = NULL;
@@ -814,20 +806,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
         * Insert the timer to the rbtree and check whether it
         * replaces the first pending timer
         */
-       if (leftmost) {
-               /*
-                * Reprogram the clock event device. When the timer is already
-                * expired hrtimer_enqueue_reprogram has either called the
-                * callback or added it to the pending list and raised the
-                * softirq.
-                *
-                * This is a NOP for !HIGHRES
-                */
-               if (reprogram && hrtimer_enqueue_reprogram(timer, base))
-                       return;
-
+       if (leftmost)
                base->first = &timer->node;
-       }
 
        rb_link_node(&timer->node, parent, link);
        rb_insert_color(&timer->node, &base->active);
@@ -836,6 +816,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
         * state of a possibly running callback.
         */
        timer->state |= HRTIMER_STATE_ENQUEUED;
+
+       return leftmost;
 }
 
 /*
@@ -912,7 +894,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 {
        struct hrtimer_clock_base *base, *new_base;
        unsigned long flags;
-       int ret;
+       int ret, leftmost;
 
        base = lock_hrtimer_base(timer, &flags);
 
@@ -940,12 +922,16 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 
        timer_stats_hrtimer_set_start_info(timer);
 
+       leftmost = enqueue_hrtimer(timer, new_base);
+
        /*
         * Only allow reprogramming if the new base is on this CPU.
         * (it might still be on another CPU if the timer was pending)
+        *
+        * XXX send_remote_softirq() ?
         */
-       enqueue_hrtimer(timer, new_base,
-                       new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
+       if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
+               hrtimer_enqueue_reprogram(timer, new_base);
 
        unlock_hrtimer_base(timer, &flags);
 
@@ -1163,7 +1149,7 @@ static void __run_hrtimer(struct hrtimer *timer)
         */
        if (restart != HRTIMER_NORESTART) {
                BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
-               enqueue_hrtimer(timer, base, 0);
+               enqueue_hrtimer(timer, base);
        }
        timer->state &= ~HRTIMER_STATE_CALLBACK;
 }
@@ -1277,6 +1263,11 @@ void hrtimer_peek_ahead_timers(void)
        local_irq_restore(flags);
 }
 
+static void run_hrtimer_softirq(struct softirq_action *h)
+{
+       hrtimer_peek_ahead_timers();
+}
+
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
 /*
@@ -1532,7 +1523,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
         * is done, which will run all expired timers and re-programm
         * the timer device.
         */
-       enqueue_hrtimer(timer, new_base, 0);
+       enqueue_hrtimer(timer, new_base);
 
        /* Clear the migration state bit */
        timer->state &= ~HRTIMER_STATE_MIGRATE;
@@ -1610,6 +1601,9 @@ void __init hrtimers_init(void)
        hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                           (void *)(long)smp_processor_id());
        register_cpu_notifier(&hrtimers_nb);
+#ifdef CONFIG_HIGH_RES_TIMERS
+       open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
+#endif
 }
 
 /**
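For contrast, the same userspace analogue with the fix applied: the enqueue path only marks work pending while locks are held, and the callback runs later from a softirq-like context, mirroring raise_softirq_irqoff(HRTIMER_SOFTIRQ) and run_hrtimer_softirq() in the hunks above. Again a hedged sketch with illustrative names, not kernel code.

        /* Hypothetical userspace analogue of the post-patch flow. */
        #include <pthread.h>
        #include <stdio.h>

        static pthread_mutex_t foo = PTHREAD_MUTEX_INITIALIZER;
        static int softirq_pending;     /* stands in for HRTIMER_SOFTIRQ */

        static void timer_fn(void)
        {
                pthread_mutex_lock(&foo);       /* safe: starter dropped foo */
                printf("callback ran after the lock was released\n");
                pthread_mutex_unlock(&foo);
        }

        /* Post-patch behaviour: mark the softirq pending, run nothing
         * inline -- the analogue of raise_softirq_irqoff(HRTIMER_SOFTIRQ). */
        static void start_timer_deferred(void)
        {
                softirq_pending = 1;
        }

        int main(void)
        {
                pthread_mutex_lock(&foo);       /* spin_lock_irq(&foo) */
                start_timer_deferred();         /* hrtimer_start(&bar) */
                pthread_mutex_unlock(&foo);

                /* Softirq context: runs once locks are dropped, the analogue
                 * of run_hrtimer_softirq() -> hrtimer_peek_ahead_timers(). */
                if (softirq_pending) {
                        softirq_pending = 0;
                        timer_fn();
                }
                return 0;
        }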