path: root/kernel/hrtimer.c
author	Thomas Gleixner <tglx@linutronix.de>	2008-04-28 03:23:24 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-04-28 16:22:21 -0400
commit	0c96c5979a522c3323c30a078a70120e29b5bdbc (patch)
tree	1cd5cabe5a3591ce8f22640675921289298d0c40 /kernel/hrtimer.c
parent	e31a94ed371c70855eb30b77c490d6d85dd4da26 (diff)
hrtimer: raise softirq unlocked to avoid circular lock dependency
The scheduler hrtimer bits in 2.6.25 introduced a circular lock
dependency in a rare code path:

=======================================================
[ INFO: possible circular locking dependency detected ]
2.6.25-sched-devel.git-x86-latest.git #19
-------------------------------------------------------
X/2980 is trying to acquire lock:
 (&rq->rq_lock_key#2){++..}, at: [<ffffffff80230146>] task_rq_lock+0x56/0xa0

but task is already holding lock:
 (&cpu_base->lock){++..}, at: [<ffffffff80257ae1>] lock_hrtimer_base+0x31/0x60

which lock already depends on the new lock.

The scenario which leads to this is:

posix-timer signal is delivered
  -> posix-timer is rearmed
     timer is already expired in hrtimer_enqueue()
  -> softirq is raised

To prevent this we need to move the raise of the softirq out of the
base->lock protected code path.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@kernel.org
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
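The pattern the patch applies: decide under cpu_base->lock whether the timer
ended up on the cb_pending list, remember that decision in a local variable,
and only raise the softirq after the lock has been dropped. The following is a
minimal userspace sketch of that "record under the lock, act after unlock"
pattern; it is not kernel code, and the names (base_lock, timer_pending,
trigger_deferred_work, start_timer) are illustrative stand-ins for the
cpu_base lock, HRTIMER_STATE_PENDING and raise_softirq().

/*
 * Sketch only: decide under the lock whether deferred work is needed,
 * but trigger it after the lock is released, so the trigger path can
 * never nest inside this lock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;
static bool timer_pending;               /* stands in for HRTIMER_STATE_PENDING */

static void trigger_deferred_work(void)  /* stands in for raise_softirq() */
{
        printf("deferred work triggered outside the lock\n");
}

static void start_timer(bool expires_immediately)
{
        bool raise;

        pthread_mutex_lock(&base_lock);
        timer_pending = expires_immediately; /* enqueue decision under the lock */
        raise = timer_pending;               /* remember it ...                  */
        pthread_mutex_unlock(&base_lock);

        if (raise)                           /* ... and act on it unlocked       */
                trigger_deferred_work();
}

int main(void)
{
        start_timer(true);
        return 0;
}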
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--	kernel/hrtimer.c	19
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index e379ef0e9c20..dea4c9124ac8 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -590,7 +590,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 		list_add_tail(&timer->cb_entry,
 			      &base->cpu_base->cb_pending);
 		timer->state = HRTIMER_STATE_PENDING;
-		raise_softirq(HRTIMER_SOFTIRQ);
 		return 1;
 	default:
 		BUG();
@@ -633,6 +632,11 @@ static int hrtimer_switch_to_hres(void)
 	return 1;
 }
 
+static inline void hrtimer_raise_softirq(void)
+{
+	raise_softirq(HRTIMER_SOFTIRQ);
+}
+
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -651,6 +655,7 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
 {
 	return 0;
 }
+static inline void hrtimer_raise_softirq(void) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -850,7 +855,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 {
 	struct hrtimer_clock_base *base, *new_base;
 	unsigned long flags;
-	int ret;
+	int ret, raise;
 
 	base = lock_hrtimer_base(timer, &flags);
 
@@ -884,8 +889,18 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 	enqueue_hrtimer(timer, new_base,
 			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
 
+	/*
+	 * The timer may be expired and moved to the cb_pending
+	 * list. We can not raise the softirq with base lock held due
+	 * to a possible deadlock with runqueue lock.
+	 */
+	raise = timer->state == HRTIMER_STATE_PENDING;
+
 	unlock_hrtimer_base(timer, &flags);
 
+	if (raise)
+		hrtimer_raise_softirq();
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hrtimer_start);