author     Thomas Gleixner <tglx@linutronix.de>  2014-06-05 06:34:23 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2014-06-07 08:55:40 -0400
commit     3d5c9340d1949733eb37616abd15db36aef9a57c (patch)
tree       257ce62564f206a530ba6b0aedf732a2dee0b6d2 /kernel
parent     951e273060d15b233a7f7ccaf76ba682b5b05a03 (diff)
rtmutex: Handle deadlock detection smarter
Even in the case when deadlock detection is not requested by the
caller, we can detect deadlocks. Right now the code stops the lock
chain walk and keeps the waiter enqueued, even on itself. Silly not to
yell when such a scenario is detected and to keep the waiter enqueued.

Return -EDEADLK unconditionally and handle it at the call sites.

The futex calls return -EDEADLK. The non futex ones dequeue the
waiter, throw a warning and put the task into a schedule loop.

Tagged for stable as it makes the code more robust.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Brad Mouring <bmouring@ni.com>
Link: http://lkml.kernel.org/r/20140605152801.836501969@linutronix.de
Cc: stable@vger.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
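To make the failure mode concrete: the simplest such deadlock is a task
blocking on an rt_mutex it already owns. A minimal sketch of that case,
assuming a kernel-module context (demo_lock and demo_self_deadlock are
hypothetical names, not part of this patch):

    #include <linux/rtmutex.h>

    /* Statically initialized rt_mutex, for illustration only. */
    static DEFINE_RT_MUTEX(demo_lock);

    static void demo_self_deadlock(void)
    {
            rt_mutex_lock(&demo_lock);

            /*
             * Second acquisition by the owner: task_blocks_on_rt_mutex()
             * now returns -EDEADLK unconditionally (owner == task). For a
             * non-futex caller like this one, the slow path dequeues the
             * waiter and rt_mutex_handle_deadlock() prints a warning and
             * parks the task in a schedule loop, instead of leaving the
             * waiter enqueued on itself.
             */
            rt_mutex_lock(&demo_lock);      /* never returns */
    }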
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/locking/rtmutex-debug.h |  5
-rw-r--r--  kernel/locking/rtmutex.c       | 33
-rw-r--r--  kernel/locking/rtmutex.h       |  5
3 files changed, 38 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
index 14193d596d78..ab29b6a22669 100644
--- a/kernel/locking/rtmutex-debug.h
+++ b/kernel/locking/rtmutex-debug.h
@@ -31,3 +31,8 @@ static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
 {
         return (waiter != NULL);
 }
+
+static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+{
+        debug_rt_mutex_print_deadlock(w);
+}
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index a620d4d08ca6..eb7a46327798 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -314,7 +314,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                 }
                 put_task_struct(task);
 
-                return deadlock_detect ? -EDEADLK : 0;
+                return -EDEADLK;
         }
  retry:
         /*
@@ -377,7 +377,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
         if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                 raw_spin_unlock(&lock->wait_lock);
-                ret = deadlock_detect ? -EDEADLK : 0;
+                ret = -EDEADLK;
                 goto out_unlock_pi;
         }
 
@@ -548,7 +548,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
          * which is wrong, as the other waiter is not in a deadlock
          * situation.
          */
-        if (detect_deadlock && owner == task)
+        if (owner == task)
                 return -EDEADLK;
 
         raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -763,6 +763,26 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
         return ret;
 }
 
+static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+                                     struct rt_mutex_waiter *w)
+{
+        /*
+         * If the result is not -EDEADLOCK or the caller requested
+         * deadlock detection, nothing to do here.
+         */
+        if (res != -EDEADLOCK || detect_deadlock)
+                return;
+
+        /*
+         * Yell loudly and stop the task right here.
+         */
+        rt_mutex_print_deadlock(w);
+        while (1) {
+                set_current_state(TASK_INTERRUPTIBLE);
+                schedule();
+        }
+}
+
 /*
  * Slow path lock function:
  */
@@ -802,8 +822,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
         set_current_state(TASK_RUNNING);
 
-        if (unlikely(ret))
+        if (unlikely(ret)) {
                 remove_waiter(lock, &waiter);
+                rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
+        }
 
         /*
          * try_to_take_rt_mutex() sets the waiter bit
@@ -1112,7 +1134,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                 return 1;
         }
 
-        ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+        /* We enforce deadlock detection for futexes */
+        ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
 
         if (ret && !rt_mutex_owner(lock)) {
                 /*
diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
index a1a1dd06421d..f6a1f3c133b1 100644
--- a/kernel/locking/rtmutex.h
+++ b/kernel/locking/rtmutex.h
@@ -24,3 +24,8 @@
 #define debug_rt_mutex_print_deadlock(w)                do { } while (0)
 #define debug_rt_mutex_detect_deadlock(w,d)             (d)
 #define debug_rt_mutex_reset_waiter(w)                  do { } while (0)
+
+static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+{
+        WARN(1, "rtmutex deadlock detected\n");
+}