Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r--   kernel/locking/rtmutex.c | 33
1 file changed, 28 insertions, 5 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index a620d4d08ca6..eb7a46327798 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -314,7 +314,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	}
 	put_task_struct(task);
 
-	return deadlock_detect ? -EDEADLK : 0;
+	return -EDEADLK;
 }
  retry:
 	/*
@@ -377,7 +377,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
 		raw_spin_unlock(&lock->wait_lock);
-		ret = deadlock_detect ? -EDEADLK : 0;
+		ret = -EDEADLK;
 		goto out_unlock_pi;
 	}
 
@@ -548,7 +548,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	 * which is wrong, as the other waiter is not in a deadlock
 	 * situation.
 	 */
-	if (detect_deadlock && owner == task)
+	if (owner == task)
 		return -EDEADLK;
 
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -763,6 +763,26 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	return ret;
 }
 
+static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+				     struct rt_mutex_waiter *w)
+{
+	/*
+	 * If the result is not -EDEADLOCK or the caller requested
+	 * deadlock detection, nothing to do here.
+	 */
+	if (res != -EDEADLOCK || detect_deadlock)
+		return;
+
+	/*
+	 * Yell loudly and stop the task right here.
+	 */
+	rt_mutex_print_deadlock(w);
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+	}
+}
+
 /*
  * Slow path lock function:
  */
@@ -802,8 +822,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 	set_current_state(TASK_RUNNING);
 
-	if (unlikely(ret))
+	if (unlikely(ret)) {
 		remove_waiter(lock, &waiter);
+		rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
+	}
 
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit
@@ -1112,7 +1134,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 		return 1;
 	}
 
-	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+	/* We enforce deadlock detection for futexes */
+	ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
 
 	if (ret && !rt_mutex_owner(lock)) {
 		/*
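
For illustration only, a minimal standalone sketch (not kernel code) of the policy the new rt_mutex_handle_deadlock() encodes: the lock chain walk now always reports -EDEADLK when it finds a cycle, and the handler only intervenes when the caller did not ask for deadlock detection, in which case the kernel prints the deadlock and parks the task instead of returning the error. The handle_deadlock() name and the exit() stand-in below are assumptions made for the sketch, not part of the patch.

/* Standalone user-space model of the post-patch behaviour; handle_deadlock()
 * mirrors the check in rt_mutex_handle_deadlock() but is NOT the kernel
 * function. exit(1) stands in for the kernel's schedule() parking loop. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void handle_deadlock(int res, int detect_deadlock)
{
	/* nothing to do unless a deadlock was found and detection was off */
	if (res != -EDEADLK || detect_deadlock)
		return;

	fprintf(stderr, "deadlock detected, stopping task\n");
	exit(1);	/* kernel: rt_mutex_print_deadlock() + schedule() loop */
}

int main(void)
{
	int ret = -EDEADLK;	/* pretend the chain walk found a cycle */

	handle_deadlock(ret, 1);	/* detection requested: caller just gets -EDEADLK back */
	printf("caller sees %d (-EDEADLK)\n", ret);

	handle_deadlock(ret, 0);	/* detection off: task would be stopped here */
	return 0;
}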