Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/lockdep.c	 2
-rw-r--r--	kernel/locking/rtmutex.c	32
2 files changed, 29 insertions, 5 deletions
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index b0e9467922e1..d24e4339b46d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4188,7 +4188,7 @@ void debug_show_held_locks(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
-asmlinkage void lockdep_sys_exit(void)
+asmlinkage __visible void lockdep_sys_exit(void)
 {
 	struct task_struct *curr = current;
 
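The only lockdep.c change adds the __visible annotation to lockdep_sys_exit(), which is reached from assembly entry code rather than from a C caller. Below is a minimal user-space sketch of why such an annotation matters; the macro definition mirrors the kernel's (__attribute__((externally_visible))), but the function and the inline-asm reference are hypothetical and only stand in for the real syscall-exit path.

/* Sketch only: __visible keeps a symbol alive for callers the compiler
 * cannot see (e.g. assembly), so -flto / -fwhole-program will not drop it. */
#define __visible __attribute__((externally_visible))

/* Hypothetical function referenced only from inline asm, mimicking a
 * routine called from the syscall-exit assembly path. */
__visible void called_from_asm(void)
{
}

int main(void)
{
	/* No direct C call; the reference is hidden inside inline asm. */
	__asm__ volatile("" : : "r"(called_from_asm));
	return 0;
}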
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index aa4dff04b594..a620d4d08ca6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -343,9 +343,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * top_waiter can be NULL, when we are in the deboosting
 	 * mode!
 	 */
-	if (top_waiter && (!task_has_pi_waiters(task) ||
-			   top_waiter != task_top_pi_waiter(task)))
-		goto out_unlock_pi;
+	if (top_waiter) {
+		if (!task_has_pi_waiters(task))
+			goto out_unlock_pi;
+		/*
+		 * If deadlock detection is off, we stop here if we
+		 * are not the top pi waiter of the task.
+		 */
+		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
+			goto out_unlock_pi;
+	}
 
 	/*
 	 * When deadlock detection is off then we check, if further
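The restructured check above only bails out on "not the top pi waiter" when deadlock detection is off; with detection on, the chain walk continues so a deadlock further down the chain can still be found. The stand-alone sketch below models just that decision with plain booleans; the function and parameter names are hypothetical and stand in for the real pi-waiter rbtree state, not the kernel's code.

#include <stdbool.h>
#include <stdio.h>

/* Returns true when the chain walk should stop early (the real code
 * jumps to out_unlock_pi). Toy model, not the kernel's data structures. */
static bool stop_chain_walk(bool have_top_waiter, bool has_pi_waiters,
			    bool waiter_is_top, bool detect_deadlock)
{
	if (have_top_waiter) {
		if (!has_pi_waiters)
			return true;
		/* "Not the top waiter" only stops us when we are not asked
		 * to keep walking for full deadlock detection. */
		if (!detect_deadlock && !waiter_is_top)
			return true;
	}
	return false;
}

int main(void)
{
	/* A non-top waiter stops the walk only when detection is off. */
	printf("detect off, not top: stop=%d\n",
	       stop_chain_walk(true, true, false, false));
	printf("detect on,  not top: stop=%d\n",
	       stop_chain_walk(true, true, false, true));
	return 0;
}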
@@ -361,7 +368,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		goto retry;
 	}
 
-	/* Deadlock detection */
+	/*
+	 * Deadlock detection. If the lock is the same as the original
+	 * lock which caused us to walk the lock chain or if the
+	 * current lock is owned by the task which initiated the chain
+	 * walk, we detected a deadlock.
+	 */
 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
 		raw_spin_unlock(&lock->wait_lock);
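The expanded comment spells out the detection rule: a deadlock is flagged when the walk comes back to the original lock, or reaches a lock owned by the task that started the walk. Here is a self-contained toy model of that rule over a simple owner/blocked_on chain; the struct and function names are invented for illustration and are not the kernel's rt_mutex types.

#include <stdbool.h>
#include <stdio.h>

struct toy_task;

struct toy_lock {
	struct toy_task *owner;
};

struct toy_task {
	struct toy_lock *blocked_on;	/* lock this task waits for, or NULL */
};

static bool chain_has_deadlock(struct toy_lock *orig_lock,
			       struct toy_task *top_task)
{
	struct toy_lock *lock = orig_lock;

	while (lock && lock->owner) {
		/* Mirrors: lock == orig_lock || rt_mutex_owner(lock) == top_task */
		if (lock->owner == top_task)
			return true;
		lock = lock->owner->blocked_on;
		if (lock == orig_lock)
			return true;
	}
	return false;
}

int main(void)
{
	struct toy_task a = { 0 }, b = { 0 };
	struct toy_lock l1 = { .owner = &b }, l2 = { .owner = &a };

	/* A blocks on l1 (owned by B) while B blocks on l2 (owned by A). */
	a.blocked_on = &l1;
	b.blocked_on = &l2;

	printf("deadlock: %d\n", chain_has_deadlock(&l1, &a));
	return 0;
}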
@@ -527,6 +539,18 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	unsigned long flags;
 	int chain_walk = 0, res;
 
+	/*
+	 * Early deadlock detection. We really don't want the task to
+	 * enqueue on itself just to untangle the mess later. It's not
+	 * only an optimization. We drop the locks, so another waiter
+	 * can come in before the chain walk detects the deadlock. So
+	 * the other will detect the deadlock and return -EDEADLOCK,
+	 * which is wrong, as the other waiter is not in a deadlock
+	 * situation.
+	 */
+	if (detect_deadlock && owner == task)
+		return -EDEADLK;
+
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 	__rt_mutex_adjust_prio(task);
 	waiter->task = task;
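The added early check returns -EDEADLK before the task is ever enqueued as a waiter on a lock it already owns, instead of relying on a later chain walk which, as the comment notes, could wrongly blame an innocent second waiter once the locks are dropped. A minimal, hypothetical illustration of that shortcut (toy names, not the kernel API):

#include <errno.h>
#include <stdio.h>

struct toy_task { const char *name; };

struct toy_lock {
	struct toy_task *owner;
};

static int toy_block_on(struct toy_lock *lock, struct toy_task *task,
			int detect_deadlock)
{
	/* Mirrors: if (detect_deadlock && owner == task) return -EDEADLK; */
	if (detect_deadlock && lock->owner == task)
		return -EDEADLK;

	/* A real implementation would enqueue 'task' as a waiter here. */
	return 0;
}

int main(void)
{
	struct toy_task t = { "t" };
	struct toy_lock l = { .owner = &t };

	/* Blocking on a lock we already own is an immediate deadlock. */
	printf("ret = %d\n", toy_block_on(&l, &t, 1));
	return 0;
}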