-rw-r--r--  fs/locks.c  70
1 file changed, 40 insertions, 30 deletions
diff --git a/fs/locks.c b/fs/locks.c
index 8b8388eca05e..c3eecb895acf 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -683,45 +683,55 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
 
 EXPORT_SYMBOL(posix_test_lock);
 
-/* This function tests for deadlock condition before putting a process to
- * sleep. The detection scheme is no longer recursive. Recursive was neat,
- * but dangerous - we risked stack corruption if the lock data was bad, or
- * if the recursion was too deep for any other reason.
- *
- * We rely on the fact that a task can only be on one lock's wait queue
- * at a time. When we find blocked_task on a wait queue we can re-search
- * with blocked_task equal to that queue's owner, until either blocked_task
- * isn't found, or blocked_task is found on a queue owned by my_task.
- *
- * Note: the above assumption may not be true when handling lock requests
- * from a broken NFS client. But broken NFS clients have a lot more to
- * worry about than proper deadlock detection anyway... --okir
- *
- * However, the failure of this assumption (also possible in the case of
- * multiple tasks sharing the same open file table) also means there's no
- * guarantee that the loop below will terminate. As a hack, we give up
- * after a few iterations.
+/*
+ * Deadlock detection:
+ *
+ * We attempt to detect deadlocks that are due purely to posix file
+ * locks.
+ *
+ * We assume that a task can be waiting for at most one lock at a time.
+ * So for any acquired lock, the process holding that lock may be
+ * waiting on at most one other lock.  That lock in turns may be held by
+ * someone waiting for at most one other lock.  Given a requested lock
+ * caller_fl which is about to wait for a conflicting lock block_fl, we
+ * follow this chain of waiters to ensure we are not about to create a
+ * cycle.
+ *
+ * Since we do this before we ever put a process to sleep on a lock, we
+ * are ensured that there is never a cycle; that is what guarantees that
+ * the while() loop in posix_locks_deadlock() eventually completes.
+ *
+ * Note: the above assumption may not be true when handling lock
+ * requests from a broken NFS client. It may also fail in the presence
+ * of tasks (such as posix threads) sharing the same open file table.
+ *
+ * To handle those cases, we just bail out after a few iterations.
  */
 
 #define MAX_DEADLK_ITERATIONS 10
 
+/* Find a lock that the owner of the given block_fl is blocking on. */
+static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
+{
+	struct file_lock *fl;
+
+	list_for_each_entry(fl, &blocked_list, fl_link) {
+		if (posix_same_owner(fl, block_fl))
+			return fl->fl_next;
+	}
+	return NULL;
+}
+
 static int posix_locks_deadlock(struct file_lock *caller_fl,
 				struct file_lock *block_fl)
 {
-	struct file_lock *fl;
 	int i = 0;
 
-next_task:
-	if (posix_same_owner(caller_fl, block_fl))
-		return 1;
-	list_for_each_entry(fl, &blocked_list, fl_link) {
-		if (posix_same_owner(fl, block_fl)) {
-			if (i++ > MAX_DEADLK_ITERATIONS)
-				return 0;
-			fl = fl->fl_next;
-			block_fl = fl;
-			goto next_task;
-		}
-	}
+	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
+		if (i++ > MAX_DEADLK_ITERATIONS)
+			return 0;
+		if (posix_same_owner(caller_fl, block_fl))
+			return 1;
+	}
 	return 0;
 }
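
For reference, here is a small userspace sketch (not part of the patch) of the wait-for chain walk that the new code performs. The struct lk type, the array-backed blocked_list, and the scenario in main() are hypothetical stand-ins for struct file_lock, the kernel's blocked_list, and posix_same_owner(); only the shape of what_owner_is_waiting_for() and posix_locks_deadlock() mirrors the patched code.

/* deadlock_walk.c - illustration only, not kernel code. */
#include <stdio.h>

#define MAX_DEADLK_ITERATIONS 10

/*
 * Hypothetical stand-in for struct file_lock: an owner id plus, for a
 * blocked request, the lock it is waiting on (the kernel's fl_next).
 */
struct lk {
	int owner;
	struct lk *fl_next;
};

/* Stand-in for the kernel's global blocked_list of sleeping requests. */
static struct lk *blocked_list[16];
static int nblocked;

/* Mirrors what_owner_is_waiting_for(): find a lock that the owner of
 * block is itself blocked on, or NULL if that owner is not waiting. */
static struct lk *what_owner_is_waiting_for(struct lk *block)
{
	for (int i = 0; i < nblocked; i++)
		if (blocked_list[i]->owner == block->owner)
			return blocked_list[i]->fl_next;
	return NULL;
}

/* Mirrors posix_locks_deadlock(): walk the chain of waiters starting at
 * block and report a deadlock if it leads back to the caller's owner. */
static int posix_locks_deadlock(struct lk *caller, struct lk *block)
{
	int i = 0;

	while ((block = what_owner_is_waiting_for(block))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;	/* chain too long: give up, assume no deadlock */
		if (block->owner == caller->owner)
			return 1;	/* cycle back to the caller */
	}
	return 0;
}

int main(void)
{
	struct lk lock_a = { .owner = 1, .fl_next = NULL };	/* A held by task 1 */
	struct lk lock_b = { .owner = 2, .fl_next = NULL };	/* B held by task 2 */

	/* Task 1 is already asleep waiting for B. */
	struct lk req1 = { .owner = 1, .fl_next = &lock_b };
	blocked_list[nblocked++] = &req1;

	/* Task 2 now requests A: 1 waits for 2 and 2 would wait for 1. */
	struct lk req2 = { .owner = 2, .fl_next = NULL };
	printf("task 2 -> lock A: deadlock = %d\n",
	       posix_locks_deadlock(&req2, &lock_a));	/* prints 1 */

	/* Task 3 requesting A only joins the chain; no cycle is formed. */
	struct lk req3 = { .owner = 3, .fl_next = NULL };
	printf("task 3 -> lock A: deadlock = %d\n",
	       posix_locks_deadlock(&req3, &lock_a));	/* prints 0 */
	return 0;
}

Compared to the old next_task/goto scan, the helper makes each step explicit: one call advances one hop along the chain of waiters, and MAX_DEADLK_ITERATIONS still bounds the walk when a broken NFS client or a shared open file table breaks the one-lock-per-owner assumption.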