author     J. Bruce Fields <bfields@citi.umich.edu>    2007-10-26 18:05:40 -0400
committer  J. Bruce Fields <bfields@citi.umich.edu>    2008-02-03 17:51:36 -0500
commit     b533184fc353d4a2d07929b4ac424a6f1bf5a3b9 (patch)
tree       502634d5810735bcaea8666bdadf9bc0b6abc216 /fs/locks.c
parent     9135f1901ee6449dfe338adf6e40e9c2025b8150 (diff)
locks: clarify posix_locks_deadlock
For such a short function (with such a long comment),
posix_locks_deadlock() seems to cause a lot of confusion. Attempt to
make it a bit clearer:
- Remove the initial posix_same_owner() check, which can never
pass (since this is only called in the case that block_fl and
caller_fl conflict)
- Use an explicit loop (and a helper function) instead of a goto.
- Rewrite the comment, attempting a clearer explanation, and
removing some uninteresting historical detail.
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Diffstat (limited to 'fs/locks.c')
 fs/locks.c | 70 ++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 40 insertions(+), 30 deletions(-)
diff --git a/fs/locks.c b/fs/locks.c
index 8b8388eca05e..c3eecb895acf 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -683,45 +683,55 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
 
 EXPORT_SYMBOL(posix_test_lock);
 
-/* This function tests for deadlock condition before putting a process to
- * sleep. The detection scheme is no longer recursive. Recursive was neat,
- * but dangerous - we risked stack corruption if the lock data was bad, or
- * if the recursion was too deep for any other reason.
- *
- * We rely on the fact that a task can only be on one lock's wait queue
- * at a time. When we find blocked_task on a wait queue we can re-search
- * with blocked_task equal to that queue's owner, until either blocked_task
- * isn't found, or blocked_task is found on a queue owned by my_task.
- *
- * Note: the above assumption may not be true when handling lock requests
- * from a broken NFS client. But broken NFS clients have a lot more to
- * worry about than proper deadlock detection anyway... --okir
- *
- * However, the failure of this assumption (also possible in the case of
- * multiple tasks sharing the same open file table) also means there's no
- * guarantee that the loop below will terminate. As a hack, we give up
- * after a few iterations.
+/*
+ * Deadlock detection:
+ *
+ * We attempt to detect deadlocks that are due purely to posix file
+ * locks.
+ *
+ * We assume that a task can be waiting for at most one lock at a time.
+ * So for any acquired lock, the process holding that lock may be
+ * waiting on at most one other lock.  That lock in turns may be held by
+ * someone waiting for at most one other lock.  Given a requested lock
+ * caller_fl which is about to wait for a conflicting lock block_fl, we
+ * follow this chain of waiters to ensure we are not about to create a
+ * cycle.
+ *
+ * Since we do this before we ever put a process to sleep on a lock, we
+ * are ensured that there is never a cycle; that is what guarantees that
+ * the while() loop in posix_locks_deadlock() eventually completes.
+ *
+ * Note: the above assumption may not be true when handling lock
+ * requests from a broken NFS client. It may also fail in the presence
+ * of tasks (such as posix threads) sharing the same open file table.
+ *
+ * To handle those cases, we just bail out after a few iterations.
  */
 
 #define MAX_DEADLK_ITERATIONS 10
 
+/* Find a lock that the owner of the given block_fl is blocking on. */
+static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
+{
+	struct file_lock *fl;
+
+	list_for_each_entry(fl, &blocked_list, fl_link) {
+		if (posix_same_owner(fl, block_fl))
+			return fl->fl_next;
+	}
+	return NULL;
+}
+
 static int posix_locks_deadlock(struct file_lock *caller_fl,
 				struct file_lock *block_fl)
 {
-	struct file_lock *fl;
 	int i = 0;
 
-next_task:
-	if (posix_same_owner(caller_fl, block_fl))
-		return 1;
-	list_for_each_entry(fl, &blocked_list, fl_link) {
-		if (posix_same_owner(fl, block_fl)) {
-			if (i++ > MAX_DEADLK_ITERATIONS)
-				return 0;
-			fl = fl->fl_next;
-			block_fl = fl;
-			goto next_task;
-		}
-	}
+	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
+		if (i++ > MAX_DEADLK_ITERATIONS)
+			return 0;
+		if (posix_same_owner(caller_fl, block_fl))
+			return 1;
+	}
 	return 0;
 }
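
The chain-of-waiters argument in the new comment can be sanity-checked outside the kernel. What follows is a minimal userspace sketch, not kernel code: struct waiter, owner_waits_for() and would_deadlock() are simplified stand-ins invented here for the kernel's blocked_list, what_owner_is_waiting_for() and posix_locks_deadlock(), with plain integer owner IDs in place of file_lock owners.

/*
 * Minimal userspace sketch of the chain-walking scheme described in the
 * patch. All names and structures are hypothetical simplifications: a
 * "waiter" records which owner is blocked on which other owner, and the
 * "blocked" array plays the role of the kernel's global blocked_list.
 */
#include <stdio.h>

#define MAX_DEADLK_ITERATIONS 10

struct waiter {
	int owner;           /* who is waiting */
	int blocks_on_owner; /* whose lock they are waiting for */
};

static struct waiter blocked[16];
static int nblocked;

/* Analogue of what_owner_is_waiting_for(): given an owner, find the
 * owner of the lock that this owner is itself blocked on, if any. */
static int owner_waits_for(int owner, int *next_owner)
{
	for (int i = 0; i < nblocked; i++) {
		if (blocked[i].owner == owner) {
			*next_owner = blocked[i].blocks_on_owner;
			return 1;
		}
	}
	return 0;
}

/* Analogue of the rewritten posix_locks_deadlock(): walk the chain of
 * waiters starting at the owner we are about to block on; report a
 * deadlock if the chain leads back to the caller, and give up after a
 * bounded number of hops, as the patch does. */
static int would_deadlock(int caller, int block_owner)
{
	int i = 0;

	while (owner_waits_for(block_owner, &block_owner)) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (block_owner == caller)
			return 1;
	}
	return 0;
}

int main(void)
{
	/* Owner 1 waits on owner 2; owner 2 waits on owner 3. */
	blocked[nblocked++] = (struct waiter){ .owner = 1, .blocks_on_owner = 2 };
	blocked[nblocked++] = (struct waiter){ .owner = 2, .blocks_on_owner = 3 };

	/* Owner 3 waiting on owner 1 would close the cycle 3 -> 1 -> 2 -> 3. */
	printf("3 waits on 1: deadlock=%d\n", would_deadlock(3, 1));
	/* A new owner 4 waiting on owner 3 is fine: the chain ends at 3. */
	printf("4 waits on 3: deadlock=%d\n", would_deadlock(4, 3));
	return 0;
}

As in the patch, the check runs before the caller is ever put to sleep, so under the one-lock-per-task assumption an existing cycle can only be found, never created, and the iteration cap covers the broken-NFS-client and shared-open-file-table cases the new comment warns about.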