author		Pavel Emelyanov <xemul@openvz.org>	2007-09-11 08:38:13 -0400
committer	J. Bruce Fields <bfields@citi.umich.edu>	2007-10-09 18:32:45 -0400
commit		84d535ade62b6f8ce852745731ad6200c46b977c
tree		7e9c17b51a86a1fdb20aff02432fb85723fa9579
parent		526985b9dd6ef7716b87f5fe6f0e2438ea3a89c7
Memory shortage can result in inconsistent flocks state
When flock_lock_file() is called to change a flock from F_RDLCK to F_WRLCK or vice versa, the existing flock can be removed without any warning. Look:

	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before); <<<<<< !
		break;
	}

If the subsequent locks_alloc_lock() fails after this point, the return code will be -ENOMEM, but the existing lock has already been removed.

It is a known limitation that such "re-locking" is not atomic; in the racy case the file should still end up locked (albeit by some other process), but here the file is left unlocked entirely.

The fix is to prepare the new lock in advance, so that nothing can fail after the old lock has been deleted.

Found while making flocks pid-namespace aware.

(Note: Thanks to Reuben Farrelly for finding a bug in an earlier version of this patch.)

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Cc: Reuben Farrelly <reuben-linuxkernel@reub.net>
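For context, a minimal userspace sketch (not part of the patch; the file name and error handling are purely illustrative) of the re-locking path the message describes: a process holding a shared flock asks for an exclusive one, which walks the loop quoted above in flock_lock_file().

	#include <stdio.h>
	#include <fcntl.h>
	#include <sys/file.h>

	int main(void)
	{
		/* "testfile" is a placeholder path used only for illustration. */
		int fd = open("testfile", O_RDWR | O_CREAT, 0644);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* Take a shared flock (F_RDLCK on the kernel's flock path). */
		if (flock(fd, LOCK_SH) < 0) {
			perror("flock LOCK_SH");
			return 1;
		}

		/*
		 * Upgrade to an exclusive flock.  In the kernel this re-locking
		 * first deletes the old F_RDLCK lock; before this patch, a failed
		 * locks_alloc_lock() at that point returned -ENOMEM and left the
		 * file with no flock at all, instead of still read-locked.
		 */
		if (flock(fd, LOCK_EX) < 0)
			perror("flock LOCK_EX");

		return 0;
	}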
-rw-r--r--	fs/locks.c	13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/fs/locks.c b/fs/locks.c
index efe1affe6bed..6e22c8129a80 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -733,6 +733,15 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	lock_kernel();
 	if (request->fl_flags & FL_ACCESS)
 		goto find_conflict;
+
+	if (request->fl_type != F_UNLCK) {
+		error = -ENOMEM;
+		new_fl = locks_alloc_lock();
+		if (new_fl == NULL)
+			goto out;
+		error = 0;
+	}
+
 	for_each_lock(inode, before) {
 		struct file_lock *fl = *before;
 		if (IS_POSIX(fl))
@@ -754,10 +763,6 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 		goto out;
 	}
 
-	error = -ENOMEM;
-	new_fl = locks_alloc_lock();
-	if (new_fl == NULL)
-		goto out;
 	/*
 	 * If a higher-priority process was blocked on the old file lock,
 	 * give it the opportunity to lock the file.