author		Al Viro <viro@zeniv.linux.org.uk>	2008-05-06 13:58:34 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2008-05-06 13:58:34 -0400
commit		0b2bac2f1ea0d33a3621b27ca68b9ae760fca2e9
tree		f5d150cc2177700f0881eecf8d9ed69bb82aba17 /fs
parent		33dcdac2df54e66c447ae03f58c95c7251aa5649
[PATCH] fix SMP ordering hole in fcntl_setlk()
The fcntl_setlk()/close() race prevention has a subtle hole: we need to make sure that if we *do* have an fcntl/close race on an SMP box, the accesses to the descriptor table and to inode->i_flock won't get reordered. As it is, we get STORE inode->i_flock, LOAD descriptor table entry on one side versus STORE descriptor table entry, LOAD inode->i_flock on the other, with not a single lock in common on both sides. We do have the BKL around the first STORE, but the check in locks_remove_posix() is outside the BKL, and for a good reason: we don't want the BKL on the common path of close(2).

The solution is to hold ->file_lock around fcheck() in there; that orders us with respect to the removal from the descriptor table that preceded locks_remove_posix() on the close path, so we either come first (in which case eviction will be handled by the close side) or we see the effect of the close and do the eviction ourselves. Note that even though it's a read-only access, we do need ->file_lock here; rcu_read_lock() would not be enough to order things.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
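The access pattern described above is the classic store-buffering hazard. As a minimal, hedged illustration (userspace C11 with relaxed atomics, not kernel code; i_flock and fd_entry are mere stand-ins for inode->i_flock and the descriptor-table slot), with no common lock or barrier both sides can observe the other's pre-store value:

/*
 * Illustrative only: a userspace store-buffering demo, not kernel code.
 * i_flock and fd_entry stand in for inode->i_flock and the
 * descriptor-table slot.  Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int i_flock;		/* 0 = no lock recorded      */
static atomic_int fd_entry = 1;		/* 1 = fd still in the table */

static void *fcntl_side(void *ret)
{
	/* STORE inode->i_flock ... */
	atomic_store_explicit(&i_flock, 1, memory_order_relaxed);
	/* ... then LOAD the descriptor table entry (fcheck()) */
	*(int *)ret = atomic_load_explicit(&fd_entry, memory_order_relaxed);
	return NULL;
}

static void *close_side(void *ret)
{
	/* STORE the descriptor table entry (fd removed) ... */
	atomic_store_explicit(&fd_entry, 0, memory_order_relaxed);
	/* ... then LOAD inode->i_flock (the locks_remove_posix() check) */
	*(int *)ret = atomic_load_explicit(&i_flock, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	int fcntl_saw, close_saw;

	pthread_create(&a, NULL, fcntl_side, &fcntl_saw);
	pthread_create(&b, NULL, close_side, &close_saw);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/*
	 * The outcome the patch forbids is fcntl_saw == 1 (fd looked
	 * open, so no eviction here) AND close_saw == 0 (no lock seen,
	 * so no eviction there either): the lock would leak.  Relaxed
	 * ordering permits it; a run may need many iterations to hit it.
	 */
	printf("fcntl saw fd=%d, close saw lock=%d\n", fcntl_saw, close_saw);
	return 0;
}

Taking ->file_lock around fcheck() puts the load on the fcntl side and the descriptor-table update on the close side under the same spinlock, which is exactly the ordering that rcu_read_lock(), providing no store ordering, cannot supply.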
Diffstat (limited to 'fs')
-rw-r--r--	fs/locks.c | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/fs/locks.c b/fs/locks.c
index 663c069b59b3..0ac6b92cb0b6 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1753,6 +1753,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
 	struct file_lock *file_lock = locks_alloc_lock();
 	struct flock flock;
 	struct inode *inode;
+	struct file *f;
 	int error;
 
 	if (file_lock == NULL)
@@ -1825,7 +1826,15 @@ again:
 	 * Attempt to detect a close/fcntl race and recover by
 	 * releasing the lock that was just acquired.
 	 */
-	if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
+	/*
+	 * we need that spin_lock here - it prevents reordering between
+	 * update of inode->i_flock and check for it done in close().
+	 * rcu_read_lock() wouldn't do.
+	 */
+	spin_lock(&current->files->file_lock);
+	f = fcheck(fd);
+	spin_unlock(&current->files->file_lock);
+	if (!error && f != filp && flock.l_type != F_UNLCK) {
 		flock.l_type = F_UNLCK;
 		goto again;
 	}
@@ -1881,6 +1890,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
 	struct file_lock *file_lock = locks_alloc_lock();
 	struct flock64 flock;
 	struct inode *inode;
+	struct file *f;
 	int error;
 
 	if (file_lock == NULL)
@@ -1953,7 +1963,10 @@ again:
 	 * Attempt to detect a close/fcntl race and recover by
 	 * releasing the lock that was just acquired.
 	 */
-	if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
+	spin_lock(&current->files->file_lock);
+	f = fcheck(fd);
+	spin_unlock(&current->files->file_lock);
+	if (!error && f != filp && flock.l_type != F_UNLCK) {
 		flock.l_type = F_UNLCK;
 		goto again;
 	}
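For context, here is a hedged userspace sketch of the racing syscalls the check above defends against: one thread takes a record lock with fcntl(F_SETLK) while another concurrently close()s the same descriptor. The file name and thread layout are illustrative; the invariant being preserved is that a POSIX lock never outlives the close of its descriptor, whichever side wins.

/*
 * Hedged userspace sketch of the racing syscalls; /tmp/lockfile and
 * the thread layout are illustrative.  Build with: cc -pthread race.c
 */
#include <fcntl.h>
#include <pthread.h>
#include <unistd.h>

static int fd;

static void *locker(void *unused)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* zero length = whole file */
	};

	(void)unused;
	/* May fail with EBADF if close() got there first - also fine. */
	fcntl(fd, F_SETLK, &fl);
	return NULL;
}

static void *closer(void *unused)
{
	(void)unused;
	close(fd);			/* races with the F_SETLK above */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	fd = open("/tmp/lockfile", O_RDWR | O_CREAT, 0600);
	if (fd < 0)
		return 1;
	pthread_create(&a, NULL, locker, NULL);
	pthread_create(&b, NULL, closer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* Whichever side won, no record lock may outlive the close(). */
	return 0;
}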