author     Linus Torvalds <torvalds@linux-foundation.org>  2011-05-26 12:52:14 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-05-26 12:52:14 -0400
commit     32e51f141fd8d880f57b6a2eb53ce72856254d4a (patch)
tree       d8d7a0d503533a03fb07b2ebd5eccd9043f2d228 /fs/buffer.c
parent     ca16d140af91febe25daeb9e032bf8bd46b8c31f (diff)
parent     b6ff24a333267a6810e28ee5b9fc539d149c52f0 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6: (25 commits)
  cifs: remove unnecessary dentry_unhash on rmdir/rename_dir
  ocfs2: remove unnecessary dentry_unhash on rmdir/rename_dir
  exofs: remove unnecessary dentry_unhash on rmdir/rename_dir
  nfs: remove unnecessary dentry_unhash on rmdir/rename_dir
  ext2: remove unnecessary dentry_unhash on rmdir/rename_dir
  ext3: remove unnecessary dentry_unhash on rmdir/rename_dir
  ext4: remove unnecessary dentry_unhash on rmdir/rename_dir
  btrfs: remove unnecessary dentry_unhash in rmdir/rename_dir
  ceph: remove unnecessary dentry_unhash calls
  vfs: clean up vfs_rename_other
  vfs: clean up vfs_rename_dir
  vfs: clean up vfs_rmdir
  vfs: fix vfs_rename_dir for FS_RENAME_DOES_D_MOVE filesystems
  libfs: drop unneeded dentry_unhash
  vfs: update dentry_unhash() comment
  vfs: push dentry_unhash on rename_dir into file systems
  vfs: push dentry_unhash on rmdir into file systems
  vfs: remove dget() from dentry_unhash()
  vfs: dentry_unhash immediately prior to rmdir
  vfs: Block mmapped writes while the fs is frozen
  ...
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c  59
1 file changed, 42 insertions(+), 17 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index a08bb8e61c6f..b0675bfe8207 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2331,24 +2331,26 @@ EXPORT_SYMBOL(block_commit_write);
  * page lock we can determine safely if the page is beyond EOF. If it is not
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
+ *
+ * Direct callers of this function should call vfs_check_frozen() so that page
+ * fault does not busyloop until the fs is thawed.
  */
-int
-block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
-                   get_block_t get_block)
+int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+                         get_block_t get_block)
 {
         struct page *page = vmf->page;
         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
         unsigned long end;
         loff_t size;
-        int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
+        int ret;
 
         lock_page(page);
         size = i_size_read(inode);
         if ((page->mapping != inode->i_mapping) ||
             (page_offset(page) > size)) {
-                /* page got truncated out from underneath us */
-                unlock_page(page);
-                goto out;
+                /* We overload EFAULT to mean page got truncated */
+                ret = -EFAULT;
+                goto out_unlock;
         }
 
         /* page is wholly or partially inside EOF */
@@ -2361,18 +2363,41 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
         if (!ret)
                 ret = block_commit_write(page, 0, end);
 
-        if (unlikely(ret)) {
-                unlock_page(page);
-                if (ret == -ENOMEM)
-                        ret = VM_FAULT_OOM;
-                else /* -ENOSPC, -EIO, etc */
-                        ret = VM_FAULT_SIGBUS;
-        } else
-                ret = VM_FAULT_LOCKED;
-
-out:
+        if (unlikely(ret < 0))
+                goto out_unlock;
+        /*
+         * Freezing in progress? We check after the page is marked dirty and
+         * with page lock held so if the test here fails, we are sure freezing
+         * code will wait during syncing until the page fault is done - at that
+         * point page will be dirty and unlocked so freezing code will write it
+         * and writeprotect it again.
+         */
+        set_page_dirty(page);
+        if (inode->i_sb->s_frozen != SB_UNFROZEN) {
+                ret = -EAGAIN;
+                goto out_unlock;
+        }
+        return 0;
+out_unlock:
+        unlock_page(page);
         return ret;
 }
+EXPORT_SYMBOL(__block_page_mkwrite);
+
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+                   get_block_t get_block)
+{
+        int ret;
+        struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
+
+        /*
+         * This check is racy but catches the common case. The check in
+         * __block_page_mkwrite() is reliable.
+         */
+        vfs_check_frozen(sb, SB_FREEZE_WRITE);
+        ret = __block_page_mkwrite(vma, vmf, get_block);
+        return block_page_mkwrite_return(ret);
+}
 EXPORT_SYMBOL(block_page_mkwrite);
 
 /*