Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c  69
1 file changed, 49 insertions(+), 20 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index a08bb8e61c6f..1a80b048ade8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -41,6 +41,7 @@
 #include <linux/bitops.h>
 #include <linux/mpage.h>
 #include <linux/bit_spinlock.h>
+#include <linux/cleancache.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 
@@ -269,6 +270,10 @@ void invalidate_bdev(struct block_device *bdev)
 	invalidate_bh_lrus();
 	lru_add_drain_all();	/* make sure all lru add caches are flushed */
 	invalidate_mapping_pages(mapping, 0, -1);
+	/* 99% of the time, we don't need to flush the cleancache on the bdev.
+	 * But, for the strange corners, lets be cautious
+	 */
+	cleancache_flush_inode(mapping);
 }
 EXPORT_SYMBOL(invalidate_bdev);
 
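
For context on where this hunk matters: invalidate_bdev() is reached, for example, from the BLKFLSBUF ioctl on a block device node, and with this change any copy of the device's pages held by a cleancache backend is dropped along with the page cache. A minimal userspace sketch of that trigger path follows; the device path is illustrative only and the program must run with sufficient privileges.

/* Sketch: BLKFLSBUF flushes and invalidates the block device's cached
 * pages, which ends up in invalidate_bdev() inside the kernel. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	int fd = open("/dev/sdb", O_RDONLY);	/* example device */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKFLSBUF, 0) < 0)	/* drop cached copies of the bdev */
		perror("ioctl(BLKFLSBUF)");
	close(fd);
	return 0;
}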
@@ -1897,10 +1902,8 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 		if (!buffer_uptodate(*wait_bh))
 			err = -EIO;
 	}
-	if (unlikely(err)) {
+	if (unlikely(err))
 		page_zero_new_buffers(page, from, to);
-		ClearPageUptodate(page);
-	}
 	return err;
 }
 EXPORT_SYMBOL(__block_write_begin);
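
After this hunk, an I/O error in __block_write_begin() only zeroes the newly allocated buffers; it no longer clears the page's uptodate flag. A sketch of the typical caller pattern of this era (close to block_write_begin(), simplified and not verbatim kernel source) shows what the error path looks like from above: the page is unlocked and released, while its uptodate state is left for the existing buffers to describe.

/* Sketch of a write_begin-style caller of __block_write_begin(). */
static int example_write_begin(struct address_space *mapping, loff_t pos,
			       unsigned len, unsigned flags,
			       struct page **pagep, get_block_t get_block)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct page *page;
	int status;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, get_block);
	if (unlikely(status)) {
		/* Error: drop the page, but do not touch its uptodate flag. */
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}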
@@ -2331,24 +2334,26 @@ EXPORT_SYMBOL(block_commit_write);
  * page lock we can determine safely if the page is beyond EOF. If it is not
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
+ *
+ * Direct callers of this function should call vfs_check_frozen() so that page
+ * fault does not busyloop until the fs is thawed.
  */
-int
-block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
-		   get_block_t get_block)
+int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+			 get_block_t get_block)
 {
 	struct page *page = vmf->page;
 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 	unsigned long end;
 	loff_t size;
-	int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
+	int ret;
 
 	lock_page(page);
 	size = i_size_read(inode);
 	if ((page->mapping != inode->i_mapping) ||
 	    (page_offset(page) > size)) {
-		/* page got truncated out from underneath us */
-		unlock_page(page);
-		goto out;
+		/* We overload EFAULT to mean page got truncated */
+		ret = -EFAULT;
+		goto out_unlock;
 	}
 
 	/* page is wholly or partially inside EOF */
@@ -2361,18 +2366,42 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	if (!ret)
 		ret = block_commit_write(page, 0, end);
 
-	if (unlikely(ret)) {
-		unlock_page(page);
-		if (ret == -ENOMEM)
-			ret = VM_FAULT_OOM;
-		else /* -ENOSPC, -EIO, etc */
-			ret = VM_FAULT_SIGBUS;
-	} else
-		ret = VM_FAULT_LOCKED;
-
-out:
+	if (unlikely(ret < 0))
+		goto out_unlock;
+	/*
+	 * Freezing in progress? We check after the page is marked dirty and
+	 * with page lock held so if the test here fails, we are sure freezing
+	 * code will wait during syncing until the page fault is done - at that
+	 * point page will be dirty and unlocked so freezing code will write it
+	 * and writeprotect it again.
+	 */
+	set_page_dirty(page);
+	if (inode->i_sb->s_frozen != SB_UNFROZEN) {
+		ret = -EAGAIN;
+		goto out_unlock;
+	}
+	wait_on_page_writeback(page);
+	return 0;
+out_unlock:
+	unlock_page(page);
 	return ret;
 }
+EXPORT_SYMBOL(__block_page_mkwrite);
+
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+		   get_block_t get_block)
+{
+	int ret;
+	struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
+
+	/*
+	 * This check is racy but catches the common case. The check in
+	 * __block_page_mkwrite() is reliable.
+	 */
+	vfs_check_frozen(sb, SB_FREEZE_WRITE);
+	ret = __block_page_mkwrite(vma, vmf, get_block);
+	return block_page_mkwrite_return(ret);
+}
 EXPORT_SYMBOL(block_page_mkwrite);
 
 /*
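
The split above lets filesystems call __block_page_mkwrite() directly and get the raw error value (0, -EFAULT, -ENOMEM, -EAGAIN, ...) back, while block_page_mkwrite() keeps the old ->page_mkwrite contract by converting that value with block_page_mkwrite_return(). A hedged sketch of a filesystem handler built on the new helper follows; "myfs_page_mkwrite" and "myfs_get_block" are hypothetical names, and a real filesystem would typically also open a transaction around the helper call.

/* Sketch of a ->page_mkwrite handler mirroring the generic wrapper above. */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
	int err;

	/* Racy freeze check, as in block_page_mkwrite(); it keeps the page
	 * fault from busylooping while the fs is frozen.  The reliable check
	 * is the s_frozen test inside __block_page_mkwrite(). */
	vfs_check_frozen(sb, SB_FREEZE_WRITE);
	err = __block_page_mkwrite(vma, vmf, myfs_get_block);
	/* Convert the errno into the VM_FAULT_* code the fault path expects. */
	return block_page_mkwrite_return(err);
}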