Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	65
1 file changed, 48 insertions, 17 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index a08bb8e61c6f..49c9aada0374 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -41,6 +41,7 @@
 #include <linux/bitops.h>
 #include <linux/mpage.h>
 #include <linux/bit_spinlock.h>
+#include <linux/cleancache.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 
@@ -269,6 +270,10 @@ void invalidate_bdev(struct block_device *bdev)
 	invalidate_bh_lrus();
 	lru_add_drain_all();	/* make sure all lru add caches are flushed */
 	invalidate_mapping_pages(mapping, 0, -1);
+	/* 99% of the time, we don't need to flush the cleancache on the bdev.
+	 * But, for the strange corners, lets be cautious
+	 */
+	cleancache_flush_inode(mapping);
 }
 EXPORT_SYMBOL(invalidate_bdev);
 
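
The hunk above teaches invalidate_bdev() about cleancache: invalidate_mapping_pages() empties the page cache, but a cleancache backend may still hold clean copies of the bdev's pages, so those are flushed as well. A minimal sketch of the resulting pattern, assuming the cleancache API of this era (cleancache_flush_inode(); later kernels renamed it cleancache_invalidate_inode()). The helper drop_bdev_caches() is hypothetical; in-tree code simply calls invalidate_bdev().

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/cleancache.h>
#include <linux/fs.h>
#include <linux/swap.h>

/* Hypothetical wrapper mirroring the patched invalidate_bdev() */
static void drop_bdev_caches(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	lru_add_drain_all();	/* flush the per-CPU lru-add caches */
	invalidate_mapping_pages(mapping, 0, -1);
	/* a cleancache backend may still hold clean copies; drop them too */
	cleancache_flush_inode(mapping);
}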
@@ -2331,24 +2336,26 @@ EXPORT_SYMBOL(block_commit_write);
  * page lock we can determine safely if the page is beyond EOF. If it is not
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
+ *
+ * Direct callers of this function should call vfs_check_frozen() so that page
+ * fault does not busyloop until the fs is thawed.
  */
-int
-block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
-		   get_block_t get_block)
+int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+			 get_block_t get_block)
 {
 	struct page *page = vmf->page;
 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 	unsigned long end;
 	loff_t size;
-	int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
+	int ret;
 
 	lock_page(page);
 	size = i_size_read(inode);
 	if ((page->mapping != inode->i_mapping) ||
 	    (page_offset(page) > size)) {
-		/* page got truncated out from underneath us */
-		unlock_page(page);
-		goto out;
+		/* We overload EFAULT to mean page got truncated */
+		ret = -EFAULT;
+		goto out_unlock;
 	}
 
 	/* page is wholly or partially inside EOF */
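
With this split, __block_page_mkwrite() reports plain 0/-errno values instead of VM_FAULT_* codes, and -EFAULT is overloaded to mean "the page was truncated under us". Callers translate with block_page_mkwrite_return(), which the same series adds to include/linux/buffer_head.h. Roughly the following sketch, reconstructed from the error conventions visible in this diff rather than quoted from the header:

static inline int block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT)	/* page was truncated under us */
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err == -EAGAIN)	/* fs was frozen; let the fault retry */
		return VM_FAULT_RETRY;
	/* -ENOSPC, -EIO, etc */
	return VM_FAULT_SIGBUS;
}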
@@ -2361,18 +2368,42 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	if (!ret)
 		ret = block_commit_write(page, 0, end);
 
-	if (unlikely(ret)) {
-		unlock_page(page);
-		if (ret == -ENOMEM)
-			ret = VM_FAULT_OOM;
-		else /* -ENOSPC, -EIO, etc */
-			ret = VM_FAULT_SIGBUS;
-	} else
-		ret = VM_FAULT_LOCKED;
-
-out:
+	if (unlikely(ret < 0))
+		goto out_unlock;
+	/*
+	 * Freezing in progress? We check after the page is marked dirty and
+	 * with page lock held so if the test here fails, we are sure freezing
+	 * code will wait during syncing until the page fault is done - at that
+	 * point page will be dirty and unlocked so freezing code will write it
+	 * and writeprotect it again.
+	 */
+	set_page_dirty(page);
+	if (inode->i_sb->s_frozen != SB_UNFROZEN) {
+		ret = -EAGAIN;
+		goto out_unlock;
+	}
+	wait_on_page_writeback(page);
+	return 0;
+out_unlock:
+	unlock_page(page);
 	return ret;
 }
+EXPORT_SYMBOL(__block_page_mkwrite);
+
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+		   get_block_t get_block)
+{
+	int ret;
+	struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
+
+	/*
+	 * This check is racy but catches the common case. The check in
+	 * __block_page_mkwrite() is reliable.
+	 */
+	vfs_check_frozen(sb, SB_FREEZE_WRITE);
+	ret = __block_page_mkwrite(vma, vmf, get_block);
+	return block_page_mkwrite_return(ret);
+}
 EXPORT_SYMBOL(block_page_mkwrite);
 
 /*
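
The refactor leaves filesystems two entry points: the block_page_mkwrite() wrapper above for the common case, and __block_page_mkwrite() for callers that wrap it in their own locking or journaling. Per the new comment, direct callers are responsible for the racy-but-cheap vfs_check_frozen() themselves. A minimal sketch of such a handler; the myfs_* names and myfs_get_block are hypothetical, and any get_block_t implementation would do:

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/mm.h>

static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);

static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int ret;

	/* racy check; keeps the fault from busylooping on a frozen fs */
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	ret = __block_page_mkwrite(vma, vmf, myfs_get_block);
	/* map 0/-errno back to the VM_FAULT_* codes the VM expects */
	return block_page_mkwrite_return(ret);
}

static const struct vm_operations_struct myfs_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};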