author		Jan Kara <jack@suse.cz>	2011-05-23 18:23:34 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2011-05-26 07:26:44 -0400
commit		24da4fab5a617ecbf0f0c64e7ba7703383faa411 (patch)
tree		28f62d2e0e69b11765f021db53c327b56f741576
parent		7c6e984dfca8ff5b04d359a59b24f39a691b87d3 (diff)
vfs: Create __block_page_mkwrite() helper passing error values back
Create a __block_page_mkwrite() helper which does everything
block_page_mkwrite() does, except that it passes back errors from the
__block_write_begin / block_commit_write calls.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
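The point of returning a plain errno (converted to a VM_FAULT_* code only by
the block_page_mkwrite_return() helper added below) is that a filesystem can
now wrap the block-level fault handling in its own setup and teardown and
still report errors sensibly. A minimal sketch of such a caller follows; it
is not part of this patch, and the myfs_* names are hypothetical stand-ins
for a filesystem's own helpers.

/*
 * Illustrative sketch only, not part of this patch.  All myfs_* names
 * are hypothetical; the ->page_mkwrite signature is the one current
 * at the time of this commit.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int err;

	/* Hypothetical: set up fs-private state, e.g. start a transaction. */
	err = myfs_begin_write(vma->vm_file);
	if (err)
		return block_page_mkwrite_return(err);

	/* Returns 0 with the page locked, or a negative errno. */
	err = __block_page_mkwrite(vma, vmf, myfs_get_block);

	myfs_end_write(vma->vm_file);

	/* Map the errno (or 0) to a VM_FAULT_* code for the VM. */
	return block_page_mkwrite_return(err);
}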
-rw-r--r--	fs/buffer.c			37
-rw-r--r--	include/linux/buffer_head.h	14
2 files changed, 34 insertions, 17 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index a08bb8e61c6f..f6ad8f9b8fa5 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2332,23 +2332,22 @@ EXPORT_SYMBOL(block_commit_write);
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
  */
-int
-block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
-		   get_block_t get_block)
+int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+			 get_block_t get_block)
 {
 	struct page *page = vmf->page;
 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 	unsigned long end;
 	loff_t size;
-	int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
+	int ret;
 
 	lock_page(page);
 	size = i_size_read(inode);
 	if ((page->mapping != inode->i_mapping) ||
 	    (page_offset(page) > size)) {
-		/* page got truncated out from underneath us */
-		unlock_page(page);
-		goto out;
+		/* We overload EFAULT to mean page got truncated */
+		ret = -EFAULT;
+		goto out_unlock;
 	}
 
 	/* page is wholly or partially inside EOF */
@@ -2361,18 +2360,22 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	if (!ret)
 		ret = block_commit_write(page, 0, end);
 
-	if (unlikely(ret)) {
-		unlock_page(page);
-		if (ret == -ENOMEM)
-			ret = VM_FAULT_OOM;
-		else /* -ENOSPC, -EIO, etc */
-			ret = VM_FAULT_SIGBUS;
-	} else
-		ret = VM_FAULT_LOCKED;
-
-out:
+	if (unlikely(ret < 0))
+		goto out_unlock;
+	return 0;
+out_unlock:
+	unlock_page(page);
 	return ret;
 }
+EXPORT_SYMBOL(__block_page_mkwrite);
+
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+		   get_block_t get_block)
+{
+	int ret = __block_page_mkwrite(vma, vmf, get_block);
+
+	return block_page_mkwrite_return(ret);
+}
 EXPORT_SYMBOL(block_page_mkwrite);
 
 /*
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index f5df23561b96..2bf6a9136a94 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -217,8 +217,22 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
 			get_block_t *, loff_t *);
 int generic_cont_expand_simple(struct inode *inode, loff_t size);
 int block_commit_write(struct page *page, unsigned from, unsigned to);
+int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+				get_block_t get_block);
 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 				get_block_t get_block);
+/* Convert errno to return value from ->page_mkwrite() call */
+static inline int block_page_mkwrite_return(int err)
+{
+	if (err == 0)
+		return VM_FAULT_LOCKED;
+	if (err == -EFAULT)
+		return VM_FAULT_NOPAGE;
+	if (err == -ENOMEM)
+		return VM_FAULT_OOM;
+	/* -ENOSPC, -EDQUOT, -EIO ... */
+	return VM_FAULT_SIGBUS;
+}
 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
 int block_truncate_page(struct address_space *, loff_t, get_block_t *);
 int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,