path: root/mm/filemap.c
author	Jan Kara <jack@suse.cz>	2012-06-12 10:20:37 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2012-07-31 01:45:47 -0400
commit	14da9200140f8d722ad1767dfabadebd8b34f2ad (patch)
tree	ea5d88b091999f7a64af0b9d335d7cad4c79edfb /mm/filemap.c
parent	5d37e9e6dec65cd21be68ee92de99686213e916b (diff)
fs: Protect write paths by sb_start_write - sb_end_write
There are several entry points which dirty pages in a filesystem: mmap (handled by block_page_mkwrite()), buffered write (handled by __generic_file_aio_write()), splice write (generic_file_splice_write()), truncate, and fallocate (the last two can dirty the last partial page - handled inside each filesystem separately). Protect these places with sb_start_write() and sb_end_write().

->page_mkwrite() calls are particularly complex since they are called with mmap_sem held and thus we cannot use standard sb_start_write() due to lock ordering constraints. We solve the problem by using a special freeze protection, sb_start_pagefault(), which ranks below mmap_sem.

BugLink: https://bugs.launchpad.net/bugs/897421
Tested-by: Kamal Mostafa <kamal@canonical.com>
Tested-by: Peter M. Petrakis <peter.petrakis@canonical.com>
Tested-by: Dann Frazier <dann.frazier@canonical.com>
Tested-by: Massimo Morana <massimo.morana@canonical.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
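For the truncate and fallocate entry points that the message above says are handled inside each filesystem, the pattern looks roughly like the sketch below. This is an illustrative sketch only, not code from this patch: foofs_fallocate() and foofs_do_fallocate() are hypothetical names, and only sb_start_write()/sb_end_write() are the helpers this series relies on.

#include <linux/fs.h>

/*
 * Illustrative sketch (not part of this patch): how a filesystem's
 * ->fallocate method might take freeze protection around work that can
 * dirty the last partial page.  foofs_do_fallocate() is a hypothetical
 * helper standing in for the per-filesystem implementation.
 */
static long foofs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_mapping->host;
	long ret;

	sb_start_write(inode->i_sb);	/* block a new freeze, or wait for one in progress */
	mutex_lock(&inode->i_mutex);

	ret = foofs_do_fallocate(inode, mode, offset, len);

	mutex_unlock(&inode->i_mutex);
	sb_end_write(inode->i_sb);	/* all dirtying done, drop freeze protection */
	return ret;
}

Note the nesting: freeze protection is taken before i_mutex, matching what the generic_file_aio_write() hunk below does, while the page-fault variant sb_start_pagefault()/sb_end_pagefault() is used in filemap_page_mkwrite(), where mmap_sem is already held.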
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 51efee65c2cc..fa5ca304148e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1718,6 +1718,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 	int ret = VM_FAULT_LOCKED;
 
+	sb_start_pagefault(inode->i_sb);
 	file_update_time(vma->vm_file);
 	lock_page(page);
 	if (page->mapping != inode->i_mapping) {
@@ -1725,7 +1726,14 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
+	/*
+	 * We mark the page dirty already here so that when freeze is in
+	 * progress, we are guaranteed that writeback during freezing will
+	 * see the dirty page and writeprotect it again.
+	 */
+	set_page_dirty(page);
 out:
+	sb_end_pagefault(inode->i_sb);
 	return ret;
 }
 EXPORT_SYMBOL(filemap_page_mkwrite);
@@ -2426,8 +2434,6 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	count = ocount;
 	pos = *ppos;
 
-	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
 	/* We can write back this queue in page reclaim */
 	current->backing_dev_info = mapping->backing_dev_info;
 	written = 0;
@@ -2526,6 +2532,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 
 	BUG_ON(iocb->ki_pos != pos);
 
+	sb_start_write(inode->i_sb);
 	mutex_lock(&inode->i_mutex);
 	blk_start_plug(&plug);
 	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
@@ -2539,6 +2546,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 			ret = err;
 	}
 	blk_finish_plug(&plug);
+	sb_end_write(inode->i_sb);
 	return ret;
 }
 EXPORT_SYMBOL(generic_file_aio_write);