diff options
| author | Jan Kara <jack@suse.cz> | 2012-06-12 10:20:37 -0400 |
|---|---|---|
| committer | Al Viro <viro@zeniv.linux.org.uk> | 2012-07-31 01:45:47 -0400 |
| commit | 14da9200140f8d722ad1767dfabadebd8b34f2ad (patch) | |
| tree | ea5d88b091999f7a64af0b9d335d7cad4c79edfb /mm | |
| parent | 5d37e9e6dec65cd21be68ee92de99686213e916b (diff) | |
fs: Protect write paths by sb_start_write - sb_end_write
There are several entry points which dirty pages in a filesystem. mmap
(handled by block_page_mkwrite()), buffered write (handled by
__generic_file_aio_write()), splice write (handled by generic_file_splice_write()),
truncate, and fallocate (these can dirty the last partial page - handled inside
each filesystem separately). Protect these places with sb_start_write() and
sb_end_write().
->page_mkwrite() calls are particularly complex since they are called with
mmap_sem held and thus we cannot use standard sb_start_write() due to lock
ordering constraints. We solve the problem by using a special freeze protection
sb_start_pagefault() which ranks below mmap_sem.
BugLink: https://bugs.launchpad.net/bugs/897421
Tested-by: Kamal Mostafa <kamal@canonical.com>
Tested-by: Peter M. Petrakis <peter.petrakis@canonical.com>
Tested-by: Dann Frazier <dann.frazier@canonical.com>
Tested-by: Massimo Morana <massimo.morana@canonical.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/filemap.c | 12 | ||||
| -rw-r--r-- | mm/filemap_xip.c | 5 |
2 files changed, 13 insertions, 4 deletions
diff --git a/mm/filemap.c b/mm/filemap.c index 51efee65c2cc..fa5ca304148e 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
| @@ -1718,6 +1718,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 1718 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; | 1718 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; |
| 1719 | int ret = VM_FAULT_LOCKED; | 1719 | int ret = VM_FAULT_LOCKED; |
| 1720 | 1720 | ||
| 1721 | sb_start_pagefault(inode->i_sb); | ||
| 1721 | file_update_time(vma->vm_file); | 1722 | file_update_time(vma->vm_file); |
| 1722 | lock_page(page); | 1723 | lock_page(page); |
| 1723 | if (page->mapping != inode->i_mapping) { | 1724 | if (page->mapping != inode->i_mapping) { |
| @@ -1725,7 +1726,14 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 1725 | ret = VM_FAULT_NOPAGE; | 1726 | ret = VM_FAULT_NOPAGE; |
| 1726 | goto out; | 1727 | goto out; |
| 1727 | } | 1728 | } |
| 1729 | /* | ||
| 1730 | * We mark the page dirty already here so that when freeze is in | ||
| 1731 | * progress, we are guaranteed that writeback during freezing will | ||
| 1732 | * see the dirty page and writeprotect it again. | ||
| 1733 | */ | ||
| 1734 | set_page_dirty(page); | ||
| 1728 | out: | 1735 | out: |
| 1736 | sb_end_pagefault(inode->i_sb); | ||
| 1729 | return ret; | 1737 | return ret; |
| 1730 | } | 1738 | } |
| 1731 | EXPORT_SYMBOL(filemap_page_mkwrite); | 1739 | EXPORT_SYMBOL(filemap_page_mkwrite); |
| @@ -2426,8 +2434,6 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
| 2426 | count = ocount; | 2434 | count = ocount; |
| 2427 | pos = *ppos; | 2435 | pos = *ppos; |
| 2428 | 2436 | ||
| 2429 | vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); | ||
| 2430 | |||
| 2431 | /* We can write back this queue in page reclaim */ | 2437 | /* We can write back this queue in page reclaim */ |
| 2432 | current->backing_dev_info = mapping->backing_dev_info; | 2438 | current->backing_dev_info = mapping->backing_dev_info; |
| 2433 | written = 0; | 2439 | written = 0; |
| @@ -2526,6 +2532,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
| 2526 | 2532 | ||
| 2527 | BUG_ON(iocb->ki_pos != pos); | 2533 | BUG_ON(iocb->ki_pos != pos); |
| 2528 | 2534 | ||
| 2535 | sb_start_write(inode->i_sb); | ||
| 2529 | mutex_lock(&inode->i_mutex); | 2536 | mutex_lock(&inode->i_mutex); |
| 2530 | blk_start_plug(&plug); | 2537 | blk_start_plug(&plug); |
| 2531 | ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); | 2538 | ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); |
| @@ -2539,6 +2546,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
| 2539 | ret = err; | 2546 | ret = err; |
| 2540 | } | 2547 | } |
| 2541 | blk_finish_plug(&plug); | 2548 | blk_finish_plug(&plug); |
| 2549 | sb_end_write(inode->i_sb); | ||
| 2542 | return ret; | 2550 | return ret; |
| 2543 | } | 2551 | } |
| 2544 | EXPORT_SYMBOL(generic_file_aio_write); | 2552 | EXPORT_SYMBOL(generic_file_aio_write); |
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index 80b34ef82dfe..13e013b1270c 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c | |||
| @@ -402,6 +402,8 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len, | |||
| 402 | loff_t pos; | 402 | loff_t pos; |
| 403 | ssize_t ret; | 403 | ssize_t ret; |
| 404 | 404 | ||
| 405 | sb_start_write(inode->i_sb); | ||
| 406 | |||
| 405 | mutex_lock(&inode->i_mutex); | 407 | mutex_lock(&inode->i_mutex); |
| 406 | 408 | ||
| 407 | if (!access_ok(VERIFY_READ, buf, len)) { | 409 | if (!access_ok(VERIFY_READ, buf, len)) { |
| @@ -412,8 +414,6 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len, | |||
| 412 | pos = *ppos; | 414 | pos = *ppos; |
| 413 | count = len; | 415 | count = len; |
| 414 | 416 | ||
| 415 | vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); | ||
| 416 | |||
| 417 | /* We can write back this queue in page reclaim */ | 417 | /* We can write back this queue in page reclaim */ |
| 418 | current->backing_dev_info = mapping->backing_dev_info; | 418 | current->backing_dev_info = mapping->backing_dev_info; |
| 419 | 419 | ||
| @@ -437,6 +437,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len, | |||
| 437 | current->backing_dev_info = NULL; | 437 | current->backing_dev_info = NULL; |
| 438 | out_up: | 438 | out_up: |
| 439 | mutex_unlock(&inode->i_mutex); | 439 | mutex_unlock(&inode->i_mutex); |
| 440 | sb_end_write(inode->i_sb); | ||
| 440 | return ret; | 441 | return ret; |
| 441 | } | 442 | } |
| 442 | EXPORT_SYMBOL_GPL(xip_file_write); | 443 | EXPORT_SYMBOL_GPL(xip_file_write); |
