author     Linus Torvalds <torvalds@linux-foundation.org>  2012-08-01 13:26:23 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-08-01 13:26:23 -0400
commit     a0e881b7c189fa2bd76c024dbff91e79511c971d
tree       0c801918565b08921d21aceee5b326f64d998f5f  /mm
parent     eff0d13f3823f35d70228cd151d2a2c89288ff32
parent     dbc6e0222d79e78925fe20733844a796a4b72cf9
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull second vfs pile from Al Viro:
 "The stuff in there: fsfreeze deadlock fixes by Jan (essentially, the
  deadlock reproduced by xfstests 068), symlink and hardlink restriction
  patches, plus assorted cleanups and fixes.

  Note that another fsfreeze deadlock (emergency thaw one) is *not* dealt
  with - the series by Fernando conflicts a lot with Jan's, breaks
  userland ABI (FIFREEZE semantics gets changed) and trades the deadlock
  for massive vfsmount leak; this is going to be handled next cycle.
  There probably will be another pull request, but that stuff won't be
  in it."

Fix up trivial conflicts due to unrelated changes next to each other in
drivers/{staging/gdm72xx/usb_boot.c, usb/gadget/storage_common.c}

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (54 commits)
  delousing target_core_file a bit
  Documentation: Correct s_umount state for freeze_fs/unfreeze_fs
  fs: Remove old freezing mechanism
  ext2: Implement freezing
  btrfs: Convert to new freezing mechanism
  nilfs2: Convert to new freezing mechanism
  ntfs: Convert to new freezing mechanism
  fuse: Convert to new freezing mechanism
  gfs2: Convert to new freezing mechanism
  ocfs2: Convert to new freezing mechanism
  xfs: Convert to new freezing code
  ext4: Convert to new freezing mechanism
  fs: Protect write paths by sb_start_write - sb_end_write
  fs: Skip atime update on frozen filesystem
  fs: Add freezing handling to mnt_want_write() / mnt_drop_write()
  fs: Improve filesystem freezing handling
  switch the protection of percpu_counter list to spinlock
  nfsd: Push mnt_want_write() outside of i_mutex
  btrfs: Push mnt_want_write() outside of i_mutex
  fat: Push mnt_want_write() outside of i_mutex
  ...
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c       31
-rw-r--r--  mm/filemap_xip.c    6
-rw-r--r--  mm/memory.c        14
3 files changed, 40 insertions(+), 11 deletions(-)
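The recurring pattern in the mm/ part of this pull is the replacement of the advisory vfs_check_frozen() check with paired freeze-protection helpers: sb_start_write()/sb_end_write() around write paths and sb_start_pagefault()/sb_end_pagefault() inside page_mkwrite. A minimal sketch of that pattern for a hypothetical example_fs write path follows; only the sb_* helpers and the "take freeze protection outside i_mutex" ordering come from the series, everything named example_fs_* is illustrative.

/*
 * Hypothetical example_fs write path sketching the new freeze protection.
 * This is a sketch, not code from the merge.
 */
#include <linux/fs.h>
#include <linux/mutex.h>

static ssize_t example_fs_write(struct file *file, const char __user *buf,
				size_t len, loff_t *ppos)
{
	struct inode *inode = file->f_mapping->host;
	ssize_t ret = 0;

	/*
	 * Blocks a freezer at the SB_FREEZE_WRITE level until we finish;
	 * this replaces the old, racy vfs_check_frozen() check that the
	 * diffs below delete. Taken outside i_mutex, mirroring
	 * generic_file_aio_write() below.
	 */
	sb_start_write(inode->i_sb);
	mutex_lock(&inode->i_mutex);

	/* ... copy-in, block allocation and the actual write would go here ... */

	mutex_unlock(&inode->i_mutex);
	sb_end_write(inode->i_sb);
	return ret;
}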
diff --git a/mm/filemap.c b/mm/filemap.c
index a4a5260b0279..fa5ca304148e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1712,8 +1712,35 @@ page_not_uptodate:
 }
 EXPORT_SYMBOL(filemap_fault);
 
+int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct page *page = vmf->page;
+	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+	int ret = VM_FAULT_LOCKED;
+
+	sb_start_pagefault(inode->i_sb);
+	file_update_time(vma->vm_file);
+	lock_page(page);
+	if (page->mapping != inode->i_mapping) {
+		unlock_page(page);
+		ret = VM_FAULT_NOPAGE;
+		goto out;
+	}
+	/*
+	 * We mark the page dirty already here so that when freeze is in
+	 * progress, we are guaranteed that writeback during freezing will
+	 * see the dirty page and writeprotect it again.
+	 */
+	set_page_dirty(page);
+out:
+	sb_end_pagefault(inode->i_sb);
+	return ret;
+}
+EXPORT_SYMBOL(filemap_page_mkwrite);
+
 const struct vm_operations_struct generic_file_vm_ops = {
 	.fault		= filemap_fault,
+	.page_mkwrite	= filemap_page_mkwrite,
 };
 
 /* This is used for a general mmap of a disk file */
@@ -2407,8 +2434,6 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	count = ocount;
 	pos = *ppos;
 
-	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
 	/* We can write back this queue in page reclaim */
 	current->backing_dev_info = mapping->backing_dev_info;
 	written = 0;
@@ -2507,6 +2532,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 
 	BUG_ON(iocb->ki_pos != pos);
 
+	sb_start_write(inode->i_sb);
 	mutex_lock(&inode->i_mutex);
 	blk_start_plug(&plug);
 	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
@@ -2520,6 +2546,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 		ret = err;
 	}
 	blk_finish_plug(&plug);
+	sb_end_write(inode->i_sb);
 	return ret;
 }
 EXPORT_SYMBOL(generic_file_aio_write);
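With filemap_page_mkwrite() exported, a filesystem that keeps its own vm_operations_struct can hook the freeze-aware handler directly, which is exactly what the XIP change below does. A sketch of that wiring for a hypothetical example_fs; filemap_fault and filemap_page_mkwrite are real symbols, everything named example_fs_* is made up, and the VM_CAN_NONLINEAR flag is assumed to match the 3.6-era generic_file_mmap() convention.

/*
 * Hypothetical example_fs mmap wiring; illustrative only.
 */
#include <linux/fs.h>
#include <linux/mm.h>

static const struct vm_operations_struct example_fs_vm_ops = {
	.fault		= filemap_fault,	/* or a filesystem-specific fault handler */
	.page_mkwrite	= filemap_page_mkwrite,	/* freeze-aware mmap-write hook from this merge */
};

static int example_fs_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &example_fs_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;	/* mirrors generic_file_mmap() of this era */
	return 0;
}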
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 213ca1f53409..13e013b1270c 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -304,6 +304,7 @@ out:
 
 static const struct vm_operations_struct xip_file_vm_ops = {
 	.fault	= xip_file_fault,
+	.page_mkwrite	= filemap_page_mkwrite,
 };
 
 int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
@@ -401,6 +402,8 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
 	loff_t pos;
 	ssize_t ret;
 
+	sb_start_write(inode->i_sb);
+
 	mutex_lock(&inode->i_mutex);
 
 	if (!access_ok(VERIFY_READ, buf, len)) {
@@ -411,8 +414,6 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
 	pos = *ppos;
 	count = len;
 
-	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
 	/* We can write back this queue in page reclaim */
 	current->backing_dev_info = mapping->backing_dev_info;
 
@@ -436,6 +437,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
 	current->backing_dev_info = NULL;
  out_up:
 	mutex_unlock(&inode->i_mutex);
+	sb_end_write(inode->i_sb);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(xip_file_write);
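For reference, a hypothetical file_operations wiring for an XIP-capable filesystem; xip_file_read(), xip_file_write() and xip_file_mmap() are the real mm/filemap_xip.c entry points touched above, while the struct and its name are illustrative (ext2 is the actual in-tree XIP user).

/*
 * Hypothetical XIP file_operations; a sketch, not copied from any filesystem.
 */
#include <linux/fs.h>

static const struct file_operations example_xip_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= xip_file_read,
	.write		= xip_file_write,	/* now bracketed by sb_start_write()/sb_end_write() */
	.mmap		= xip_file_mmap,	/* installs .page_mkwrite = filemap_page_mkwrite */
	.fsync		= generic_file_fsync,
};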
diff --git a/mm/memory.c b/mm/memory.c
index 482f089765ff..57361708d1a5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2650,6 +2650,9 @@ reuse:
 		if (!page_mkwrite) {
 			wait_on_page_locked(dirty_page);
 			set_page_dirty_balance(dirty_page, page_mkwrite);
+			/* file_update_time outside page_lock */
+			if (vma->vm_file)
+				file_update_time(vma->vm_file);
 		}
 		put_page(dirty_page);
 		if (page_mkwrite) {
@@ -2667,10 +2670,6 @@ reuse:
 			}
 		}
 
-		/* file_update_time outside page_lock */
-		if (vma->vm_file)
-			file_update_time(vma->vm_file);
-
 		return ret;
 	}
 
@@ -3339,12 +3338,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (dirty_page) {
 		struct address_space *mapping = page->mapping;
+		int dirtied = 0;
 
 		if (set_page_dirty(dirty_page))
-			page_mkwrite = 1;
+			dirtied = 1;
 		unlock_page(dirty_page);
 		put_page(dirty_page);
-		if (page_mkwrite && mapping) {
+		if ((dirtied || page_mkwrite) && mapping) {
 			/*
 			 * Some device drivers do not set page.mapping but still
 			 * dirty their pages
@@ -3353,7 +3353,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 
 		/* file_update_time outside page_lock */
-		if (vma->vm_file)
+		if (vma->vm_file && !page_mkwrite)
 			file_update_time(vma->vm_file);
 	} else {
 		unlock_page(vmf.page);
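The memory.c side shifts responsibility for file_update_time(): when a vma provides ->page_mkwrite, the timestamp update now happens in that handler (filemap_page_mkwrite() does it under sb_start_pagefault()), and the generic fault paths only do it themselves when no handler ran. A hypothetical driver-style handler, to show the obligation this places on implementers; all names are illustrative.

/*
 * Hypothetical example_drv page_mkwrite handler. After this merge, a vma
 * that provides ->page_mkwrite no longer gets file_update_time() from the
 * generic fault path, so the handler has to do it (or deliberately skip it).
 */
#include <linux/fs.h>
#include <linux/mm.h>

static int example_drv_page_mkwrite(struct vm_area_struct *vma,
				    struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	file_update_time(vma->vm_file);		/* c/mtime update is now our job */
	lock_page(page);
	if (page->mapping != vma->vm_file->f_mapping) {
		unlock_page(page);		/* page was truncated under us */
		return VM_FAULT_NOPAGE;
	}
	set_page_dirty(page);
	return VM_FAULT_LOCKED;			/* caller unlocks after wiring the pte */
}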