diff options
Diffstat (limited to 'mm/truncate.c')
-rw-r--r-- | mm/truncate.c | 61 |
1 files changed, 59 insertions, 2 deletions
diff --git a/mm/truncate.c b/mm/truncate.c
index 96d167372d89..f1e4d6052369 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -20,6 +20,7 @@
20 | #include <linux/buffer_head.h> /* grr. try_to_release_page, | 20 | #include <linux/buffer_head.h> /* grr. try_to_release_page, |
21 | do_invalidatepage */ | 21 | do_invalidatepage */ |
22 | #include <linux/cleancache.h> | 22 | #include <linux/cleancache.h> |
23 | #include <linux/rmap.h> | ||
23 | #include "internal.h" | 24 | #include "internal.h" |
24 | 25 | ||
25 | static void clear_exceptional_entry(struct address_space *mapping, | 26 | static void clear_exceptional_entry(struct address_space *mapping, |
@@ -714,17 +715,73 @@ EXPORT_SYMBOL(truncate_pagecache);
714 | * necessary) to @newsize. It will be typically be called from the filesystem's | 715 | * necessary) to @newsize. It will be typically be called from the filesystem's |
715 | * setattr function when ATTR_SIZE is passed in. | 716 | * setattr function when ATTR_SIZE is passed in. |
716 | * | 717 | * |
717 | * Must be called with inode_mutex held and before all filesystem specific | 718 | * Must be called with a lock serializing truncates and writes (generally |
718 | * block truncation has been performed. | 719 | * i_mutex but e.g. xfs uses a different lock) and before all filesystem |
720 | * specific block truncation has been performed. | ||
719 | */ | 721 | */ |
720 | void truncate_setsize(struct inode *inode, loff_t newsize) | 722 | void truncate_setsize(struct inode *inode, loff_t newsize) |
721 | { | 723 | { |
724 | loff_t oldsize = inode->i_size; | ||
725 | |||
722 | i_size_write(inode, newsize); | 726 | i_size_write(inode, newsize); |
727 | if (newsize > oldsize) | ||
728 | pagecache_isize_extended(inode, oldsize, newsize); | ||
723 | truncate_pagecache(inode, newsize); | 729 | truncate_pagecache(inode, newsize); |
724 | } | 730 | } |
725 | EXPORT_SYMBOL(truncate_setsize); | 731 | EXPORT_SYMBOL(truncate_setsize); |
726 | 732 | ||
727 | /** | 733 | /** |
734 | * pagecache_isize_extended - update pagecache after extension of i_size | ||
735 | * @inode: inode for which i_size was extended | ||
736 | * @from: original inode size | ||
737 | * @to: new inode size | ||
738 | * | ||
739 | * Handle extension of inode size either caused by extending truncate or by | ||
740 | * write starting after current i_size. We mark the page straddling current | ||
741 | * i_size RO so that page_mkwrite() is called on the nearest write access to | ||
742 | * the page. This way filesystem can be sure that page_mkwrite() is called on | ||
743 | * the page before user writes to the page via mmap after the i_size has been | ||
744 | * changed. | ||
745 | * | ||
746 | * The function must be called after i_size is updated so that page fault | ||
747 | * coming after we unlock the page will already see the new i_size. | ||
748 | * The function must be called while we still hold i_mutex - this not only | ||
749 | * makes sure i_size is stable but also that userspace cannot observe new | ||
750 | * i_size value before we are prepared to store mmap writes at new inode size. | ||
751 | */ | ||
752 | void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to) | ||
753 | { | ||
754 | int bsize = 1 << inode->i_blkbits; | ||
755 | loff_t rounded_from; | ||
756 | struct page *page; | ||
757 | pgoff_t index; | ||
758 | |||
759 | WARN_ON(to > inode->i_size); | ||
760 | |||
761 | if (from >= to || bsize == PAGE_CACHE_SIZE) | ||
762 | return; | ||
763 | /* Page straddling @from will not have any hole block created? */ | ||
764 | rounded_from = round_up(from, bsize); | ||
765 | if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1))) | ||
766 | return; | ||
767 | |||
768 | index = from >> PAGE_CACHE_SHIFT; | ||
769 | page = find_lock_page(inode->i_mapping, index); | ||
770 | /* Page not cached? Nothing to do */ | ||
771 | if (!page) | ||
772 | return; | ||
773 | /* | ||
774 | * See clear_page_dirty_for_io() for details why set_page_dirty() | ||
775 | * is needed. | ||
776 | */ | ||
777 | if (page_mkclean(page)) | ||
778 | set_page_dirty(page); | ||
779 | unlock_page(page); | ||
780 | page_cache_release(page); | ||
781 | } | ||
782 | EXPORT_SYMBOL(pagecache_isize_extended); | ||
783 | |||
784 | /** | ||
728 | * truncate_pagecache_range - unmap and remove pagecache that is hole-punched | 785 | * truncate_pagecache_range - unmap and remove pagecache that is hole-punched |
729 | * @inode: inode | 786 | * @inode: inode |
730 | * @lstart: offset of beginning of hole | 787 | * @lstart: offset of beginning of hole |