about summary refs log tree commit diff stats
path: root/mm/truncate.c
diff options
context:
space:
mode:
author: Matthew Wilcox <willy@infradead.org> 2018-01-31 19:17:36 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org> 2018-01-31 20:18:37 -0500
commit977fbdcd5986c9ff700bf276644d2b1973a53348 (patch)
tree312bd6dd7f6a528759dad4b2da27eaf9ff339b12 /mm/truncate.c
parenta365ac09d334389bc69841c9d153f03fa2442f1c (diff)
mm: add unmap_mapping_pages()
Several users of unmap_mapping_range() would prefer to express their range in pages rather than bytes. Unfortunately, on a 32-bit kernel, you have to remember to cast your page number to a 64-bit type before shifting it, and four places in the current tree didn't remember to do that. That's a sign of a bad interface. Conveniently, unmap_mapping_range() actually converts from bytes into pages, so hoist the guts of unmap_mapping_range() into a new function unmap_mapping_pages() and convert the callers which want to use pages. Link: http://lkml.kernel.org/r/20171206142627.GD32044@bombadil.infradead.org Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com> Reported-by: "zhangyi (F)" <yi.zhang@huawei.com> Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/truncate.c')
-rw-r--r--  mm/truncate.c  23
1 file changed, 7 insertions, 16 deletions
diff --git a/mm/truncate.c b/mm/truncate.c
index e4b4cf0f4070..c34e2fd4f583 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -179,12 +179,8 @@ static void
179truncate_cleanup_page(struct address_space *mapping, struct page *page) 179truncate_cleanup_page(struct address_space *mapping, struct page *page)
180{ 180{
181 if (page_mapped(page)) { 181 if (page_mapped(page)) {
182 loff_t holelen; 182 pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
183 183 unmap_mapping_pages(mapping, page->index, nr, false);
184 holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
185 unmap_mapping_range(mapping,
186 (loff_t)page->index << PAGE_SHIFT,
187 holelen, 0);
188 } 184 }
189 185
190 if (page_has_private(page)) 186 if (page_has_private(page))
@@ -715,19 +711,15 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
715 /* 711 /*
716 * Zap the rest of the file in one hit. 712 * Zap the rest of the file in one hit.
717 */ 713 */
718 unmap_mapping_range(mapping, 714 unmap_mapping_pages(mapping, index,
719 (loff_t)index << PAGE_SHIFT, 715 (1 + end - index), false);
720 (loff_t)(1 + end - index)
721 << PAGE_SHIFT,
722 0);
723 did_range_unmap = 1; 716 did_range_unmap = 1;
724 } else { 717 } else {
725 /* 718 /*
726 * Just zap this page 719 * Just zap this page
727 */ 720 */
728 unmap_mapping_range(mapping, 721 unmap_mapping_pages(mapping, index,
729 (loff_t)index << PAGE_SHIFT, 722 1, false);
730 PAGE_SIZE, 0);
731 } 723 }
732 } 724 }
733 BUG_ON(page_mapped(page)); 725 BUG_ON(page_mapped(page));
@@ -753,8 +745,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
753 * get remapped later. 745 * get remapped later.
754 */ 746 */
755 if (dax_mapping(mapping)) { 747 if (dax_mapping(mapping)) {
756 unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT, 748 unmap_mapping_pages(mapping, start, end - start + 1, false);
757 (loff_t)(end - start + 1) << PAGE_SHIFT, 0);
758 } 749 }
759out: 750out:
760 cleancache_invalidate_inode(mapping); 751 cleancache_invalidate_inode(mapping);