author    Matthew Wilcox <willy@infradead.org>  2018-01-31 19:17:36 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 20:18:37 -0500
commit    977fbdcd5986c9ff700bf276644d2b1973a53348 (patch)
tree      312bd6dd7f6a528759dad4b2da27eaf9ff339b12 /fs/dax.c
parent    a365ac09d334389bc69841c9d153f03fa2442f1c (diff)
mm: add unmap_mapping_pages()
Several users of unmap_mapping_range() would prefer to express their
range in pages rather than bytes.  Unfortunately, on a 32-bit kernel, you
have to remember to cast your page number to a 64-bit type before
shifting it, and four places in the current tree didn't remember to do
that.  That's a sign of a bad interface.

Conveniently, unmap_mapping_range() actually converts from bytes into
pages, so hoist the guts of unmap_mapping_range() into a new function
unmap_mapping_pages() and convert the callers which want to use pages.

Link: http://lkml.kernel.org/r/20171206142627.GD32044@bombadil.infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Reported-by: "zhangyi (F)" <yi.zhang@huawei.com>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
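A minimal standalone sketch of the truncation hazard described above; the
constants and names are illustrative stand-ins (uint32_t for a 32-bit
kernel's pgoff_t, int64_t for loff_t), not code from the patch:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12	/* 4 KiB pages */

	int main(void)
	{
		/* Stand-in for pgoff_t on a 32-bit kernel: 32 bits wide. */
		uint32_t pgoff = 0x123456;	/* byte offset lies above 4 GiB */

		/* Bug pattern: the shift is evaluated in 32-bit arithmetic,
		 * so the high bits are lost before the widening assignment. */
		int64_t bad = pgoff << PAGE_SHIFT;

		/* Correct pattern: widen to 64 bits first, then shift. */
		int64_t good = (int64_t)pgoff << PAGE_SHIFT;

		printf("bad=0x%llx good=0x%llx\n",
		       (unsigned long long)bad, (unsigned long long)good);
		return 0;
	}

An interface that takes page offsets directly, as the converted call
sites below do, keeps this conversion in one audited place instead of
repeating it at every caller.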
Diffstat (limited to 'fs/dax.c')
-rw-r--r--  fs/dax.c | 19 ++++++-------------
1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index c2ebf10b70da..6ee6f7e24f5a 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -44,6 +44,7 @@
 
 /* The 'colour' (ie low bits) within a PMD of a page offset. */
 #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
+#define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT)
 
 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
 
@@ -375,8 +376,8 @@ restart:
 	 * unmapped.
 	 */
 	if (pmd_downgrade && dax_is_zero_entry(entry))
-		unmap_mapping_range(mapping,
-			(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
+		unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+						PG_PMD_NR, false);
 
 	err = radix_tree_preload(
 			mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
@@ -538,12 +539,10 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
 		/* we are replacing a zero page with block mapping */
 		if (dax_is_pmd_entry(entry))
-			unmap_mapping_range(mapping,
-					(vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
-					PMD_SIZE, 0);
+			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+					PG_PMD_NR, false);
 		else /* pte entry */
-			unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
-					PAGE_SIZE, 0);
+			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
 	}
 
 	spin_lock_irq(&mapping->tree_lock);
@@ -1269,12 +1268,6 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 }
 
 #ifdef CONFIG_FS_DAX_PMD
-/*
- * The 'colour' (ie low bits) within a PMD of a page offset. This comes up
- * more often than one might expect in the below functions.
- */
-#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
-
 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 		void *entry)
 {
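For reference, the page-index arithmetic the converted call sites rely
on, worked through under the common x86-64 assumptions of 4 KiB pages and
2 MiB PMDs; the EX_-prefixed names are illustrative stand-ins for the
kernel macros, not code from the patch:

	#define EX_PAGE_SHIFT    12
	#define EX_PMD_SIZE      (1UL << 21)				/* 2 MiB */
	#define EX_PG_PMD_COLOUR ((EX_PMD_SIZE >> EX_PAGE_SHIFT) - 1)	/* 511 */
	#define EX_PG_PMD_NR     (EX_PMD_SIZE >> EX_PAGE_SHIFT)		/* 512 */

	unsigned long index = 1000;				/* some page offset */
	unsigned long start = index & ~EX_PG_PMD_COLOUR;	/* 512 */

	/* unmap_mapping_pages(mapping, start, EX_PG_PMD_NR, false) then
	 * covers pages 512..1023: the whole PMD containing page 1000,
	 * the same range the old byte-based call computed with
	 * (index << PAGE_SHIFT) & PMD_MASK and a length of PMD_SIZE. */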