author:    Ross Zwisler <ross.zwisler@linux.intel.com>    2017-05-12 18:46:47 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org> 2017-05-12 18:57:15 -0400
commit:    4636e70bb0a8b871998b6841a2e4b205cf2bc863
tree:      a1c8ed75c08995acbb94320782b5bc63d3cc0e51 /fs/dax.c
parent:    cea582247a0aea78b4674c32833c10b8820fcdbe
dax: prevent invalidation of mapped DAX entries
Patch series "mm,dax: Fix data corruption due to mmap inconsistency", v4.

This series fixes data corruption that can happen for DAX mounts when page faults race with write(2); as a result, page tables get out of sync with block mappings in the filesystem, so data seen through mmap differs from data seen through read(2). The series passes testing with the t_mmap_stale test program from Ross and also other mmap-related tests on a DAX filesystem.

This patch (of 4):

dax_invalidate_mapping_entry() currently removes DAX exceptional entries only if they are clean and unlocked. This is done via:

    invalidate_mapping_pages()
      invalidate_exceptional_entry()
        dax_invalidate_mapping_entry()

However, for page cache pages removed in invalidate_mapping_pages() there is an additional criterion: the page must not be mapped. This is noted in the comments above invalidate_mapping_pages() and is checked in invalidate_inode_page().

For DAX entries this means that we can end up in a situation where a DAX exceptional entry, either a huge zero page or a regular DAX entry, could end up mapped but without an associated radix tree entry. This is inconsistent with the rest of the DAX code and with what happens in the page cache case.

We aren't able to unmap the DAX exceptional entry because, according to its comments, invalidate_mapping_pages() isn't allowed to block, and unmap_mapping_range() takes a write lock on mapping->i_mmap_rwsem.

Since we essentially never have unmapped DAX entries to evict from the radix tree, just remove dax_invalidate_mapping_entry().

Fixes: c6dcf52c23d2 ("mm: Invalidate DAX radix tree entries only if appropriate")
Link: http://lkml.kernel.org/r/20170510085419.27601-2-jack@suse.cz
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Reported-by: Jan Kara <jack@suse.cz>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: <stable@vger.kernel.org> [4.10+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
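[Editor's note: the "page must not be mapped" criterion referenced above is the one applied to regular page cache pages in invalidate_inode_page(). The following is a simplified sketch, paraphrased from mm/truncate.c of roughly that era; it is not part of this patch and details may differ between kernel versions.]

    int invalidate_inode_page(struct page *page)
    {
    	struct address_space *mapping = page_mapping(page);

    	if (!mapping)
    		return 0;
    	if (PageDirty(page) || PageWriteback(page))
    		return 0;
    	/* Mapped pages are skipped here; the removed DAX helper below
    	 * checked dirty/towrite tags but had no analogue of this test. */
    	if (page_mapped(page))
    		return 0;
    	return invalidate_complete_page(mapping, page);
    }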
Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c	29
1 file changed, 0 insertions, 29 deletions
diff --git a/fs/dax.c b/fs/dax.c
index 66d79067eedf..38deebb8c86e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -461,35 +461,6 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 }
 
 /*
- * Invalidate exceptional DAX entry if easily possible. This handles DAX
- * entries for invalidate_inode_pages() so we evict the entry only if we can
- * do so without blocking.
- */
-int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
-{
-	int ret = 0;
-	void *entry, **slot;
-	struct radix_tree_root *page_tree = &mapping->page_tree;
-
-	spin_lock_irq(&mapping->tree_lock);
-	entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
-	if (!entry || !radix_tree_exceptional_entry(entry) ||
-	    slot_locked(mapping, slot))
-		goto out;
-	if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
-	    radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
-		goto out;
-	radix_tree_delete(page_tree, index);
-	mapping->nrexceptional--;
-	ret = 1;
-out:
-	spin_unlock_irq(&mapping->tree_lock);
-	if (ret)
-		dax_wake_mapping_entry_waiter(mapping, index, entry, true);
-	return ret;
-}
-
-/*
  * Invalidate exceptional DAX entry if it is clean.
  */
 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
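[Editor's note: the call chain quoted in the commit message reached the removed function through invalidate_exceptional_entry() in mm/truncate.c. A paraphrased sketch of that pre-patch caller, as introduced by c6dcf52c23d2, is shown below for context; it is not part of this diff, which is filtered to fs/dax.c, and the exact wording may differ.]

    static int invalidate_exceptional_entry(struct address_space *mapping,
    					pgoff_t index, void *entry)
    {
    	/* Handled by shmem itself */
    	if (shmem_mapping(mapping))
    		return 1;
    	/* Pre-patch: DAX entries were evicted here if clean and unlocked. */
    	if (dax_mapping(mapping))
    		return dax_invalidate_mapping_entry(mapping, index);
    	clear_shadow_entry(mapping, index, entry);
    	return 1;
    }

The corresponding caller update is not shown on this page, since the diffstat above is limited to fs/dax.c.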