summaryrefslogtreecommitdiffstats
path: root/fs/dax.c
diff options
context:
space:
mode:
authorJan Kara <jack@suse.cz>2016-05-12 12:29:19 -0400
committerRoss Zwisler <ross.zwisler@linux.intel.com>2016-05-19 17:27:49 -0400
commitbc2466e4257369d0ebee2b6265070d323343fa72 (patch)
treedc3c050e1b7bde8f0c93b1eb0764750f10331fed /fs/dax.c
parentac401cc782429cc8560ce4840b1405d603740917 (diff)
dax: Use radix tree entry lock to protect cow faults
When doing cow faults, we cannot directly fill in the PTE as we do for other faults, because we rely on generic code to do proper accounting of the cowed page. We also have no page to lock to protect against races with truncate, as other faults do, and we need the protection to extend until the moment generic code inserts the cowed page into the PTE; up to that point we have no protection from the fs-specific i_mmap_sem. So far we relied on using i_mmap_lock for this protection; however, that is completely special to cow faults. To make fault locking more uniform, use the DAX entry lock instead. Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com> Signed-off-by: Jan Kara <jack@suse.cz> Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Diffstat (limited to 'fs/dax.c')
-rw-r--r--fs/dax.c12
1 file changed, 5 insertions, 7 deletions
diff --git a/fs/dax.c b/fs/dax.c
index f43c3d806fb6..be74635e05a6 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -478,7 +478,7 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
478 } 478 }
479} 479}
480 480
481static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index) 481void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
482{ 482{
483 void *ret, **slot; 483 void *ret, **slot;
484 484
@@ -501,7 +501,7 @@ static void put_locked_mapping_entry(struct address_space *mapping,
501 unlock_page(entry); 501 unlock_page(entry);
502 put_page(entry); 502 put_page(entry);
503 } else { 503 } else {
504 unlock_mapping_entry(mapping, index); 504 dax_unlock_mapping_entry(mapping, index);
505 } 505 }
506} 506}
507 507
@@ -884,12 +884,10 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
884 goto unlock_entry; 884 goto unlock_entry;
885 if (!radix_tree_exceptional_entry(entry)) { 885 if (!radix_tree_exceptional_entry(entry)) {
886 vmf->page = entry; 886 vmf->page = entry;
887 } else { 887 return VM_FAULT_LOCKED;
888 unlock_mapping_entry(mapping, vmf->pgoff);
889 i_mmap_lock_read(mapping);
890 vmf->page = NULL;
891 } 888 }
892 return VM_FAULT_LOCKED; 889 vmf->entry = entry;
890 return VM_FAULT_DAX_LOCKED;
893 } 891 }
894 892
895 if (!buffer_mapped(&bh)) { 893 if (!buffer_mapped(&bh)) {