author     Jan Kara <jack@suse.cz>                      2016-10-19 08:48:38 -0400
committer  Dan Williams <dan.j.williams@intel.com>      2016-12-26 23:29:25 -0500
commit     f449b936f1aff7696b24a338f493d5cee8d48d55 (patch)
tree       5eb066bfc0c39f9485c83a7c28da77533fef4e11
parent     e3fce68cdbed297d927e993b3ea7b8b1cee545da (diff)
dax: Finish fault completely when loading holes
The only case when we do not finish the page fault completely is when we are loading hole pages into a radix tree. Avoid this special case and finish the fault in that case as well inside the DAX fault handler. This will allow for easier iomap handling.

Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
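For orientation, here is a condensed sketch of dax_load_hole() as it reads after this patch, assembled from the hunks below. The hole-page allocation call lies just outside the hunk context, so find_or_create_page() is assumed here from the surrounding fs/dax.c of that era. The key change is that the handler now completes the fault itself via finish_fault() and returns VM_FAULT_NOPAGE, rather than handing a locked hole page back to the generic fault path with VM_FAULT_LOCKED:

static int dax_load_hole(struct address_space *mapping, void **entry,
			 struct vm_fault *vmf)
{
	struct page *page;
	int ret;

	/* Hole page already exists? Reuse it. */
	if (!radix_tree_exceptional_entry(*entry)) {
		page = *entry;
		goto out;
	}

	/* Replace the locked radix tree entry with a zeroed hole page
	 * (allocation call assumed from surrounding code, not part of this hunk) */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
 out:
	vmf->page = page;
	ret = finish_fault(vmf);	/* install the PTE here, inside the DAX handler */
	vmf->page = NULL;
	*entry = page;
	if (!ret) {
		/* The PTE now references the page; take a reference for it */
		get_page(page);
		return VM_FAULT_NOPAGE;	/* fault fully handled */
	}
	return ret;
}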
 fs/dax.c | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index 08e15db28b79..bfec6f2ef613 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -539,15 +539,16 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
  * otherwise it will simply fall out of the page cache under memory
  * pressure without ever having been dirtied.
  */
-static int dax_load_hole(struct address_space *mapping, void *entry,
+static int dax_load_hole(struct address_space *mapping, void **entry,
 			 struct vm_fault *vmf)
 {
 	struct page *page;
+	int ret;
 
 	/* Hole page already exists? Return it... */
-	if (!radix_tree_exceptional_entry(entry)) {
-		vmf->page = entry;
-		return VM_FAULT_LOCKED;
+	if (!radix_tree_exceptional_entry(*entry)) {
+		page = *entry;
+		goto out;
 	}
 
 	/* This will replace locked radix tree entry with a hole page */
@@ -555,8 +556,17 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 			vmf->gfp_mask | __GFP_ZERO);
 	if (!page)
 		return VM_FAULT_OOM;
+ out:
 	vmf->page = page;
-	return VM_FAULT_LOCKED;
+	ret = finish_fault(vmf);
+	vmf->page = NULL;
+	*entry = page;
+	if (!ret) {
+		/* Grab reference for PTE that is now referencing the page */
+		get_page(page);
+		return VM_FAULT_NOPAGE;
+	}
+	return ret;
 }
 
 static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
@@ -1163,8 +1173,8 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
 		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
-			vmf_ret = dax_load_hole(mapping, entry, vmf);
-			break;
+			vmf_ret = dax_load_hole(mapping, &entry, vmf);
+			goto finish_iomap;
 		}
 		/*FALLTHRU*/
 	default:
@@ -1185,8 +1195,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		}
 	}
  unlock_entry:
-	if (vmf_ret != VM_FAULT_LOCKED || error)
-		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  out:
 	if (error == -ENOMEM)
 		return VM_FAULT_OOM | major;