author		Matthew Wilcox <mawilcox@microsoft.com>		2018-06-02 22:39:37 -0400
committer	Dan Williams <dan.j.williams@intel.com>		2018-06-02 22:39:37 -0400
commit		cc4a90ac816e00775fbc2a9c018bf2af606abd06 (patch)
tree		4f3aa285bcb4468655be73035c655e01284e869c /fs/dax.c
parent		d6dc57e251a43c428a9ee3adb7665543a1a584f0 (diff)
dax: dax_insert_mapping_entry always succeeds
It does not return an error, so we don't need to check the return value
for IS_ERR(). Indeed, it is a bug to do so; with a sufficiently large
PFN, a legitimate DAX entry may be mistaken for an error return.

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
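To make the failure mode concrete, here is a stand-alone sketch of the
aliasing described above. The entry encoding (encode_entry(), the 6-bit
shift, the tag bits) is a simplified stand-in for illustration, not the
real radix-tree layout in fs/dax.c; only the IS_ERR() convention (the
top MAX_ERRNO values of the address space are reserved for encoded
errnos) matches the kernel.

	#include <stdio.h>

	/* IS_ERR() in the kernel is essentially this test: the last
	 * 4095 values of the address space are reserved for errnos. */
	#define MAX_ERRNO	4095
	#define IS_ERR_VALUE(x)	\
		((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

	/* Hypothetical, simplified DAX entry encoding: the PFN lives in
	 * the upper bits, a few low bits carry tags (the real layout is
	 * in fs/dax.c). */
	static unsigned long encode_entry(unsigned long pfn)
	{
		return (pfn << 6) | 0x2;
	}

	int main(void)
	{
		unsigned long small = encode_entry(0x1000);    /* ordinary PFN */
		unsigned long huge  = encode_entry(~0UL >> 6); /* very large PFN */

		printf("small entry mistaken for error: %d\n", IS_ERR_VALUE(small));
		printf("huge  entry mistaken for error: %d\n", IS_ERR_VALUE(huge));
		return 0;
	}

The second printf reports 1: a legitimate (if extreme) entry value
lands in the IS_ERR() range, which is why the callers below can simply
stop checking.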
Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c | 18 ++----------------
1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index e8f61ea690f7..31e9f51ac917 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1009,7 +1009,6 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 	unsigned long vaddr = vmf->address;
 	int ret = VM_FAULT_NOPAGE;
 	struct page *zero_page;
-	void *entry2;
 	pfn_t pfn;
 
 	zero_page = ZERO_PAGE(0);
@@ -1019,13 +1018,8 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 	}
 
 	pfn = page_to_pfn_t(zero_page);
-	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
-			RADIX_DAX_ZERO_PAGE, false);
-	if (IS_ERR(entry2)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out;
-	}
-
+	dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
+			false);
 	vm_insert_mixed(vmf->vma, vaddr, pfn);
 out:
 	trace_dax_load_hole(inode, vmf, ret);
@@ -1337,10 +1331,6 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 	entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 				 0, write && !sync);
-	if (IS_ERR(entry)) {
-		error = PTR_ERR(entry);
-		goto error_finish_iomap;
-	}
 
 	/*
 	 * If we are doing synchronous page fault and inode needs fsync,
@@ -1424,8 +1414,6 @@ static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 	pfn = page_to_pfn_t(zero_page);
 	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
-	if (IS_ERR(ret))
-		goto fallback;
 
 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
 	if (!pmd_none(*(vmf->pmd))) {
@@ -1547,8 +1535,6 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 	entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 			RADIX_DAX_PMD, write && !sync);
-	if (IS_ERR(entry))
-		goto finish_iomap;
 
 	/*
 	 * If we are doing synchronous page fault and inode needs fsync,