author		Jan Kara <jack@suse.cz>	2018-01-07 16:38:43 -0500
committer	Theodore Ts'o <tytso@mit.edu>	2018-01-07 16:38:43 -0500
commit		c0b24625979284dd212423320fe1c84fe244ed7f (patch)
tree		92824567101689de2900f33446ca5db723394e48 /fs/dax.c
parent		bbe45d2460da98785cb9453fb0b42d9b2e79dd99 (diff)
dax: pass detailed error code from dax_iomap_fault()
Ext4 needs to pass through error from its iomap handler to the page
fault handler so that it can properly detect ENOSPC and force
transaction commit and retry the fault (and block allocation). Add
argument to dax_iomap_fault() for passing such error.

Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
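To show how a caller can consume the new argument, below is a minimal sketch of a filesystem DAX fault handler that retries the fault on ENOSPC after the journal transaction has been stopped. The function name is hypothetical and the locking, timestamp and sync-fault details are simplified assumptions; this is not the ext4 hunk from this series.

/*
 * Illustrative sketch only: demonstrates passing &error to
 * dax_iomap_fault() and retrying on ENOSPC.  Helper names follow
 * ext4 conventions but details are simplified assumptions.
 */
static int example_dax_huge_fault(struct vm_fault *vmf,
				  enum page_entry_size pe_size)
{
	struct super_block *sb = file_inode(vmf->vma->vm_file)->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	handle_t *handle = NULL;
	int result, error = 0;
	int retries = 0;
	pfn_t pfn;

	if (write)
		sb_start_pagefault(sb);
retry:
	if (write) {
		/* Start a transaction so the block allocation the fault
		 * may trigger can be journalled. */
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
		if (IS_ERR(handle)) {
			sb_end_pagefault(sb);
			return VM_FAULT_SIGBUS;
		}
	}

	/* New in this patch: the detailed iomap error comes back in 'error'. */
	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);

	if (write) {
		ext4_journal_stop(handle);
		/* ENOSPC may clear up once committed transactions free space. */
		if (error == -ENOSPC && ext4_should_retry_alloc(sb, &retries))
			goto retry;
		sb_end_pagefault(sb);
	}
	return result;
}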
Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c	9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index 95981591977a..f3afa1d6156c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1096,7 +1096,7 @@ static bool dax_fault_is_synchronous(unsigned long flags,
 }
 
 static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
-			       const struct iomap_ops *ops)
+			       int *iomap_errp, const struct iomap_ops *ops)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct address_space *mapping = vma->vm_file->f_mapping;
@@ -1149,6 +1149,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	 * that we never have to deal with more than a single extent here.
 	 */
 	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
+	if (iomap_errp)
+		*iomap_errp = error;
 	if (error) {
 		vmf_ret = dax_fault_return(error);
 		goto unlock_entry;
@@ -1488,6 +1490,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
  * @vmf: The description of the fault
  * @pe_size: Size of the page to fault in
  * @pfnp: PFN to insert for synchronous faults if fsync is required
+ * @iomap_errp: Storage for detailed error code in case of error
  * @ops: Iomap ops passed from the file system
  *
  * When a page fault occurs, filesystems may call this helper in
@@ -1496,11 +1499,11 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
  * successfully.
  */
 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
-		    pfn_t *pfnp, const struct iomap_ops *ops)
+		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
 {
 	switch (pe_size) {
 	case PE_SIZE_PTE:
-		return dax_iomap_pte_fault(vmf, pfnp, ops);
+		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
 	case PE_SIZE_PMD:
 		return dax_iomap_pmd_fault(vmf, pfnp, ops);
 	default: