author      Jan Kara <jack@suse.cz>          2018-01-07 16:41:01 -0500
committer   Theodore Ts'o <tytso@mit.edu>    2018-01-07 16:41:01 -0500
commit      22446423108f3687167c9fdc080e6f21dd784d18
tree        38cad36cd241ce7ffdfd65058ebb128e643bb9b4
parent      c0b24625979284dd212423320fe1c84fe244ed7f
ext4: fix ENOSPC handling in DAX page fault handler
When allocation of the underlying block for a page fault fails, we fail
the fault with SIGBUS. However, we may well hit ENOSPC just because lots
of free blocks are held by the running / committing transaction. So
propagate the error from ext4_iomap_begin() and implement a standard
allocation retry loop in ext4_dax_huge_fault().
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
 fs/ext4/file.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 1c7cd882d998..fb6f023622fe 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -280,7 +280,8 @@ out:
 static int ext4_dax_huge_fault(struct vm_fault *vmf,
 		enum page_entry_size pe_size)
 {
-	int result;
+	int result, error = 0;
+	int retries = 0;
 	handle_t *handle = NULL;
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct super_block *sb = inode->i_sb;
@@ -304,6 +305,7 @@ static int ext4_dax_huge_fault(struct vm_fault *vmf,
 		sb_start_pagefault(sb);
 		file_update_time(vmf->vma->vm_file);
 		down_read(&EXT4_I(inode)->i_mmap_sem);
+retry:
 		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
 					       EXT4_DATA_TRANS_BLOCKS(sb));
 		if (IS_ERR(handle)) {
@@ -314,9 +316,13 @@ static int ext4_dax_huge_fault(struct vm_fault *vmf,
 	} else {
 		down_read(&EXT4_I(inode)->i_mmap_sem);
 	}
-	result = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &ext4_iomap_ops);
+	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
 	if (write) {
 		ext4_journal_stop(handle);
+
+		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
+		    ext4_should_retry_alloc(sb, &retries))
+			goto retry;
 		/* Handling synchronous page fault? */
 		if (result & VM_FAULT_NEEDDSYNC)
 			result = dax_finish_sync_fault(vmf, pe_size, pfn);
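
Note (not part of the patch): the retry added above follows the standard ext4
allocation-retry idiom, where ext4_should_retry_alloc() decides whether an
-ENOSPC failure is worth retrying after the running/committing transaction has
had a chance to release blocks. A minimal sketch of that idiom, with
do_dax_allocation() as a purely hypothetical stand-in for the allocation path
that may fail:

/*
 * Illustration of the standard ext4 ENOSPC retry loop (simplified sketch,
 * not the fault-handler code above). do_dax_allocation() is a hypothetical
 * placeholder for a call that can return -ENOSPC while the committing
 * transaction still holds blocks that will soon become free.
 */
static int alloc_with_retry(struct inode *inode)
{
	int retries = 0;
	int err;

retry:
	err = do_dax_allocation(inode);
	if (err == -ENOSPC &&
	    ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	return err;
}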