author     Brian Foster <bfoster@redhat.com>                2017-02-16 20:19:12 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-02-26 05:10:52 -0500
commit     d004006055974a976841c4f95e8c25ca7b5b38d5 (patch)
tree       65a0c657f16e114dc32288d54ca2293721fd1815 /fs
parent     57d759622aa76591958f46707edebe20c284b7d2 (diff)
xfs: clear delalloc and cache on buffered write failure
commit fa7f138ac4c70dc00519c124cf7cd4862a0a5b0e upstream.
The buffered write failure handling code in
xfs_file_iomap_end_delalloc() has a couple of minor problems. First, if
written == 0, start_fsb is not rounded down and it fails to kill off a
delalloc block if the start offset is block unaligned. This results in a
lingering delalloc block and broken delalloc block accounting detected
at unmount time. Fix this by rounding down start_fsb in the unlikely
event that written == 0.
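To illustrate the rounding difference, here is a small user-space sketch (not kernel code): it assumes a 4096-byte block size and uses stand-in helpers for XFS_B_TO_FSB(), which rounds a byte offset up to the next filesystem block, and XFS_B_TO_FSBT(), which truncates (rounds down).

#include <stdio.h>

#define BLKSZ 4096UL	/* assumed block size for this example */

/* stand-in for XFS_B_TO_FSB(): round byte offset up to filesystem blocks */
static unsigned long b_to_fsb(unsigned long bytes)
{
	return (bytes + BLKSZ - 1) / BLKSZ;
}

/* stand-in for XFS_B_TO_FSBT(): truncate (round down) to filesystem blocks */
static unsigned long b_to_fsbt(unsigned long bytes)
{
	return bytes / BLKSZ;
}

int main(void)
{
	unsigned long offset = 5000;	/* block-unaligned start of the write */
	unsigned long written = 0;	/* the write failed before copying anything */
	unsigned long length = 2000;
	unsigned long end_fsb = b_to_fsb(offset + length);	/* 2 */

	/* old behaviour: rounds up past the delalloc block covering offset */
	printf("rounded up:   start_fsb=%lu end_fsb=%lu punch=%s\n",
	       b_to_fsb(offset + written), end_fsb,
	       b_to_fsb(offset + written) < end_fsb ? "yes" : "no");

	/* fixed behaviour: rounds down so the block at fsb 1 gets reclaimed */
	printf("rounded down: start_fsb=%lu end_fsb=%lu punch=%s\n",
	       b_to_fsbt(offset), end_fsb,
	       b_to_fsbt(offset) < end_fsb ? "yes" : "no");
	return 0;
}

With these example values, rounding up gives start_fsb == end_fsb == 2 and nothing is punched, so the delalloc block at fsb 1 lingers; rounding down gives start_fsb == 1 < end_fsb == 2 and the block is reclaimed.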
Second, it is possible for a failed overwrite of a delalloc extent to
leave dirty pagecache around over a hole in the file. This is because it is
possible to hit ->iomap_end() on write failure before the iomap code has
attempted to allocate pagecache, and thus has no need to clean it up. If
the targeted delalloc extent was successfully written by a previous
write, however, then it does still have dirty pages when ->iomap_end()
punches out the underlying blocks. This ultimately results in writeback
over a hole. To fix this problem, unconditionally punch out the
pagecache from XFS before the associated delalloc range.
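Condensed from the hunk below, the fixed error path now looks roughly like this (the unlock is paraphrased from the surrounding function, which the diff does not show, and error handling is omitted):

	if (start_fsb < end_fsb) {
		/* drop dirty pagecache over the range before the blocks go away */
		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
					 XFS_FSB_TO_B(mp, end_fsb) - 1);

		/* then punch out the delalloc blocks under the ilock */
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
				end_fsb - start_fsb);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}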
Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'fs')
-rw-r--r--   fs/xfs/xfs_iomap.c   13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index cdc6bdd495be..e8889614cec3 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1068,7 +1068,15 @@ xfs_file_iomap_end_delalloc(
 	xfs_fileoff_t		end_fsb;
 	int			error = 0;
 
-	start_fsb = XFS_B_TO_FSB(mp, offset + written);
+	/*
+	 * start_fsb refers to the first unused block after a short write. If
+	 * nothing was written, round offset down to point at the first block in
+	 * the range.
+	 */
+	if (unlikely(!written))
+		start_fsb = XFS_B_TO_FSBT(mp, offset);
+	else
+		start_fsb = XFS_B_TO_FSB(mp, offset + written);
 	end_fsb = XFS_B_TO_FSB(mp, offset + length);
 
 	/*
@@ -1080,6 +1088,9 @@ xfs_file_iomap_end_delalloc(
 	 * blocks in the range, they are ours.
 	 */
 	if (start_fsb < end_fsb) {
+		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
+					 XFS_FSB_TO_B(mp, end_fsb) - 1);
+
 		xfs_ilock(ip, XFS_ILOCK_EXCL);
 		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
 				end_fsb - start_fsb);