Diffstat (limited to 'fs/xfs/xfs_bmap_util.c')
-rw-r--r--	fs/xfs/xfs_bmap_util.c	72
1 file changed, 20 insertions(+), 52 deletions(-)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 92e8f99a5857..281002689d64 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1338,7 +1338,10 @@ xfs_free_file_space(
 	goto out;
 }
 
-
+/*
+ * Preallocate and zero a range of a file. This mechanism has the allocation
+ * semantics of fallocate and in addition converts data in the range to zeroes.
+ */
 int
 xfs_zero_file_space(
 	struct xfs_inode	*ip,
@@ -1346,65 +1349,30 @@ xfs_zero_file_space(
 	xfs_off_t		len)
 {
 	struct xfs_mount	*mp = ip->i_mount;
-	uint			granularity;
-	xfs_off_t		start_boundary;
-	xfs_off_t		end_boundary;
+	uint			blksize;
 	int			error;
 
 	trace_xfs_zero_file_space(ip);
 
-	granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+	blksize = 1 << mp->m_sb.sb_blocklog;
 
 	/*
-	 * Round the range of extents we are going to convert inwards.  If the
-	 * offset is aligned, then it doesn't get changed so we zero from the
-	 * start of the block offset points to.
+	 * Punch a hole and prealloc the range. We use hole punch rather than
+	 * unwritten extent conversion for two reasons:
+	 *
+	 * 1.) Hole punch handles partial block zeroing for us.
+	 *
+	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
+	 * by virtue of the hole punch.
 	 */
-	start_boundary = round_up(offset, granularity);
-	end_boundary = round_down(offset + len, granularity);
-
-	ASSERT(start_boundary >= offset);
-	ASSERT(end_boundary <= offset + len);
-
-	if (start_boundary < end_boundary - 1) {
-		/*
-		 * Writeback the range to ensure any inode size updates due to
-		 * appending writes make it to disk (otherwise we could just
-		 * punch out the delalloc blocks).
-		 */
-		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
-				start_boundary, end_boundary - 1);
-		if (error)
-			goto out;
-		truncate_pagecache_range(VFS_I(ip), start_boundary,
-					 end_boundary - 1);
-
-		/* convert the blocks */
-		error = xfs_alloc_file_space(ip, start_boundary,
-					end_boundary - start_boundary - 1,
-					XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
-		if (error)
-			goto out;
-
-		/* We've handled the interior of the range, now for the edges */
-		if (start_boundary != offset) {
-			error = xfs_iozero(ip, offset, start_boundary - offset);
-			if (error)
-				goto out;
-		}
-
-		if (end_boundary != offset + len)
-			error = xfs_iozero(ip, end_boundary,
-					   offset + len - end_boundary);
-
-	} else {
-		/*
-		 * It's either a sub-granularity range or the range spanned lies
-		 * partially across two adjacent blocks.
-		 */
-		error = xfs_iozero(ip, offset, len);
-	}
+	error = xfs_free_file_space(ip, offset, len);
+	if (error)
+		goto out;
 
+	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
+				     round_up(offset + len, blksize) -
+				     round_down(offset, blksize),
+				     XFS_BMAPI_PREALLOC);
 out:
 	return error;
 
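
For context, xfs_zero_file_space() is what XFS uses to service the FALLOC_FL_ZERO_RANGE case of fallocate() on kernels of this vintage. Below is a minimal userspace sketch (not part of the patch) showing how the zero-range path above would typically be exercised; the file path and the offset/length values are hypothetical.

/*
 * Minimal userspace sketch: exercise the zero-range path that
 * xfs_zero_file_space() implements. Path and range are hypothetical;
 * any writable file on an XFS mount would do.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	const char	*path = "/mnt/xfs/testfile";	/* hypothetical path */
	off_t		offset = 4096;			/* start of range to zero */
	off_t		len = 65536;			/* length of range to zero */
	int		fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/*
	 * FALLOC_FL_ZERO_RANGE converts the byte range to zeroes while
	 * keeping it allocated, i.e. the fallocate-like semantics described
	 * in the new comment above xfs_zero_file_space().
	 */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE, offset, len) < 0) {
		perror("fallocate(FALLOC_FL_ZERO_RANGE)");
		close(fd);
		return EXIT_FAILURE;
	}

	close(fd);
	return EXIT_SUCCESS;
}

Note the design point carried by the patch: because the range is hole-punched before the preallocation, an ENOSPC from xfs_alloc_file_space() still leaves the requested range reading back as zeroes.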