author    Christoph Hellwig <hch@lst.de>    2017-02-06 13:50:49 -0500
committer Darrick J. Wong <darrick.wong@oracle.com>    2017-02-06 20:47:47 -0500
commit    a14234c72bf41ac96bc8c98e96e2c84b6d4bd4f2 (patch)
tree      711d321fd0656ef512522b0293a12b836d6afecd /fs/xfs/xfs_reflink.c
parent    dcf9585a7511147c7ffd580be8580dd39bc52fb6 (diff)
xfs: go straight to real allocations for direct I/O COW writes
When we allocate COW fork blocks for direct I/O writes we currently first create a delayed allocation, and then convert it to a real allocation once we've got the delayed one. As there is no good reason for that, this patch instead calls xfs_bmapi_write directly from the COW allocation path.

The only interesting bits are a few tweaks to the low-level allocator to allow for this, most notably the need to remove the call to xfs_bmap_extsize_align for the cowextsize in xfs_bmap_btalloc: for the existing convert case it's a no-op, but for the direct allocation case it would blow up our block reservation way beyond what we reserved for the transaction.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
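For illustration only (not part of the patch): the new allocation path sizes its block reservation by widening the requested range outward to COW extent size hint boundaries, per the xfs_aligned_fsb_count() call in the diff below, and then passes the result through XFS_DIOSTRAT_SPACE_RES. A minimal userspace sketch of that alignment arithmetic, using a hypothetical standalone helper rather than the kernel code:

#include <stdio.h>
#include <stdint.h>

/*
 * Hypothetical stand-in for the alignment xfs_aligned_fsb_count() performs:
 * grow the requested block range so it starts and ends on an extent size
 * hint boundary.  All names here are illustrative, not kernel API.
 */
static uint64_t aligned_fsb_count(uint64_t offset_fsb, uint64_t count_fsb,
                                  uint32_t extsz)
{
        if (extsz) {
                uint64_t align = offset_fsb % extsz;  /* slack before the start */

                if (align)
                        count_fsb += align;
                align = count_fsb % extsz;            /* slack after the end */
                if (align)
                        count_fsb += extsz - align;
        }
        return count_fsb;
}

int main(void)
{
        /* A 5-block write at block 3 with a 32-block cowextsize hint. */
        printf("aligned blocks: %llu\n",
               (unsigned long long)aligned_fsb_count(3, 5, 32));
        return 0;
}

With these example numbers a 5-block write reserves 32 blocks of data space up front; aligning the allocation again inside xfs_bmap_btalloc would push it past that reservation, which is the commit message's stated reason for dropping the extra xfs_bmap_extsize_align call in the cowextsize case.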
Diffstat (limited to 'fs/xfs/xfs_reflink.c')
-rw-r--r--   fs/xfs/xfs_reflink.c | 94
1 file changed, 66 insertions(+), 28 deletions(-)
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 219bc96bfc71..9bba084c1436 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -389,62 +389,100 @@ __xfs_reflink_allocate_cow(
         xfs_fileoff_t end_fsb)
 {
         struct xfs_mount *mp = ip->i_mount;
-        struct xfs_bmbt_irec imap;
+        struct xfs_bmbt_irec imap, got;
         struct xfs_defer_ops dfops;
         struct xfs_trans *tp;
         xfs_fsblock_t first_block;
-        int nimaps = 1, error;
-        bool shared;
+        int nimaps, error, lockmode;
+        bool shared, trimmed;
+        xfs_filblks_t resaligned;
+        xfs_extlen_t resblks;
+        xfs_extnum_t idx;
 
-        xfs_defer_init(&dfops, &first_block);
+        resaligned = xfs_aligned_fsb_count(*offset_fsb, end_fsb - *offset_fsb,
+                        xfs_get_cowextsz_hint(ip));
+        resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
 
-        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
-                        XFS_TRANS_RESERVE, &tp);
+        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
         if (error)
                 return error;
 
-        xfs_ilock(ip, XFS_ILOCK_EXCL);
+        lockmode = XFS_ILOCK_EXCL;
+        xfs_ilock(ip, lockmode);
 
-        /* Read extent from the source file. */
-        nimaps = 1;
-        error = xfs_bmapi_read(ip, *offset_fsb, end_fsb - *offset_fsb,
-                        &imap, &nimaps, 0);
-        if (error)
-                goto out_unlock;
-        ASSERT(nimaps == 1);
+        /*
+         * Even if the extent is not shared we might have a preallocation for
+         * it in the COW fork. If so use it.
+         */
+        if (xfs_iext_lookup_extent(ip, ip->i_cowfp, *offset_fsb, &idx, &got) &&
+            got.br_startoff <= *offset_fsb) {
+                /* If we have a real allocation in the COW fork we're done. */
+                if (!isnullstartblock(got.br_startblock)) {
+                        xfs_trim_extent(&got, *offset_fsb,
+                                        end_fsb - *offset_fsb);
+                        *offset_fsb = got.br_startoff + got.br_blockcount;
+                        goto out_trans_cancel;
+                }
+        } else {
+                nimaps = 1;
+                error = xfs_bmapi_read(ip, *offset_fsb, end_fsb - *offset_fsb,
+                                &imap, &nimaps, 0);
+                if (error)
+                        goto out_trans_cancel;
+                ASSERT(nimaps == 1);
+
+                /* Trim the mapping to the nearest shared extent boundary. */
+                error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
+                                &trimmed);
+                if (error)
+                        goto out_trans_cancel;
+
+                if (!shared) {
+                        *offset_fsb = imap.br_startoff + imap.br_blockcount;
+                        goto out_trans_cancel;
+                }
 
-        /* Make sure there's a CoW reservation for it. */
-        error = xfs_reflink_reserve_cow(ip, &imap, &shared);
+                *offset_fsb = imap.br_startoff;
+                end_fsb = imap.br_startoff + imap.br_blockcount;
+        }
+
+        error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
+                        XFS_QMOPT_RES_REGBLKS);
         if (error)
                 goto out_trans_cancel;
 
-        if (!shared) {
-                *offset_fsb = imap.br_startoff + imap.br_blockcount;
-                goto out_trans_cancel;
-        }
+        xfs_trans_ijoin(tp, ip, 0);
+
+        xfs_defer_init(&dfops, &first_block);
+        nimaps = 1;
 
         /* Allocate the entire reservation as unwritten blocks. */
-        xfs_trans_ijoin(tp, ip, 0);
-        error = xfs_bmapi_write(tp, ip, imap.br_startoff, imap.br_blockcount,
+        error = xfs_bmapi_write(tp, ip, *offset_fsb, end_fsb - *offset_fsb,
                         XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, &first_block,
-                        XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK),
-                        &imap, &nimaps, &dfops);
+                        resblks, &imap, &nimaps, &dfops);
         if (error)
-                goto out_trans_cancel;
+                goto out_bmap_cancel;
 
         /* Finish up. */
         error = xfs_defer_finish(&tp, &dfops, NULL);
         if (error)
-                goto out_trans_cancel;
+                goto out_bmap_cancel;
 
         error = xfs_trans_commit(tp);
+        if (error)
+                goto out_unlock;
 
         *offset_fsb = imap.br_startoff + imap.br_blockcount;
+
 out_unlock:
-        xfs_iunlock(ip, XFS_ILOCK_EXCL);
+        xfs_iunlock(ip, lockmode);
         return error;
-out_trans_cancel:
+
+out_bmap_cancel:
         xfs_defer_cancel(&dfops);
+        xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
+                        XFS_QMOPT_RES_REGBLKS);
+out_trans_cancel:
         xfs_trans_cancel(tp);
         goto out_unlock;
 }