author      Christoph Hellwig <hch@lst.de>              2019-02-15 11:02:49 -0500
committer   Darrick J. Wong <darrick.wong@oracle.com>   2019-02-17 14:55:54 -0500
commit      4ad765edb02a5333ce2fade642f116a67a3370ca
tree        f05f756dd7f11aba4a52217965895a0581528d2d
parent      125851ac92d62b966df851c6f34147121020af2f
xfs: move xfs_iomap_write_allocate to xfs_aops.c
This function is a small wrapper only used by the writeback code, so move it
together with the writeback code and simplify it down to the glorified
do { } while loop that it now is.

A few bits intentionally got lost here: no need to call xfs_qm_dqattach
because quotas are always attached when we create the delalloc reservation,
and no need for the imap->br_startblock == 0 check given that
xfs_bmapi_convert_delalloc already has a WARN_ON_ONCE for exactly that
condition.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
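The new helper boils down to a retry loop: keep calling xfs_bmapi_convert_delalloc() until the extent it returns covers the block being written back, which can take several passes when free space is fragmented. Below is a minimal userspace sketch of that loop shape, not kernel code: convert_one_chunk(), next_unconverted, and the two-block chunk size are assumptions made up for illustration, with convert_one_chunk() standing in for xfs_bmapi_convert_delalloc().

	#include <stdint.h>
	#include <stdio.h>

	struct extent {
		uint64_t start;		/* first block mapped (like br_startoff) */
		uint64_t count;		/* blocks mapped (like br_blockcount) */
	};

	static uint64_t next_unconverted;	/* start of the still-delalloc range */

	/*
	 * Hypothetical stand-in for xfs_bmapi_convert_delalloc(): each call
	 * converts at most two blocks of the delalloc range and reports the
	 * extent it produced.
	 */
	static int convert_one_chunk(struct extent *imap)
	{
		imap->start = next_unconverted;
		imap->count = 2;
		next_unconverted += imap->count;
		return 0;
	}

	/* Same shape as the new xfs_convert_blocks(): retry until offset_fsb is covered. */
	static int convert_blocks(uint64_t offset_fsb, struct extent *imap)
	{
		int error;

		do {
			error = convert_one_chunk(imap);
			if (error)
				return error;
		} while (imap->start + imap->count <= offset_fsb);

		return 0;
	}

	int main(void)
	{
		struct extent imap = { 0, 0 };

		/* Three two-block passes are needed before block 5 is covered. */
		if (convert_blocks(5, &imap) == 0)
			printf("block 5 mapped by [%llu, %llu)\n",
			       (unsigned long long)imap.start,
			       (unsigned long long)(imap.start + imap.count));
		return 0;
	}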
 fs/xfs/xfs_aops.c  | 51
 fs/xfs/xfs_iomap.c | 81
 fs/xfs/xfs_iomap.h |  2
 3 files changed, 45 insertions(+), 89 deletions(-)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 8bfb62d8776f..42017ecf78ed 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -329,6 +329,38 @@ xfs_imap_valid(
 	return true;
 }
 
+/*
+ * Pass in a dellalloc extent and convert it to real extents, return the real
+ * extent that maps offset_fsb in wpc->imap.
+ *
+ * The current page is held locked so nothing could have removed the block
+ * backing offset_fsb.
+ */
+static int
+xfs_convert_blocks(
+	struct xfs_writepage_ctx *wpc,
+	struct xfs_inode	*ip,
+	xfs_fileoff_t		offset_fsb)
+{
+	int			error;
+
+	/*
+	 * Attempt to allocate whatever delalloc extent currently backs
+	 * offset_fsb and put the result into wpc->imap.  Allocate in a loop
+	 * because it may take several attempts to allocate real blocks for a
+	 * contiguous delalloc extent if free space is sufficiently fragmented.
+	 */
+	do {
+		error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset_fsb,
+				&wpc->imap, wpc->fork == XFS_COW_FORK ?
+					&wpc->cow_seq : &wpc->data_seq);
+		if (error)
+			return error;
+	} while (wpc->imap.br_startoff + wpc->imap.br_blockcount <= offset_fsb);
+
+	return 0;
+}
+
 STATIC int
 xfs_map_blocks(
 	struct xfs_writepage_ctx *wpc,
@@ -458,14 +490,21 @@ xfs_map_blocks(
 	trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
 	return 0;
 allocate_blocks:
-	error = xfs_iomap_write_allocate(ip, wpc->fork, offset, &imap,
-			wpc->fork == XFS_COW_FORK ?
-					&wpc->cow_seq : &wpc->data_seq);
+	error = xfs_convert_blocks(wpc, ip, offset_fsb);
 	if (error)
 		return error;
-	ASSERT(wpc->fork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
-	       imap.br_startoff + imap.br_blockcount <= cow_fsb);
-	wpc->imap = imap;
+
+	/*
+	 * Due to merging the return real extent might be larger than the
+	 * original delalloc one.  Trim the return extent to the next COW
+	 * boundary again to force a re-lookup.
+	 */
+	if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF &&
+	    cow_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount)
+		wpc->imap.br_blockcount = cow_fsb - wpc->imap.br_startoff;
+
+	ASSERT(wpc->imap.br_startoff <= offset_fsb);
+	ASSERT(wpc->imap.br_startoff + wpc->imap.br_blockcount > offset_fsb);
 	trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
 	return 0;
 }
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 15da53b5fb53..361dfe7af783 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -665,87 +665,6 @@ out_unlock:
 	return error;
 }
 
-/*
- * Pass in a delayed allocate extent, convert it to real extents;
- * return to the caller the extent we create which maps on top of
- * the originating callers request.
- *
- * Called without a lock on the inode.
- *
- * We no longer bother to look at the incoming map - all we have to
- * guarantee is that whatever we allocate fills the required range.
- */
-int
-xfs_iomap_write_allocate(
-	struct xfs_inode	*ip,
-	int			whichfork,
-	xfs_off_t		offset,
-	struct xfs_bmbt_irec	*imap,
-	unsigned int		*seq)
-{
-	struct xfs_mount	*mp = ip->i_mount;
-	xfs_fileoff_t		offset_fsb;
-	xfs_fileoff_t		map_start_fsb;
-	xfs_extlen_t		map_count_fsb;
-	int			error = 0;
-
-	/*
-	 * Make sure that the dquots are there.
-	 */
-	error = xfs_qm_dqattach(ip);
-	if (error)
-		return error;
-
-	/*
-	 * Store the file range the caller is interested in because it encodes
-	 * state such as potential overlap with COW fork blocks. We must trim
-	 * the allocated extent down to this range to maintain consistency with
-	 * what the caller expects. Revalidation of the range itself is the
-	 * responsibility of the caller.
-	 */
-	offset_fsb = XFS_B_TO_FSBT(mp, offset);
-	map_start_fsb = imap->br_startoff;
-	map_count_fsb = imap->br_blockcount;
-
-	while (true) {
-		/*
-		 * Allocate in a loop because it may take several attempts to
-		 * allocate real blocks for a contiguous delalloc extent if free
-		 * space is sufficiently fragmented.
-		 */
-
-		/*
-		 * ilock was dropped since imap was populated which means it
-		 * might no longer be valid. The current page is held locked so
-		 * nothing could have removed the block backing offset_fsb.
-		 * Attempt to allocate whatever delalloc extent currently backs
-		 * offset_fsb and put the result in the imap pointer from the
-		 * caller. We'll trim it down to the caller's most recently
-		 * validated range before we return.
-		 */
-		error = xfs_bmapi_convert_delalloc(ip, whichfork, offset_fsb,
-				imap, seq);
-		if (error)
-			return error;
-
-		/*
-		 * See if we were able to allocate an extent that covers at
-		 * least part of the callers request.
-		 */
-		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
-			return xfs_alert_fsblock_zero(ip, imap);
-
-		if ((offset_fsb >= imap->br_startoff) &&
-		    (offset_fsb < (imap->br_startoff +
-				   imap->br_blockcount))) {
-			xfs_trim_extent(imap, map_start_fsb, map_count_fsb);
-			ASSERT(offset_fsb >= imap->br_startoff &&
-			       offset_fsb < imap->br_startoff + imap->br_blockcount);
-			return 0;
-		}
-	}
-}
-
 int
 xfs_iomap_write_unwritten(
 	xfs_inode_t	*ip,
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index c6170548831b..6b16243db0b7 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -13,8 +13,6 @@ struct xfs_bmbt_irec;
 
 int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
 		struct xfs_bmbt_irec *, int);
-int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t,
-		struct xfs_bmbt_irec *, unsigned int *);
 int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
 
 void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,