author		Dave Chinner <dchinner@redhat.com>	2012-11-14 01:54:40 -0500
committer	Ben Myers <bpm@sgi.com>			2012-11-15 22:35:12 -0500
commit		1813dd64057490e7a0678a885c4fe6d02f78bdc1
tree		caf95e2be7881b771da65561b2f1664d73588401 /fs/xfs/xfs_buf.c
parent		b0f539de9fcc543a3ffa40bc22bf51aca6ea6183
xfs: convert buffer verifiers to an ops structure.
To separate the verifiers from iodone functions and associate read
and write verifiers at the same time, introduce a buffer verifier
operations structure to the xfs_buf.
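The ops structure itself is declared in fs/xfs/xfs_buf.h, which is outside this filtered diff; judging from how the callbacks are invoked in the code below, it pairs the two verifiers roughly as follows (a sketch, not the literal header text):

	struct xfs_buf_ops {
		void	(*verify_read)(struct xfs_buf *);
		void	(*verify_write)(struct xfs_buf *);
	};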
This avoids the need for assigning the write verifier, clearing the
iodone function and re-running ioend processing in the read
verifier, and gets rid of the nasty "b_pre_io" name for the write
verifier function pointer. If we ever need to, it will also be
easier to add further content specific callbacks to a buffer with an
ops structure in place.
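To illustrate the difference (function names here are made up for the example): previously the read verifier was installed as the iodone handler and had to finish the job itself, whereas with an ops structure it only verifies and the buffer layer drives completion. A paraphrased sketch:

	/* Before: read verifier doubles as the iodone handler. */
	static void
	xfs_foo_read_verify(
		struct xfs_buf	*bp)
	{
		xfs_foo_verify(bp);			/* sets b_error on corruption */
		bp->b_pre_io = xfs_foo_write_verify;	/* assign the write verifier */
		bp->b_iodone = NULL;			/* clear the iodone function... */
		xfs_buf_ioend(bp, 0);			/* ...and re-run ioend processing */
	}

	/*
	 * After: the verifier only verifies; xfs_buf_iodone_work() below
	 * calls ->verify_read and completes the I/O itself.
	 */
	static void
	xfs_foo_read_verify(
		struct xfs_buf	*bp)
	{
		xfs_foo_verify(bp);			/* sets b_error on corruption */
	}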
We also avoid needing to export verifier functions; instead we can
simply export the ops structures for those that are needed outside
the file they are defined in.
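For example (again with illustrative names), a header now only needs to expose the ops structure, and callers hand its address to the read routines whose signatures change below:

	/* One exported ops structure replaces exporting each verifier: */
	const struct xfs_buf_ops xfs_foo_buf_ops = {
		.verify_read	= xfs_foo_read_verify,
		.verify_write	= xfs_foo_write_verify,
	};

	/* ...and in a header: */
	extern const struct xfs_buf_ops xfs_foo_buf_ops;

	/* Callers pass the ops pointer when reading the buffer: */
	bp = xfs_buf_read_map(target, map, nmaps, flags, &xfs_foo_buf_ops);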
This patch also fixes a directory block readahead verifier issue
it exposed.
This patch also adds ops callbacks to the inode/alloc btree blocks
initialised by growfs. These will need more work before they will
work with CRCs.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Phil White <pwhite@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--	fs/xfs/xfs_buf.c	63
1 file changed, 37 insertions(+), 26 deletions(-)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index bd1a948ee39c..26673a0b20e7 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -571,7 +571,7 @@ found:
 		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
 		ASSERT(bp->b_iodone == NULL);
 		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
-		bp->b_pre_io = NULL;
+		bp->b_ops = NULL;
 	}
 
 	trace_xfs_buf_find(bp, flags, _RET_IP_);
@@ -657,7 +657,7 @@ xfs_buf_read_map(
 	struct xfs_buf_map	*map,
 	int			nmaps,
 	xfs_buf_flags_t		flags,
-	xfs_buf_iodone_t	verify)
+	const struct xfs_buf_ops *ops)
 {
 	struct xfs_buf		*bp;
 
@@ -669,7 +669,7 @@ xfs_buf_read_map(
 
 	if (!XFS_BUF_ISDONE(bp)) {
 		XFS_STATS_INC(xb_get_read);
-		bp->b_iodone = verify;
+		bp->b_ops = ops;
 		_xfs_buf_read(bp, flags);
 	} else if (flags & XBF_ASYNC) {
 		/*
@@ -696,13 +696,13 @@ xfs_buf_readahead_map(
 	struct xfs_buftarg	*target,
 	struct xfs_buf_map	*map,
 	int			nmaps,
-	xfs_buf_iodone_t	verify)
+	const struct xfs_buf_ops *ops)
 {
 	if (bdi_read_congested(target->bt_bdi))
 		return;
 
 	xfs_buf_read_map(target, map, nmaps,
-		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, verify);
+		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
 }
 
 /*
@@ -715,7 +715,7 @@ xfs_buf_read_uncached(
 	xfs_daddr_t		daddr,
 	size_t			numblks,
 	int			flags,
-	xfs_buf_iodone_t	verify)
+	const struct xfs_buf_ops *ops)
 {
 	struct xfs_buf		*bp;
 
@@ -728,7 +728,7 @@ xfs_buf_read_uncached(
 	bp->b_bn = daddr;
 	bp->b_maps[0].bm_bn = daddr;
 	bp->b_flags |= XBF_READ;
-	bp->b_iodone = verify;
+	bp->b_ops = ops;
 
 	xfsbdstrat(target->bt_mount, bp);
 	xfs_buf_iowait(bp);
@@ -1001,27 +1001,37 @@ STATIC void
 xfs_buf_iodone_work(
 	struct work_struct	*work)
 {
-	xfs_buf_t		*bp =
+	struct xfs_buf		*bp =
 		container_of(work, xfs_buf_t, b_iodone_work);
+	bool			read = !!(bp->b_flags & XBF_READ);
+
+	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
+	if (read && bp->b_ops)
+		bp->b_ops->verify_read(bp);
 
 	if (bp->b_iodone)
 		(*(bp->b_iodone))(bp);
 	else if (bp->b_flags & XBF_ASYNC)
 		xfs_buf_relse(bp);
+	else {
+		ASSERT(read && bp->b_ops);
+		complete(&bp->b_iowait);
+	}
 }
 
 void
 xfs_buf_ioend(
-	xfs_buf_t		*bp,
+	struct xfs_buf		*bp,
 	int			schedule)
 {
+	bool			read = !!(bp->b_flags & XBF_READ);
+
 	trace_xfs_buf_iodone(bp, _RET_IP_);
 
-	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
 	if (bp->b_error == 0)
 		bp->b_flags |= XBF_DONE;
 
-	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
+	if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
 		if (schedule) {
 			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
 			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
@@ -1029,6 +1039,7 @@ xfs_buf_ioend(
 			xfs_buf_iodone_work(&bp->b_iodone_work);
 		}
 	} else {
+		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
 		complete(&bp->b_iowait);
 	}
 }
@@ -1316,6 +1327,20 @@ _xfs_buf_ioapply(
 			rw |= REQ_FUA;
 		if (bp->b_flags & XBF_FLUSH)
 			rw |= REQ_FLUSH;
+
+		/*
+		 * Run the write verifier callback function if it exists. If
+		 * this function fails it will mark the buffer with an error and
+		 * the IO should not be dispatched.
+		 */
+		if (bp->b_ops) {
+			bp->b_ops->verify_write(bp);
+			if (bp->b_error) {
+				xfs_force_shutdown(bp->b_target->bt_mount,
+						SHUTDOWN_CORRUPT_INCORE);
+				return;
+			}
+		}
 	} else if (bp->b_flags & XBF_READ_AHEAD) {
 		rw = READA;
 	} else {
@@ -1326,20 +1351,6 @@ _xfs_buf_ioapply(
 	rw |= REQ_META;
 
 	/*
-	 * run the pre-io callback function if it exists. If this function
-	 * fails it will mark the buffer with an error and the IO should
-	 * not be dispatched.
-	 */
-	if (bp->b_pre_io) {
-		bp->b_pre_io(bp);
-		if (bp->b_error) {
-			xfs_force_shutdown(bp->b_target->bt_mount,
-					SHUTDOWN_CORRUPT_INCORE);
-			return;
-		}
-	}
-
-	/*
 	 * Walk all the vectors issuing IO on them. Set up the initial offset
 	 * into the buffer and the desired IO size before we start -
 	 * _xfs_buf_ioapply_vec() will modify them appropriately for each