aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs/xfs_buf.c
diff options
context:
space:
mode:
authorDave Chinner <dchinner@redhat.com>2014-10-01 19:04:22 -0400
committerDave Chinner <david@fromorbit.com>2014-10-01 19:04:22 -0400
commite8aaba9a783c8e5d2c58ebe69650ea31b91bb745 (patch)
tree92d2b9057e9a4122634c356e957bb7c89f951003 /fs/xfs/xfs_buf.c
parente11bb8052c3f500e66142f33579cc054d691a8fb (diff)
xfs: xfs_buf_ioend and xfs_buf_iodone_work duplicate functionality
We do some work in xfs_buf_ioend, and some work in xfs_buf_iodone_work, but much of that functionality is the same. This work can all be done in a single function, leaving xfs_buf_iodone just a wrapper to determine if we should execute it by workqueue or directly. Hence rename xfs_buf_iodone_work to xfs_buf_ioend(), and add a new xfs_buf_ioend_async() for places that need async processing. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dave Chinner <david@fromorbit.com>
Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--fs/xfs/xfs_buf.c88
1 files changed, 39 insertions, 49 deletions
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 48b1e2989ea4..a046149e6099 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -998,26 +998,30 @@ xfs_buf_wait_unpin(
998 * Buffer Utility Routines 998 * Buffer Utility Routines
999 */ 999 */
1000 1000
1001STATIC void 1001void
1002xfs_buf_iodone_work( 1002xfs_buf_ioend(
1003 struct work_struct *work) 1003 struct xfs_buf *bp)
1004{ 1004{
1005 struct xfs_buf *bp = 1005 bool read = bp->b_flags & XBF_READ;
1006 container_of(work, xfs_buf_t, b_iodone_work); 1006
1007 bool read = !!(bp->b_flags & XBF_READ); 1007 trace_xfs_buf_iodone(bp, _RET_IP_);
1008 1008
1009 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); 1009 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1010 1010
1011 /* only validate buffers that were read without errors */ 1011 /* Only validate buffers that were read without errors */
1012 if (read && bp->b_ops && !bp->b_error && (bp->b_flags & XBF_DONE)) 1012 if (read && !bp->b_error && bp->b_ops) {
1013 ASSERT(!bp->b_iodone);
1013 bp->b_ops->verify_read(bp); 1014 bp->b_ops->verify_read(bp);
1015 }
1016
1017 if (!bp->b_error)
1018 bp->b_flags |= XBF_DONE;
1014 1019
1015 if (bp->b_iodone) 1020 if (bp->b_iodone)
1016 (*(bp->b_iodone))(bp); 1021 (*(bp->b_iodone))(bp);
1017 else if (bp->b_flags & XBF_ASYNC) 1022 else if (bp->b_flags & XBF_ASYNC)
1018 xfs_buf_relse(bp); 1023 xfs_buf_relse(bp);
1019 else { 1024 else {
1020 ASSERT(read && bp->b_ops);
1021 complete(&bp->b_iowait); 1025 complete(&bp->b_iowait);
1022 1026
1023 /* release the !XBF_ASYNC ref now we are done. */ 1027 /* release the !XBF_ASYNC ref now we are done. */
@@ -1025,30 +1029,22 @@ xfs_buf_iodone_work(
1025 } 1029 }
1026} 1030}
1027 1031
1028void 1032static void
1029xfs_buf_ioend( 1033xfs_buf_ioend_work(
1030 struct xfs_buf *bp, 1034 struct work_struct *work)
1031 int schedule)
1032{ 1035{
1033 bool read = !!(bp->b_flags & XBF_READ); 1036 struct xfs_buf *bp =
1034 1037 container_of(work, xfs_buf_t, b_iodone_work);
1035 trace_xfs_buf_iodone(bp, _RET_IP_);
1036 1038
1037 if (bp->b_error == 0) 1039 xfs_buf_ioend(bp);
1038 bp->b_flags |= XBF_DONE; 1040}
1039 1041
1040 if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) { 1042void
1041 if (schedule) { 1043xfs_buf_ioend_async(
1042 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work); 1044 struct xfs_buf *bp)
1043 queue_work(xfslogd_workqueue, &bp->b_iodone_work); 1045{
1044 } else { 1046 INIT_WORK(&bp->b_iodone_work, xfs_buf_ioend_work);
1045 xfs_buf_iodone_work(&bp->b_iodone_work); 1047 queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1046 }
1047 } else {
1048 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1049 complete(&bp->b_iowait);
1050 xfs_buf_rele(bp);
1051 }
1052} 1048}
1053 1049
1054void 1050void
@@ -1099,7 +1095,7 @@ xfs_bioerror(
1099 XFS_BUF_UNDONE(bp); 1095 XFS_BUF_UNDONE(bp);
1100 xfs_buf_stale(bp); 1096 xfs_buf_stale(bp);
1101 1097
1102 xfs_buf_ioend(bp, 0); 1098 xfs_buf_ioend(bp);
1103 1099
1104 return -EIO; 1100 return -EIO;
1105} 1101}
@@ -1186,15 +1182,6 @@ xfs_bwrite(
1186} 1182}
1187 1183
1188STATIC void 1184STATIC void
1189_xfs_buf_ioend(
1190 xfs_buf_t *bp,
1191 int schedule)
1192{
1193 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1194 xfs_buf_ioend(bp, schedule);
1195}
1196
1197STATIC void
1198xfs_buf_bio_end_io( 1185xfs_buf_bio_end_io(
1199 struct bio *bio, 1186 struct bio *bio,
1200 int error) 1187 int error)
@@ -1211,7 +1198,8 @@ xfs_buf_bio_end_io(
1211 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) 1198 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1212 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); 1199 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1213 1200
1214 _xfs_buf_ioend(bp, 1); 1201 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1202 xfs_buf_ioend_async(bp);
1215 bio_put(bio); 1203 bio_put(bio);
1216} 1204}
1217 1205
@@ -1423,15 +1411,17 @@ xfs_buf_iorequest(
1423 /* 1411 /*
1424 * If _xfs_buf_ioapply failed or we are doing synchronous IO that 1412 * If _xfs_buf_ioapply failed or we are doing synchronous IO that
1425 * completes extremely quickly, we can get back here with only the IO 1413 * completes extremely quickly, we can get back here with only the IO
1426 * reference we took above. _xfs_buf_ioend will drop it to zero. Run 1414 * reference we took above. If we drop it to zero, run completion
1427 * completion processing synchronously so that we don't return to the 1415 * processing synchronously so that we don't return to the caller with
1428 * caller with completion still pending. This avoids unnecessary context 1416 * completion still pending. This avoids unnecessary context switches
1429 * switches associated with the end_io workqueue. 1417 * associated with the end_io workqueue.
1430 */ 1418 */
1431 if (bp->b_error || !(bp->b_flags & XBF_ASYNC)) 1419 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1432 _xfs_buf_ioend(bp, 0); 1420 if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
1433 else 1421 xfs_buf_ioend(bp);
1434 _xfs_buf_ioend(bp, 1); 1422 else
1423 xfs_buf_ioend_async(bp);
1424 }
1435 1425
1436 xfs_buf_rele(bp); 1426 xfs_buf_rele(bp);
1437} 1427}