author      Darrick J. Wong <darrick.wong@oracle.com>   2018-10-18 02:20:30 -0400
committer   Dave Chinner <david@fromorbit.com>          2018-10-18 02:20:30 -0400
commit      1aff5696f3e03099a4a3e9a0d965ef9b345265a6 (patch)
tree        5362c0bf508f2ca94c3b6ca00f953ed8c16b98e8
parent      1002ff45eff5cb70b0f2da28df488c789af2aeab (diff)
xfs: always assign buffer verifiers when one is provided
If a caller supplies buffer ops when trying to read a buffer and the
buffer doesn't already have buf ops assigned, ensure that the ops are
assigned to the buffer and the verifier is run on that buffer.

Note that current XFS code is careful to assign buffer ops after a
xfs_{trans_,}buf_read call in which ops were not supplied. However, we
should apply ops defensively in case there is ever a coding mistake;
and an upcoming repair patch will need to be able to read a buffer
without assigning buf ops.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <david@fromorbit.com>
-rw-r--r--   fs/xfs/xfs_buf.c         64
-rw-r--r--   fs/xfs/xfs_buf.h          2
-rw-r--r--   fs/xfs/xfs_trans_buf.c   29
3 files changed, 78 insertions, 17 deletions
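
For illustration, here is a minimal sketch (not part of this patch) of the pattern the commit message describes: read a buffer without supplying ops, then attach and run the verifier afterwards via the new xfs_buf_ensure_ops() helper. The wrapper function below, its name, and its error handling are hypothetical; xfs_buf_read(), xfs_buf_relse(), and mp->m_ddev_targp are assumed to match the kernel as of this commit.

/*
 * Hypothetical example, not part of this patch: read a buffer with no
 * verifier attached, then attach one later with xfs_buf_ensure_ops().
 */
static int
xfs_example_read_then_verify(
        struct xfs_mount        *mp,
        xfs_daddr_t             daddr,
        size_t                  numblks,
        const struct xfs_buf_ops *ops)
{
        struct xfs_buf          *bp;
        int                     error;

        /* Read with NULL ops; the contents are cached but not yet verified. */
        bp = xfs_buf_read(mp->m_ddev_targp, daddr, numblks, 0, NULL);
        if (!bp)
                return -ENOMEM;

        /*
         * Attach the ops now.  The helper runs ->verify_read(), clears
         * XBF_DONE on failure, and returns the buffer's error code.
         */
        error = xfs_buf_ensure_ops(bp, ops);

        xfs_buf_relse(bp);
        return error;
}

In the patch itself, xfs_trans_read_buf_map() follows the same idea: when it finds a buffer already attached to the transaction with no ops assigned, it calls xfs_buf_ensure_ops() and returns the error, forcing a shutdown if the transaction is dirty.
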
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index e839907e8492..06149bac2f58 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -749,6 +749,30 @@ _xfs_buf_read(
 	return xfs_buf_submit(bp);
 }
 
+/*
+ * If the caller passed in an ops structure and the buffer doesn't have ops
+ * assigned, set the ops and use them to verify the contents. If the contents
+ * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no
+ * recorded errors and is already in XBF_DONE state.
+ */
+int
+xfs_buf_ensure_ops(
+	struct xfs_buf		*bp,
+	const struct xfs_buf_ops *ops)
+{
+	ASSERT(bp->b_flags & XBF_DONE);
+	ASSERT(bp->b_error == 0);
+
+	if (!ops || bp->b_ops)
+		return 0;
+
+	bp->b_ops = ops;
+	bp->b_ops->verify_read(bp);
+	if (bp->b_error)
+		bp->b_flags &= ~XBF_DONE;
+	return bp->b_error;
+}
+
 xfs_buf_t *
 xfs_buf_read_map(
 	struct xfs_buftarg	*target,
@@ -762,26 +786,32 @@ xfs_buf_read_map(
 	flags |= XBF_READ;
 
 	bp = xfs_buf_get_map(target, map, nmaps, flags);
-	if (bp) {
-		trace_xfs_buf_read(bp, flags, _RET_IP_);
-
-		if (!(bp->b_flags & XBF_DONE)) {
-			XFS_STATS_INC(target->bt_mount, xb_get_read);
-			bp->b_ops = ops;
-			_xfs_buf_read(bp, flags);
-		} else if (flags & XBF_ASYNC) {
-			/*
-			 * Read ahead call which is already satisfied,
-			 * drop the buffer
-			 */
-			xfs_buf_relse(bp);
-			return NULL;
-		} else {
-			/* We do not want read in the flags */
-			bp->b_flags &= ~XBF_READ;
-		}
-	}
+	if (!bp)
+		return NULL;
+
+	trace_xfs_buf_read(bp, flags, _RET_IP_);
+
+	if (!(bp->b_flags & XBF_DONE)) {
+		XFS_STATS_INC(target->bt_mount, xb_get_read);
+		bp->b_ops = ops;
+		_xfs_buf_read(bp, flags);
+		return bp;
+	}
+
+	xfs_buf_ensure_ops(bp, ops);
+
+	if (flags & XBF_ASYNC) {
+		/*
+		 * Read ahead call which is already satisfied,
+		 * drop the buffer
+		 */
+		xfs_buf_relse(bp);
+		return NULL;
+	}
 
+	/* We do not want read in the flags */
+	bp->b_flags &= ~XBF_READ;
+	ASSERT(bp->b_ops != NULL || ops == NULL);
 	return bp;
 }
 
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 4e3171acd0f8..b9f5511ea998 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -385,4 +385,6 @@ extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);
 #define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
 #define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)
 
+int xfs_buf_ensure_ops(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
+
 #endif	/* __XFS_BUF_H__ */
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 286a287ac57a..fc40160c1773 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -264,11 +264,39 @@ xfs_trans_read_buf_map(
 			return -EIO;
 		}
 
+		/*
+		 * Check if the caller is trying to read a buffer that is
+		 * already attached to the transaction yet has no buffer ops
+		 * assigned. Ops are usually attached when the buffer is
+		 * attached to the transaction, or by the read caller in
+		 * special circumstances. That didn't happen, which is not
+		 * how this is supposed to go.
+		 *
+		 * If the buffer passes verification we'll let this go, but if
+		 * not we have to shut down. Let the transaction cleanup code
+		 * release this buffer when it kills the transaction.
+		 */
+		ASSERT(bp->b_ops != NULL);
+		error = xfs_buf_ensure_ops(bp, ops);
+		if (error) {
+			xfs_buf_ioerror_alert(bp, __func__);
+
+			if (tp->t_flags & XFS_TRANS_DIRTY)
+				xfs_force_shutdown(tp->t_mountp,
+						SHUTDOWN_META_IO_ERROR);
+
+			/* bad CRC means corrupted metadata */
+			if (error == -EFSBADCRC)
+				error = -EFSCORRUPTED;
+			return error;
+		}
+
 		bip = bp->b_log_item;
 		bip->bli_recur++;
 
 		ASSERT(atomic_read(&bip->bli_refcount) > 0);
 		trace_xfs_trans_read_buf_recur(bip);
+		ASSERT(bp->b_ops != NULL || ops == NULL);
 		*bpp = bp;
 		return 0;
 	}
@@ -316,6 +344,7 @@ xfs_trans_read_buf_map(
 		_xfs_trans_bjoin(tp, bp, 1);
 		trace_xfs_trans_read_buf(bp->b_log_item);
 	}
+	ASSERT(bp->b_ops != NULL || ops == NULL);
 	*bpp = bp;
 	return 0;
 