Diffstat (limited to 'fs/xfs/xfs_trans_buf.c')
 -rw-r--r--  fs/xfs/xfs_trans_buf.c | 302 
 1 file changed, 103 insertions(+), 199 deletions(-)
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 218829e6a152..fb586360d1c9 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -38,6 +38,7 @@
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 #include "xfs_rw.h"
+#include "xfs_trace.h"
 
 
 STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
@@ -45,6 +46,65 @@ STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
 STATIC xfs_buf_t *xfs_trans_buf_item_match_all(xfs_trans_t *, xfs_buftarg_t *,
 		xfs_daddr_t, int);
 
+/*
+ * Add the locked buffer to the transaction.
+ *
+ * The buffer must be locked, and it cannot be associated with any
+ * transaction.
+ *
+ * If the buffer does not yet have a buf log item associated with it,
+ * then allocate one for it.  Then add the buf item to the transaction.
+ */
+STATIC void
+_xfs_trans_bjoin(
+	struct xfs_trans	*tp,
+	struct xfs_buf		*bp,
+	int			reset_recur)
+{
+	struct xfs_buf_log_item	*bip;
+
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
+
+	/*
+	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
+	 * it doesn't have one yet, then allocate one and initialize it.
+	 * The checks to see if one is there are in xfs_buf_item_init().
+	 */
+	xfs_buf_item_init(bp, tp->t_mountp);
+	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
+	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
+	if (reset_recur)
+		bip->bli_recur = 0;
+
+	/*
+	 * Take a reference for this transaction on the buf item.
+	 */
+	atomic_inc(&bip->bli_refcount);
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip);
+
+	/*
+	 * Initialize b_fsprivate2 so we can find it with incore_match()
+	 * in xfs_trans_get_buf() and friends above.
+	 */
+	XFS_BUF_SET_FSPRIVATE2(bp, tp);
+
+}
+
+void
+xfs_trans_bjoin(
+	struct xfs_trans	*tp,
+	struct xfs_buf		*bp)
+{
+	_xfs_trans_bjoin(tp, bp, 0);
+	trace_xfs_trans_bjoin(bp->b_fspriv);
+}
 
 /*
  * Get and lock the buffer for the caller if it is not already
@@ -74,16 +134,14 @@ xfs_trans_get_buf(xfs_trans_t *tp,
 	xfs_buf_log_item_t	*bip;
 
 	if (flags == 0)
-		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
+		flags = XBF_LOCK | XBF_MAPPED;
 
 	/*
 	 * Default to a normal get_buf() call if the tp is NULL.
 	 */
-	if (tp == NULL) {
-		bp = xfs_buf_get_flags(target_dev, blkno, len,
-				       flags | BUF_BUSY);
-		return(bp);
-	}
+	if (tp == NULL)
+		return xfs_buf_get(target_dev, blkno, len,
+				   flags | XBF_DONT_BLOCK);
 
 	/*
 	 * If we find the buffer in the cache with this transaction
@@ -98,79 +156,43 @@ xfs_trans_get_buf(xfs_trans_t *tp,
 	}
 	if (bp != NULL) {
 		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
-		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
-			xfs_buftrace("TRANS GET RECUR SHUT", bp);
+		if (XFS_FORCED_SHUTDOWN(tp->t_mountp))
 			XFS_BUF_SUPER_STALE(bp);
-		}
+
 		/*
 		 * If the buffer is stale then it was binval'ed
 		 * since last read. This doesn't matter since the
 		 * caller isn't allowed to use the data anyway.
 		 */
-		else if (XFS_BUF_ISSTALE(bp)) {
-			xfs_buftrace("TRANS GET RECUR STALE", bp);
+		else if (XFS_BUF_ISSTALE(bp))
 			ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
-		}
+
 		ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
 		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
 		ASSERT(bip != NULL);
 		ASSERT(atomic_read(&bip->bli_refcount) > 0);
 		bip->bli_recur++;
-		xfs_buftrace("TRANS GET RECUR", bp);
-		xfs_buf_item_trace("GET RECUR", bip);
+		trace_xfs_trans_get_buf_recur(bip);
 		return (bp);
 	}
 
 	/*
-	 * We always specify the BUF_BUSY flag within a transaction so
-	 * that get_buf does not try to push out a delayed write buffer
+	 * We always specify the XBF_DONT_BLOCK flag within a transaction
+	 * so that get_buf does not try to push out a delayed write buffer
 	 * which might cause another transaction to take place (if the
 	 * buffer was delayed alloc).  Such recursive transactions can
 	 * easily deadlock with our current transaction as well as cause
 	 * us to run out of stack space.
 	 */
-	bp = xfs_buf_get_flags(target_dev, blkno, len, flags | BUF_BUSY);
+	bp = xfs_buf_get(target_dev, blkno, len, flags | XBF_DONT_BLOCK);
 	if (bp == NULL) {
 		return NULL;
 	}
 
 	ASSERT(!XFS_BUF_GETERROR(bp));
 
-	/*
-	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
-	 * it doesn't have one yet, then allocate one and initialize it.
-	 * The checks to see if one is there are in xfs_buf_item_init().
-	 */
-	xfs_buf_item_init(bp, tp->t_mountp);
-
-	/*
-	 * Set the recursion count for the buffer within this transaction
-	 * to 0.
-	 */
-	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
-	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
-	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
-	bip->bli_recur = 0;
-
-	/*
-	 * Take a reference for this transaction on the buf item.
-	 */
-	atomic_inc(&bip->bli_refcount);
-
-	/*
-	 * Get a log_item_desc to point at the new item.
-	 */
-	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);
-
-	/*
-	 * Initialize b_fsprivate2 so we can find it with incore_match()
-	 * above.
-	 */
-	XFS_BUF_SET_FSPRIVATE2(bp, tp);
-
-	xfs_buftrace("TRANS GET", bp);
-	xfs_buf_item_trace("GET", bip);
+	_xfs_trans_bjoin(tp, bp, 1);
+	trace_xfs_trans_get_buf(bp->b_fspriv);
 	return (bp);
 }
 
@@ -210,49 +232,16 @@ xfs_trans_getsb(xfs_trans_t *tp,
 		ASSERT(bip != NULL);
 		ASSERT(atomic_read(&bip->bli_refcount) > 0);
 		bip->bli_recur++;
-		xfs_buf_item_trace("GETSB RECUR", bip);
+		trace_xfs_trans_getsb_recur(bip);
 		return (bp);
 	}
 
 	bp = xfs_getsb(mp, flags);
-	if (bp == NULL) {
+	if (bp == NULL)
 		return NULL;
-	}
-
-	/*
-	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
-	 * it doesn't have one yet, then allocate one and initialize it.
-	 * The checks to see if one is there are in xfs_buf_item_init().
-	 */
-	xfs_buf_item_init(bp, mp);
-
-	/*
-	 * Set the recursion count for the buffer within this transaction
-	 * to 0.
-	 */
-	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
-	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
-	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
-	bip->bli_recur = 0;
 
-	/*
-	 * Take a reference for this transaction on the buf item.
-	 */
-	atomic_inc(&bip->bli_refcount);
-
-	/*
-	 * Get a log_item_desc to point at the new item.
-	 */
-	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);
-
-	/*
-	 * Initialize b_fsprivate2 so we can find it with incore_match()
-	 * above.
-	 */
-	XFS_BUF_SET_FSPRIVATE2(bp, tp);
-
-	xfs_buf_item_trace("GETSB", bip);
+	_xfs_trans_bjoin(tp, bp, 1);
+	trace_xfs_trans_getsb(bp->b_fspriv);
 	return (bp);
 }
 
@@ -296,15 +285,15 @@ xfs_trans_read_buf(
 	int			error;
 
 	if (flags == 0)
-		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
+		flags = XBF_LOCK | XBF_MAPPED;
 
 	/*
 	 * Default to a normal get_buf() call if the tp is NULL.
 	 */
 	if (tp == NULL) {
-		bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
+		bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
 		if (!bp)
-			return (flags & XFS_BUF_TRYLOCK) ?
+			return (flags & XBF_TRYLOCK) ?
 				EAGAIN : XFS_ERROR(ENOMEM);
 
 		if (XFS_BUF_GETERROR(bp) != 0) {
@@ -350,7 +339,7 @@ xfs_trans_read_buf(
 		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
 		ASSERT((XFS_BUF_ISERROR(bp)) == 0);
 		if (!(XFS_BUF_ISDONE(bp))) {
-			xfs_buftrace("READ_BUF_INCORE !DONE", bp);
+			trace_xfs_trans_read_buf_io(bp, _RET_IP_);
 			ASSERT(!XFS_BUF_ISASYNC(bp));
 			XFS_BUF_READ(bp);
 			xfsbdstrat(tp->t_mountp, bp);
@@ -375,7 +364,7 @@ xfs_trans_read_buf(
 		 * brelse it either. Just get out.
 		 */
 		if (XFS_FORCED_SHUTDOWN(mp)) {
-			xfs_buftrace("READ_BUF_INCORE XFSSHUTDN", bp);
+			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
 			*bpp = NULL;
 			return XFS_ERROR(EIO);
 		}
@@ -385,27 +374,26 @@ xfs_trans_read_buf(
 		bip->bli_recur++;
 
 		ASSERT(atomic_read(&bip->bli_refcount) > 0);
-		xfs_buf_item_trace("READ RECUR", bip);
+		trace_xfs_trans_read_buf_recur(bip);
 		*bpp = bp;
 		return 0;
 	}
 
 	/*
-	 * We always specify the BUF_BUSY flag within a transaction so
-	 * that get_buf does not try to push out a delayed write buffer
+	 * We always specify the XBF_DONT_BLOCK flag within a transaction
+	 * so that get_buf does not try to push out a delayed write buffer
 	 * which might cause another transaction to take place (if the
 	 * buffer was delayed alloc).  Such recursive transactions can
 	 * easily deadlock with our current transaction as well as cause
 	 * us to run out of stack space.
 	 */
-	bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
+	bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
 	if (bp == NULL) {
 		*bpp = NULL;
 		return 0;
 	}
 	if (XFS_BUF_GETERROR(bp) != 0) {
 		XFS_BUF_SUPER_STALE(bp);
-		xfs_buftrace("READ ERROR", bp);
 		error = XFS_BUF_GETERROR(bp);
 
 		xfs_ioerror_alert("xfs_trans_read_buf", mp,
@@ -431,41 +419,9 @@ xfs_trans_read_buf(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		goto shutdown_abort;
 
-	/*
-	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
-	 * it doesn't have one yet, then allocate one and initialize it.
-	 * The checks to see if one is there are in xfs_buf_item_init().
-	 */
-	xfs_buf_item_init(bp, tp->t_mountp);
-
-	/*
-	 * Set the recursion count for the buffer within this transaction
-	 * to 0.
-	 */
-	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
-	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
-	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
-	bip->bli_recur = 0;
-
-	/*
-	 * Take a reference for this transaction on the buf item.
-	 */
-	atomic_inc(&bip->bli_refcount);
-
-	/*
-	 * Get a log_item_desc to point at the new item.
-	 */
-	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);
+	_xfs_trans_bjoin(tp, bp, 1);
+	trace_xfs_trans_read_buf(bp->b_fspriv);
 
-	/*
-	 * Initialize b_fsprivate2 so we can find it with incore_match()
-	 * above.
-	 */
-	XFS_BUF_SET_FSPRIVATE2(bp, tp);
-
-	xfs_buftrace("TRANS READ", bp);
-	xfs_buf_item_trace("READ", bip);
 	*bpp = bp;
 	return 0;
 
@@ -480,10 +436,10 @@ shutdown_abort:
 	if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
 		cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
 #endif
-	ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
-		(XFS_B_STALE|XFS_B_DELWRI));
+	ASSERT((XFS_BUF_BFLAGS(bp) & (XBF_STALE|XBF_DELWRI)) !=
+		(XBF_STALE|XBF_DELWRI));
 
-	xfs_buftrace("READ_BUF XFSSHUTDN", bp);
+	trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
 	xfs_buf_relse(bp);
 	*bpp = NULL;
 	return XFS_ERROR(EIO);
@@ -549,13 +505,14 @@ xfs_trans_brelse(xfs_trans_t *tp,
 	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
 	ASSERT(lidp != NULL);
 
+	trace_xfs_trans_brelse(bip);
+
 	/*
 	 * If the release is just for a recursive lock,
 	 * then decrement the count and return.
 	 */
 	if (bip->bli_recur > 0) {
 		bip->bli_recur--;
-		xfs_buf_item_trace("RELSE RECUR", bip);
 		return;
 	}
 
@@ -563,10 +520,8 @@ xfs_trans_brelse(xfs_trans_t *tp,
 	 * If the buffer is dirty within this transaction, we can't
 	 * release it until we commit.
 	 */
-	if (lidp->lid_flags & XFS_LID_DIRTY) {
-		xfs_buf_item_trace("RELSE DIRTY", bip);
+	if (lidp->lid_flags & XFS_LID_DIRTY)
 		return;
-	}
 
 	/*
 	 * If the buffer has been invalidated, then we can't release
@@ -574,13 +529,10 @@ xfs_trans_brelse(xfs_trans_t *tp,
 	 * as part of this transaction.  This prevents us from pulling
 	 * the item from the AIL before we should.
 	 */
-	if (bip->bli_flags & XFS_BLI_STALE) {
-		xfs_buf_item_trace("RELSE STALE", bip);
+	if (bip->bli_flags & XFS_BLI_STALE)
 		return;
-	}
 
 	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
-	xfs_buf_item_trace("RELSE", bip);
 
 	/*
 	 * Free up the log item descriptor tracking the released item.
@@ -634,53 +586,6 @@ xfs_trans_brelse(xfs_trans_t *tp,
 }
 
 /*
- * Add the locked buffer to the transaction.
- * The buffer must be locked, and it cannot be associated with any
- * transaction.
- *
- * If the buffer does not yet have a buf log item associated with it,
- * then allocate one for it.  Then add the buf item to the transaction.
- */
-void
-xfs_trans_bjoin(xfs_trans_t	*tp,
-		xfs_buf_t	*bp)
-{
-	xfs_buf_log_item_t	*bip;
-
-	ASSERT(XFS_BUF_ISBUSY(bp));
-	ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
-
-	/*
-	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
-	 * it doesn't have one yet, then allocate one and initialize it.
-	 * The checks to see if one is there are in xfs_buf_item_init().
-	 */
-	xfs_buf_item_init(bp, tp->t_mountp);
-	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
-	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
-	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
-
-	/*
-	 * Take a reference for this transaction on the buf item.
-	 */
-	atomic_inc(&bip->bli_refcount);
-
-	/*
-	 * Get a log_item_desc to point at the new item.
-	 */
-	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip);
-
-	/*
-	 * Initialize b_fsprivate2 so we can find it with incore_match()
-	 * in xfs_trans_get_buf() and friends above.
-	 */
-	XFS_BUF_SET_FSPRIVATE2(bp, tp);
-
-	xfs_buf_item_trace("BJOIN", bip);
-}
-
-/*
  * Mark the buffer as not needing to be unlocked when the buf item's
  * IOP_UNLOCK() routine is called.  The buffer must already be locked
  * and associated with the given transaction.
@@ -701,7 +606,7 @@ xfs_trans_bhold(xfs_trans_t *tp,
 	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
 	bip->bli_flags |= XFS_BLI_HOLD;
-	xfs_buf_item_trace("BHOLD", bip);
+	trace_xfs_trans_bhold(bip);
 }
 
 /*
@@ -724,7 +629,8 @@ xfs_trans_bhold_release(xfs_trans_t *tp,
 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
 	ASSERT(bip->bli_flags & XFS_BLI_HOLD);
 	bip->bli_flags &= ~XFS_BLI_HOLD;
-	xfs_buf_item_trace("BHOLD RELEASE", bip);
+
+	trace_xfs_trans_bhold_release(bip);
 }
 
 /*
@@ -770,6 +676,8 @@ xfs_trans_log_buf(xfs_trans_t *tp,
 	XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
 	bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone;
 
+	trace_xfs_trans_log_buf(bip);
+
 	/*
 	 * If we invalidated the buffer within this transaction, then
 	 * cancel the invalidation now that we're dirtying the buffer
@@ -777,7 +685,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
 	 * because we have a reference to the buffer this entire time.
 	 */
 	if (bip->bli_flags & XFS_BLI_STALE) {
-		xfs_buf_item_trace("BLOG UNSTALE", bip);
 		bip->bli_flags &= ~XFS_BLI_STALE;
 		ASSERT(XFS_BUF_ISSTALE(bp));
 		XFS_BUF_UNSTALE(bp);
@@ -792,7 +699,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
 	lidp->lid_flags &= ~XFS_LID_BUF_STALE;
 	bip->bli_flags |= XFS_BLI_LOGGED;
 	xfs_buf_item_log(bip, first, last);
-	xfs_buf_item_trace("BLOG", bip);
 }
 
 
@@ -831,6 +737,8 @@ xfs_trans_binval(
 	ASSERT(lidp != NULL);
 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
+	trace_xfs_trans_binval(bip);
+
 	if (bip->bli_flags & XFS_BLI_STALE) {
 		/*
 		 * If the buffer is already invalidated, then
@@ -843,8 +751,6 @@ xfs_trans_binval(
 		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
 		ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
 		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
-		xfs_buftrace("XFS_BINVAL RECUR", bp);
-		xfs_buf_item_trace("BINVAL RECUR", bip);
 		return;
 	}
 
@@ -878,8 +784,6 @@ xfs_trans_binval(
 	       (bip->bli_format.blf_map_size * sizeof(uint)));
 	lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE;
 	tp->t_flags |= XFS_TRANS_DIRTY;
-	xfs_buftrace("XFS_BINVAL", bp);
-	xfs_buf_item_trace("BINVAL", bip);
 }
 
 /*