author     Dave Chinner <dchinner@redhat.com>    2012-06-22 04:50:14 -0400
committer  Ben Myers <bpm@sgi.com>               2012-07-01 15:50:07 -0400
commit     1d9025e56143c0c4aebebdb62e46618d3d284218 (patch)
tree       c31b34008dbd17ac1811c73cc515ea7f6247e2f0 /fs/xfs/xfs_da_btree.c
parent     3605431fb9739a30ccd0c6380ae8e3c6f8e670a5 (diff)
xfs: remove struct xfs_dabuf and infrastructure
The struct xfs_dabuf now only tracks a single xfs_buf and all the
information it holds can be gained directly from the xfs_buf. Hence we
can remove the struct dabuf and pass the xfs_buf around everywhere.

Kill the struct dabuf and the associated infrastructure.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>
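The conversion repeated throughout the hunks below is mechanical. A condensed sketch of the pattern, pieced together from the helpers removed at the bottom of this patch (surrounding declarations are elided, so this is illustrative rather than compilable):

    /*
     * Before: each dir/attr btree block access went through a
     * heap-allocated xfs_dabuf shim that merely cached bp->b_addr.
     */
    dabuf = xfs_da_buf_make(bp);             /* kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS) */
    node  = dabuf->data;                     /* same memory as bp->b_addr */
    xfs_da_log_buf(tp, dabuf, first, last);  /* thin wrapper around xfs_trans_log_buf() */
    xfs_da_brelse(tp, dabuf);                /* xfs_trans_brelse() plus freeing the shim */

    /*
     * After: the shim and xfs_dabuf_zone are gone; callers hold the
     * struct xfs_buf itself and call the transaction buffer routines
     * directly.
     */
    node = bp->b_addr;
    xfs_trans_log_buf(tp, bp, first, last);
    xfs_trans_brelse(tp, bp);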
Diffstat (limited to 'fs/xfs/xfs_da_btree.c')
-rw-r--r--  fs/xfs/xfs_da_btree.c  337
1 file changed, 118 insertions(+), 219 deletions(-)
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 76e5dbaa95ea..7bfb7dd334fc 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -83,9 +83,9 @@ STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
83/* 83/*
84 * Utility routines. 84 * Utility routines.
85 */ 85 */
86STATIC uint xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count); 86STATIC uint xfs_da_node_lasthash(struct xfs_buf *bp, int *count);
87STATIC int xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp); 87STATIC int xfs_da_node_order(struct xfs_buf *node1_bp,
88STATIC xfs_dabuf_t *xfs_da_buf_make(xfs_buf_t *bp); 88 struct xfs_buf *node2_bp);
89STATIC int xfs_da_blk_unlink(xfs_da_state_t *state, 89STATIC int xfs_da_blk_unlink(xfs_da_state_t *state,
90 xfs_da_state_blk_t *drop_blk, 90 xfs_da_state_blk_t *drop_blk,
91 xfs_da_state_blk_t *save_blk); 91 xfs_da_state_blk_t *save_blk);
@@ -100,10 +100,10 @@ STATIC void xfs_da_state_kill_altpath(xfs_da_state_t *state);
100 */ 100 */
101int 101int
102xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level, 102xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
103 xfs_dabuf_t **bpp, int whichfork) 103 struct xfs_buf **bpp, int whichfork)
104{ 104{
105 xfs_da_intnode_t *node; 105 xfs_da_intnode_t *node;
106 xfs_dabuf_t *bp; 106 struct xfs_buf *bp;
107 int error; 107 int error;
108 xfs_trans_t *tp; 108 xfs_trans_t *tp;
109 109
@@ -114,7 +114,7 @@ xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
114 if (error) 114 if (error)
115 return(error); 115 return(error);
116 ASSERT(bp != NULL); 116 ASSERT(bp != NULL);
117 node = bp->data; 117 node = bp->b_addr;
118 node->hdr.info.forw = 0; 118 node->hdr.info.forw = 0;
119 node->hdr.info.back = 0; 119 node->hdr.info.back = 0;
120 node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC); 120 node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
@@ -122,7 +122,7 @@ xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
122 node->hdr.count = 0; 122 node->hdr.count = 0;
123 node->hdr.level = cpu_to_be16(level); 123 node->hdr.level = cpu_to_be16(level);
124 124
125 xfs_da_log_buf(tp, bp, 125 xfs_trans_log_buf(tp, bp,
126 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); 126 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
127 127
128 *bpp = bp; 128 *bpp = bp;
@@ -138,7 +138,7 @@ xfs_da_split(xfs_da_state_t *state)
138{ 138{
139 xfs_da_state_blk_t *oldblk, *newblk, *addblk; 139 xfs_da_state_blk_t *oldblk, *newblk, *addblk;
140 xfs_da_intnode_t *node; 140 xfs_da_intnode_t *node;
141 xfs_dabuf_t *bp; 141 struct xfs_buf *bp;
142 int max, action, error, i; 142 int max, action, error, i;
143 143
144 trace_xfs_da_split(state->args); 144 trace_xfs_da_split(state->args);
@@ -203,7 +203,6 @@ xfs_da_split(xfs_da_state_t *state)
203 case XFS_DA_NODE_MAGIC: 203 case XFS_DA_NODE_MAGIC:
204 error = xfs_da_node_split(state, oldblk, newblk, addblk, 204 error = xfs_da_node_split(state, oldblk, newblk, addblk,
205 max - i, &action); 205 max - i, &action);
206 xfs_da_buf_done(addblk->bp);
207 addblk->bp = NULL; 206 addblk->bp = NULL;
208 if (error) 207 if (error)
209 return(error); /* GROT: dir is inconsistent */ 208 return(error); /* GROT: dir is inconsistent */
@@ -221,13 +220,6 @@ xfs_da_split(xfs_da_state_t *state)
221 * Update the btree to show the new hashval for this child. 220 * Update the btree to show the new hashval for this child.
222 */ 221 */
223 xfs_da_fixhashpath(state, &state->path); 222 xfs_da_fixhashpath(state, &state->path);
224 /*
225 * If we won't need this block again, it's getting dropped
226 * from the active path by the loop control, so we need
227 * to mark it done now.
228 */
229 if (i > 0 || !addblk)
230 xfs_da_buf_done(oldblk->bp);
231 } 223 }
232 if (!addblk) 224 if (!addblk)
233 return(0); 225 return(0);
@@ -239,8 +231,6 @@ xfs_da_split(xfs_da_state_t *state)
239 oldblk = &state->path.blk[0]; 231 oldblk = &state->path.blk[0];
240 error = xfs_da_root_split(state, oldblk, addblk); 232 error = xfs_da_root_split(state, oldblk, addblk);
241 if (error) { 233 if (error) {
242 xfs_da_buf_done(oldblk->bp);
243 xfs_da_buf_done(addblk->bp);
244 addblk->bp = NULL; 234 addblk->bp = NULL;
245 return(error); /* GROT: dir is inconsistent */ 235 return(error); /* GROT: dir is inconsistent */
246 } 236 }
@@ -252,7 +242,7 @@ xfs_da_split(xfs_da_state_t *state)
252 * and the original block 0 could be at any position in the list. 242 * and the original block 0 could be at any position in the list.
253 */ 243 */
254 244
255 node = oldblk->bp->data; 245 node = oldblk->bp->b_addr;
256 if (node->hdr.info.forw) { 246 if (node->hdr.info.forw) {
257 if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) { 247 if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
258 bp = addblk->bp; 248 bp = addblk->bp;
@@ -260,13 +250,13 @@ xfs_da_split(xfs_da_state_t *state)
260 ASSERT(state->extravalid); 250 ASSERT(state->extravalid);
261 bp = state->extrablk.bp; 251 bp = state->extrablk.bp;
262 } 252 }
263 node = bp->data; 253 node = bp->b_addr;
264 node->hdr.info.back = cpu_to_be32(oldblk->blkno); 254 node->hdr.info.back = cpu_to_be32(oldblk->blkno);
265 xfs_da_log_buf(state->args->trans, bp, 255 xfs_trans_log_buf(state->args->trans, bp,
266 XFS_DA_LOGRANGE(node, &node->hdr.info, 256 XFS_DA_LOGRANGE(node, &node->hdr.info,
267 sizeof(node->hdr.info))); 257 sizeof(node->hdr.info)));
268 } 258 }
269 node = oldblk->bp->data; 259 node = oldblk->bp->b_addr;
270 if (node->hdr.info.back) { 260 if (node->hdr.info.back) {
271 if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) { 261 if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
272 bp = addblk->bp; 262 bp = addblk->bp;
@@ -274,14 +264,12 @@ xfs_da_split(xfs_da_state_t *state)
274 ASSERT(state->extravalid); 264 ASSERT(state->extravalid);
275 bp = state->extrablk.bp; 265 bp = state->extrablk.bp;
276 } 266 }
277 node = bp->data; 267 node = bp->b_addr;
278 node->hdr.info.forw = cpu_to_be32(oldblk->blkno); 268 node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
279 xfs_da_log_buf(state->args->trans, bp, 269 xfs_trans_log_buf(state->args->trans, bp,
280 XFS_DA_LOGRANGE(node, &node->hdr.info, 270 XFS_DA_LOGRANGE(node, &node->hdr.info,
281 sizeof(node->hdr.info))); 271 sizeof(node->hdr.info)));
282 } 272 }
283 xfs_da_buf_done(oldblk->bp);
284 xfs_da_buf_done(addblk->bp);
285 addblk->bp = NULL; 273 addblk->bp = NULL;
286 return(0); 274 return(0);
287} 275}
@@ -298,7 +286,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
298 xfs_da_intnode_t *node, *oldroot; 286 xfs_da_intnode_t *node, *oldroot;
299 xfs_da_args_t *args; 287 xfs_da_args_t *args;
300 xfs_dablk_t blkno; 288 xfs_dablk_t blkno;
301 xfs_dabuf_t *bp; 289 struct xfs_buf *bp;
302 int error, size; 290 int error, size;
303 xfs_inode_t *dp; 291 xfs_inode_t *dp;
304 xfs_trans_t *tp; 292 xfs_trans_t *tp;
@@ -323,8 +311,8 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
323 if (error) 311 if (error)
324 return(error); 312 return(error);
325 ASSERT(bp != NULL); 313 ASSERT(bp != NULL);
326 node = bp->data; 314 node = bp->b_addr;
327 oldroot = blk1->bp->data; 315 oldroot = blk1->bp->b_addr;
328 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) { 316 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
329 size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] - 317 size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
330 (char *)oldroot); 318 (char *)oldroot);
@@ -335,8 +323,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
335 (char *)leaf); 323 (char *)leaf);
336 } 324 }
337 memcpy(node, oldroot, size); 325 memcpy(node, oldroot, size);
338 xfs_da_log_buf(tp, bp, 0, size - 1); 326 xfs_trans_log_buf(tp, bp, 0, size - 1);
339 xfs_da_buf_done(blk1->bp);
340 blk1->bp = bp; 327 blk1->bp = bp;
341 blk1->blkno = blkno; 328 blk1->blkno = blkno;
342 329
@@ -348,7 +335,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
348 be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork); 335 be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
349 if (error) 336 if (error)
350 return(error); 337 return(error);
351 node = bp->data; 338 node = bp->b_addr;
352 node->btree[0].hashval = cpu_to_be32(blk1->hashval); 339 node->btree[0].hashval = cpu_to_be32(blk1->hashval);
353 node->btree[0].before = cpu_to_be32(blk1->blkno); 340 node->btree[0].before = cpu_to_be32(blk1->blkno);
354 node->btree[1].hashval = cpu_to_be32(blk2->hashval); 341 node->btree[1].hashval = cpu_to_be32(blk2->hashval);
@@ -365,10 +352,9 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
365#endif 352#endif
366 353
367 /* Header is already logged by xfs_da_node_create */ 354 /* Header is already logged by xfs_da_node_create */
368 xfs_da_log_buf(tp, bp, 355 xfs_trans_log_buf(tp, bp,
369 XFS_DA_LOGRANGE(node, node->btree, 356 XFS_DA_LOGRANGE(node, node->btree,
370 sizeof(xfs_da_node_entry_t) * 2)); 357 sizeof(xfs_da_node_entry_t) * 2));
371 xfs_da_buf_done(bp);
372 358
373 return(0); 359 return(0);
374} 360}
@@ -389,7 +375,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
389 375
390 trace_xfs_da_node_split(state->args); 376 trace_xfs_da_node_split(state->args);
391 377
392 node = oldblk->bp->data; 378 node = oldblk->bp->b_addr;
393 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 379 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
394 380
395 /* 381 /*
@@ -436,7 +422,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
436 * 422 *
437 * If we had double-split op below us, then add the extra block too. 423 * If we had double-split op below us, then add the extra block too.
438 */ 424 */
439 node = oldblk->bp->data; 425 node = oldblk->bp->b_addr;
440 if (oldblk->index <= be16_to_cpu(node->hdr.count)) { 426 if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
441 oldblk->index++; 427 oldblk->index++;
442 xfs_da_node_add(state, oldblk, addblk); 428 xfs_da_node_add(state, oldblk, addblk);
@@ -477,8 +463,8 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
477 463
478 trace_xfs_da_node_rebalance(state->args); 464 trace_xfs_da_node_rebalance(state->args);
479 465
480 node1 = blk1->bp->data; 466 node1 = blk1->bp->b_addr;
481 node2 = blk2->bp->data; 467 node2 = blk2->bp->b_addr;
482 /* 468 /*
483 * Figure out how many entries need to move, and in which direction. 469 * Figure out how many entries need to move, and in which direction.
484 * Swap the nodes around if that makes it simpler. 470 * Swap the nodes around if that makes it simpler.
@@ -532,7 +518,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
532 btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)]; 518 btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
533 memcpy(btree_d, btree_s, tmp); 519 memcpy(btree_d, btree_s, tmp);
534 be16_add_cpu(&node1->hdr.count, count); 520 be16_add_cpu(&node1->hdr.count, count);
535 xfs_da_log_buf(tp, blk1->bp, 521 xfs_trans_log_buf(tp, blk1->bp,
536 XFS_DA_LOGRANGE(node1, btree_d, tmp)); 522 XFS_DA_LOGRANGE(node1, btree_d, tmp));
537 523
538 /* 524 /*
@@ -549,9 +535,9 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
549 /* 535 /*
550 * Log header of node 1 and all current bits of node 2. 536 * Log header of node 1 and all current bits of node 2.
551 */ 537 */
552 xfs_da_log_buf(tp, blk1->bp, 538 xfs_trans_log_buf(tp, blk1->bp,
553 XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr))); 539 XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
554 xfs_da_log_buf(tp, blk2->bp, 540 xfs_trans_log_buf(tp, blk2->bp,
555 XFS_DA_LOGRANGE(node2, &node2->hdr, 541 XFS_DA_LOGRANGE(node2, &node2->hdr,
556 sizeof(node2->hdr) + 542 sizeof(node2->hdr) +
557 sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count))); 543 sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));
@@ -560,8 +546,8 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
560 * Record the last hashval from each block for upward propagation. 546 * Record the last hashval from each block for upward propagation.
561 * (note: don't use the swapped node pointers) 547 * (note: don't use the swapped node pointers)
562 */ 548 */
563 node1 = blk1->bp->data; 549 node1 = blk1->bp->b_addr;
564 node2 = blk2->bp->data; 550 node2 = blk2->bp->b_addr;
565 blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval); 551 blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
566 blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval); 552 blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);
567 553
@@ -587,7 +573,7 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
587 573
588 trace_xfs_da_node_add(state->args); 574 trace_xfs_da_node_add(state->args);
589 575
590 node = oldblk->bp->data; 576 node = oldblk->bp->b_addr;
591 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 577 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
592 ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count))); 578 ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
593 ASSERT(newblk->blkno != 0); 579 ASSERT(newblk->blkno != 0);
@@ -606,10 +592,10 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
606 } 592 }
607 btree->hashval = cpu_to_be32(newblk->hashval); 593 btree->hashval = cpu_to_be32(newblk->hashval);
608 btree->before = cpu_to_be32(newblk->blkno); 594 btree->before = cpu_to_be32(newblk->blkno);
609 xfs_da_log_buf(state->args->trans, oldblk->bp, 595 xfs_trans_log_buf(state->args->trans, oldblk->bp,
610 XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree))); 596 XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
611 be16_add_cpu(&node->hdr.count, 1); 597 be16_add_cpu(&node->hdr.count, 1);
612 xfs_da_log_buf(state->args->trans, oldblk->bp, 598 xfs_trans_log_buf(state->args->trans, oldblk->bp,
613 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); 599 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
614 600
615 /* 601 /*
@@ -735,7 +721,7 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
735 xfs_da_intnode_t *oldroot; 721 xfs_da_intnode_t *oldroot;
736 xfs_da_args_t *args; 722 xfs_da_args_t *args;
737 xfs_dablk_t child; 723 xfs_dablk_t child;
738 xfs_dabuf_t *bp; 724 struct xfs_buf *bp;
739 int error; 725 int error;
740 726
741 trace_xfs_da_root_join(state->args); 727 trace_xfs_da_root_join(state->args);
@@ -743,7 +729,7 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
743 args = state->args; 729 args = state->args;
744 ASSERT(args != NULL); 730 ASSERT(args != NULL);
745 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC); 731 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
746 oldroot = root_blk->bp->data; 732 oldroot = root_blk->bp->b_addr;
747 ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 733 ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
748 ASSERT(!oldroot->hdr.info.forw); 734 ASSERT(!oldroot->hdr.info.forw);
749 ASSERT(!oldroot->hdr.info.back); 735 ASSERT(!oldroot->hdr.info.back);
@@ -765,11 +751,11 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
765 if (error) 751 if (error)
766 return(error); 752 return(error);
767 ASSERT(bp != NULL); 753 ASSERT(bp != NULL);
768 xfs_da_blkinfo_onlychild_validate(bp->data, 754 xfs_da_blkinfo_onlychild_validate(bp->b_addr,
769 be16_to_cpu(oldroot->hdr.level)); 755 be16_to_cpu(oldroot->hdr.level));
770 756
771 memcpy(root_blk->bp->data, bp->data, state->blocksize); 757 memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
772 xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1); 758 xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
773 error = xfs_da_shrink_inode(args, child, bp); 759 error = xfs_da_shrink_inode(args, child, bp);
774 return(error); 760 return(error);
775} 761}
@@ -791,7 +777,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
791 xfs_da_blkinfo_t *info; 777 xfs_da_blkinfo_t *info;
792 int count, forward, error, retval, i; 778 int count, forward, error, retval, i;
793 xfs_dablk_t blkno; 779 xfs_dablk_t blkno;
794 xfs_dabuf_t *bp; 780 struct xfs_buf *bp;
795 781
796 /* 782 /*
797 * Check for the degenerate case of the block being over 50% full. 783 * Check for the degenerate case of the block being over 50% full.
@@ -799,7 +785,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
799 * to coalesce with a sibling. 785 * to coalesce with a sibling.
800 */ 786 */
801 blk = &state->path.blk[ state->path.active-1 ]; 787 blk = &state->path.blk[ state->path.active-1 ];
802 info = blk->bp->data; 788 info = blk->bp->b_addr;
803 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 789 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
804 node = (xfs_da_intnode_t *)info; 790 node = (xfs_da_intnode_t *)info;
805 count = be16_to_cpu(node->hdr.count); 791 count = be16_to_cpu(node->hdr.count);
@@ -859,10 +845,10 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
859 count = state->node_ents; 845 count = state->node_ents;
860 count -= state->node_ents >> 2; 846 count -= state->node_ents >> 2;
861 count -= be16_to_cpu(node->hdr.count); 847 count -= be16_to_cpu(node->hdr.count);
862 node = bp->data; 848 node = bp->b_addr;
863 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 849 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
864 count -= be16_to_cpu(node->hdr.count); 850 count -= be16_to_cpu(node->hdr.count);
865 xfs_da_brelse(state->args->trans, bp); 851 xfs_trans_brelse(state->args->trans, bp);
866 if (count >= 0) 852 if (count >= 0)
867 break; /* fits with at least 25% to spare */ 853 break; /* fits with at least 25% to spare */
868 } 854 }
@@ -934,14 +920,14 @@ xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
934 break; 920 break;
935 } 921 }
936 for (blk--, level--; level >= 0; blk--, level--) { 922 for (blk--, level--; level >= 0; blk--, level--) {
937 node = blk->bp->data; 923 node = blk->bp->b_addr;
938 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 924 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
939 btree = &node->btree[ blk->index ]; 925 btree = &node->btree[ blk->index ];
940 if (be32_to_cpu(btree->hashval) == lasthash) 926 if (be32_to_cpu(btree->hashval) == lasthash)
941 break; 927 break;
942 blk->hashval = lasthash; 928 blk->hashval = lasthash;
943 btree->hashval = cpu_to_be32(lasthash); 929 btree->hashval = cpu_to_be32(lasthash);
944 xfs_da_log_buf(state->args->trans, blk->bp, 930 xfs_trans_log_buf(state->args->trans, blk->bp,
945 XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); 931 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
946 932
947 lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); 933 lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
@@ -960,7 +946,7 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
960 946
961 trace_xfs_da_node_remove(state->args); 947 trace_xfs_da_node_remove(state->args);
962 948
963 node = drop_blk->bp->data; 949 node = drop_blk->bp->b_addr;
964 ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count)); 950 ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
965 ASSERT(drop_blk->index >= 0); 951 ASSERT(drop_blk->index >= 0);
966 952
@@ -972,15 +958,15 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
972 tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1; 958 tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
973 tmp *= (uint)sizeof(xfs_da_node_entry_t); 959 tmp *= (uint)sizeof(xfs_da_node_entry_t);
974 memmove(btree, btree + 1, tmp); 960 memmove(btree, btree + 1, tmp);
975 xfs_da_log_buf(state->args->trans, drop_blk->bp, 961 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
976 XFS_DA_LOGRANGE(node, btree, tmp)); 962 XFS_DA_LOGRANGE(node, btree, tmp));
977 btree = &node->btree[be16_to_cpu(node->hdr.count)-1]; 963 btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
978 } 964 }
979 memset((char *)btree, 0, sizeof(xfs_da_node_entry_t)); 965 memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
980 xfs_da_log_buf(state->args->trans, drop_blk->bp, 966 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
981 XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); 967 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
982 be16_add_cpu(&node->hdr.count, -1); 968 be16_add_cpu(&node->hdr.count, -1);
983 xfs_da_log_buf(state->args->trans, drop_blk->bp, 969 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
984 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); 970 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
985 971
986 /* 972 /*
@@ -1005,8 +991,8 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1005 991
1006 trace_xfs_da_node_unbalance(state->args); 992 trace_xfs_da_node_unbalance(state->args);
1007 993
1008 drop_node = drop_blk->bp->data; 994 drop_node = drop_blk->bp->b_addr;
1009 save_node = save_blk->bp->data; 995 save_node = save_blk->bp->b_addr;
1010 ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 996 ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1011 ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 997 ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1012 tp = state->args->trans; 998 tp = state->args->trans;
@@ -1023,13 +1009,13 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1023 tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t); 1009 tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1024 memmove(btree, &save_node->btree[0], tmp); 1010 memmove(btree, &save_node->btree[0], tmp);
1025 btree = &save_node->btree[0]; 1011 btree = &save_node->btree[0];
1026 xfs_da_log_buf(tp, save_blk->bp, 1012 xfs_trans_log_buf(tp, save_blk->bp,
1027 XFS_DA_LOGRANGE(save_node, btree, 1013 XFS_DA_LOGRANGE(save_node, btree,
1028 (be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) * 1014 (be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
1029 sizeof(xfs_da_node_entry_t))); 1015 sizeof(xfs_da_node_entry_t)));
1030 } else { 1016 } else {
1031 btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)]; 1017 btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
1032 xfs_da_log_buf(tp, save_blk->bp, 1018 xfs_trans_log_buf(tp, save_blk->bp,
1033 XFS_DA_LOGRANGE(save_node, btree, 1019 XFS_DA_LOGRANGE(save_node, btree,
1034 be16_to_cpu(drop_node->hdr.count) * 1020 be16_to_cpu(drop_node->hdr.count) *
1035 sizeof(xfs_da_node_entry_t))); 1021 sizeof(xfs_da_node_entry_t)));
@@ -1042,7 +1028,7 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1042 memcpy(btree, &drop_node->btree[0], tmp); 1028 memcpy(btree, &drop_node->btree[0], tmp);
1043 be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count)); 1029 be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
1044 1030
1045 xfs_da_log_buf(tp, save_blk->bp, 1031 xfs_trans_log_buf(tp, save_blk->bp,
1046 XFS_DA_LOGRANGE(save_node, &save_node->hdr, 1032 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1047 sizeof(save_node->hdr))); 1033 sizeof(save_node->hdr)));
1048 1034
@@ -1100,7 +1086,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1100 state->path.active--; 1086 state->path.active--;
1101 return(error); 1087 return(error);
1102 } 1088 }
1103 curr = blk->bp->data; 1089 curr = blk->bp->b_addr;
1104 blk->magic = be16_to_cpu(curr->magic); 1090 blk->magic = be16_to_cpu(curr->magic);
1105 ASSERT(blk->magic == XFS_DA_NODE_MAGIC || 1091 ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
1106 blk->magic == XFS_DIR2_LEAFN_MAGIC || 1092 blk->magic == XFS_DIR2_LEAFN_MAGIC ||
@@ -1110,7 +1096,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1110 * Search an intermediate node for a match. 1096 * Search an intermediate node for a match.
1111 */ 1097 */
1112 if (blk->magic == XFS_DA_NODE_MAGIC) { 1098 if (blk->magic == XFS_DA_NODE_MAGIC) {
1113 node = blk->bp->data; 1099 node = blk->bp->b_addr;
1114 max = be16_to_cpu(node->hdr.count); 1100 max = be16_to_cpu(node->hdr.count);
1115 blk->hashval = be32_to_cpu(node->btree[max-1].hashval); 1101 blk->hashval = be32_to_cpu(node->btree[max-1].hashval);
1116 1102
@@ -1216,15 +1202,15 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1216 xfs_da_blkinfo_t *old_info, *new_info, *tmp_info; 1202 xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
1217 xfs_da_args_t *args; 1203 xfs_da_args_t *args;
1218 int before=0, error; 1204 int before=0, error;
1219 xfs_dabuf_t *bp; 1205 struct xfs_buf *bp;
1220 1206
1221 /* 1207 /*
1222 * Set up environment. 1208 * Set up environment.
1223 */ 1209 */
1224 args = state->args; 1210 args = state->args;
1225 ASSERT(args != NULL); 1211 ASSERT(args != NULL);
1226 old_info = old_blk->bp->data; 1212 old_info = old_blk->bp->b_addr;
1227 new_info = new_blk->bp->data; 1213 new_info = new_blk->bp->b_addr;
1228 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC || 1214 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1229 old_blk->magic == XFS_DIR2_LEAFN_MAGIC || 1215 old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1230 old_blk->magic == XFS_ATTR_LEAF_MAGIC); 1216 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
@@ -1261,12 +1247,11 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1261 if (error) 1247 if (error)
1262 return(error); 1248 return(error);
1263 ASSERT(bp != NULL); 1249 ASSERT(bp != NULL);
1264 tmp_info = bp->data; 1250 tmp_info = bp->b_addr;
1265 ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic)); 1251 ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
1266 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno); 1252 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1267 tmp_info->forw = cpu_to_be32(new_blk->blkno); 1253 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1268 xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); 1254 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1269 xfs_da_buf_done(bp);
1270 } 1255 }
1271 old_info->back = cpu_to_be32(new_blk->blkno); 1256 old_info->back = cpu_to_be32(new_blk->blkno);
1272 } else { 1257 } else {
@@ -1283,18 +1268,17 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1283 if (error) 1268 if (error)
1284 return(error); 1269 return(error);
1285 ASSERT(bp != NULL); 1270 ASSERT(bp != NULL);
1286 tmp_info = bp->data; 1271 tmp_info = bp->b_addr;
1287 ASSERT(tmp_info->magic == old_info->magic); 1272 ASSERT(tmp_info->magic == old_info->magic);
1288 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno); 1273 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1289 tmp_info->back = cpu_to_be32(new_blk->blkno); 1274 tmp_info->back = cpu_to_be32(new_blk->blkno);
1290 xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); 1275 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1291 xfs_da_buf_done(bp);
1292 } 1276 }
1293 old_info->forw = cpu_to_be32(new_blk->blkno); 1277 old_info->forw = cpu_to_be32(new_blk->blkno);
1294 } 1278 }
1295 1279
1296 xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); 1280 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1297 xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1); 1281 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1298 return(0); 1282 return(0);
1299} 1283}
1300 1284
@@ -1302,12 +1286,14 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1302 * Compare two intermediate nodes for "order". 1286 * Compare two intermediate nodes for "order".
1303 */ 1287 */
1304STATIC int 1288STATIC int
1305xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp) 1289xfs_da_node_order(
1290 struct xfs_buf *node1_bp,
1291 struct xfs_buf *node2_bp)
1306{ 1292{
1307 xfs_da_intnode_t *node1, *node2; 1293 xfs_da_intnode_t *node1, *node2;
1308 1294
1309 node1 = node1_bp->data; 1295 node1 = node1_bp->b_addr;
1310 node2 = node2_bp->data; 1296 node2 = node2_bp->b_addr;
1311 ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) && 1297 ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
1312 node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 1298 node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1313 if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) && 1299 if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
@@ -1324,11 +1310,13 @@ xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
1324 * Pick up the last hashvalue from an intermediate node. 1310 * Pick up the last hashvalue from an intermediate node.
1325 */ 1311 */
1326STATIC uint 1312STATIC uint
1327xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count) 1313xfs_da_node_lasthash(
1314 struct xfs_buf *bp,
1315 int *count)
1328{ 1316{
1329 xfs_da_intnode_t *node; 1317 xfs_da_intnode_t *node;
1330 1318
1331 node = bp->data; 1319 node = bp->b_addr;
1332 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 1320 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1333 if (count) 1321 if (count)
1334 *count = be16_to_cpu(node->hdr.count); 1322 *count = be16_to_cpu(node->hdr.count);
@@ -1346,7 +1334,7 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1346{ 1334{
1347 xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info; 1335 xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
1348 xfs_da_args_t *args; 1336 xfs_da_args_t *args;
1349 xfs_dabuf_t *bp; 1337 struct xfs_buf *bp;
1350 int error; 1338 int error;
1351 1339
1352 /* 1340 /*
@@ -1354,8 +1342,8 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1354 */ 1342 */
1355 args = state->args; 1343 args = state->args;
1356 ASSERT(args != NULL); 1344 ASSERT(args != NULL);
1357 save_info = save_blk->bp->data; 1345 save_info = save_blk->bp->b_addr;
1358 drop_info = drop_blk->bp->data; 1346 drop_info = drop_blk->bp->b_addr;
1359 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC || 1347 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1360 save_blk->magic == XFS_DIR2_LEAFN_MAGIC || 1348 save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1361 save_blk->magic == XFS_ATTR_LEAF_MAGIC); 1349 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
@@ -1380,13 +1368,12 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1380 if (error) 1368 if (error)
1381 return(error); 1369 return(error);
1382 ASSERT(bp != NULL); 1370 ASSERT(bp != NULL);
1383 tmp_info = bp->data; 1371 tmp_info = bp->b_addr;
1384 ASSERT(tmp_info->magic == save_info->magic); 1372 ASSERT(tmp_info->magic == save_info->magic);
1385 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno); 1373 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1386 tmp_info->forw = cpu_to_be32(save_blk->blkno); 1374 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1387 xfs_da_log_buf(args->trans, bp, 0, 1375 xfs_trans_log_buf(args->trans, bp, 0,
1388 sizeof(*tmp_info) - 1); 1376 sizeof(*tmp_info) - 1);
1389 xfs_da_buf_done(bp);
1390 } 1377 }
1391 } else { 1378 } else {
1392 trace_xfs_da_unlink_forward(args); 1379 trace_xfs_da_unlink_forward(args);
@@ -1398,17 +1385,16 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1398 if (error) 1385 if (error)
1399 return(error); 1386 return(error);
1400 ASSERT(bp != NULL); 1387 ASSERT(bp != NULL);
1401 tmp_info = bp->data; 1388 tmp_info = bp->b_addr;
1402 ASSERT(tmp_info->magic == save_info->magic); 1389 ASSERT(tmp_info->magic == save_info->magic);
1403 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno); 1390 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1404 tmp_info->back = cpu_to_be32(save_blk->blkno); 1391 tmp_info->back = cpu_to_be32(save_blk->blkno);
1405 xfs_da_log_buf(args->trans, bp, 0, 1392 xfs_trans_log_buf(args->trans, bp, 0,
1406 sizeof(*tmp_info) - 1); 1393 sizeof(*tmp_info) - 1);
1407 xfs_da_buf_done(bp);
1408 } 1394 }
1409 } 1395 }
1410 1396
1411 xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1); 1397 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1412 return(0); 1398 return(0);
1413} 1399}
1414 1400
@@ -1443,7 +1429,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
1443 level = (path->active-1) - 1; /* skip bottom layer in path */ 1429 level = (path->active-1) - 1; /* skip bottom layer in path */
1444 for (blk = &path->blk[level]; level >= 0; blk--, level--) { 1430 for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1445 ASSERT(blk->bp != NULL); 1431 ASSERT(blk->bp != NULL);
1446 node = blk->bp->data; 1432 node = blk->bp->b_addr;
1447 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 1433 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1448 if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) { 1434 if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
1449 blk->index++; 1435 blk->index++;
@@ -1471,7 +1457,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
1471 * (if it's dirty, trans won't actually let go) 1457 * (if it's dirty, trans won't actually let go)
1472 */ 1458 */
1473 if (release) 1459 if (release)
1474 xfs_da_brelse(args->trans, blk->bp); 1460 xfs_trans_brelse(args->trans, blk->bp);
1475 1461
1476 /* 1462 /*
1477 * Read the next child block. 1463 * Read the next child block.
@@ -1482,7 +1468,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
1482 if (error) 1468 if (error)
1483 return(error); 1469 return(error);
1484 ASSERT(blk->bp != NULL); 1470 ASSERT(blk->bp != NULL);
1485 info = blk->bp->data; 1471 info = blk->bp->b_addr;
1486 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || 1472 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1487 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || 1473 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1488 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 1474 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
@@ -1702,11 +1688,13 @@ xfs_da_grow_inode(
1702 * a bmap btree split to do that. 1688 * a bmap btree split to do that.
1703 */ 1689 */
1704STATIC int 1690STATIC int
1705xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, 1691xfs_da_swap_lastblock(
1706 xfs_dabuf_t **dead_bufp) 1692 xfs_da_args_t *args,
1693 xfs_dablk_t *dead_blknop,
1694 struct xfs_buf **dead_bufp)
1707{ 1695{
1708 xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno; 1696 xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
1709 xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf; 1697 struct xfs_buf *dead_buf, *last_buf, *sib_buf, *par_buf;
1710 xfs_fileoff_t lastoff; 1698 xfs_fileoff_t lastoff;
1711 xfs_inode_t *ip; 1699 xfs_inode_t *ip;
1712 xfs_trans_t *tp; 1700 xfs_trans_t *tp;
@@ -1744,9 +1732,9 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1744 /* 1732 /*
1745 * Copy the last block into the dead buffer and log it. 1733 * Copy the last block into the dead buffer and log it.
1746 */ 1734 */
1747 memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize); 1735 memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
1748 xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1); 1736 xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
1749 dead_info = dead_buf->data; 1737 dead_info = dead_buf->b_addr;
1750 /* 1738 /*
1751 * Get values from the moved block. 1739 * Get values from the moved block.
1752 */ 1740 */
@@ -1767,7 +1755,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1767 if ((sib_blkno = be32_to_cpu(dead_info->back))) { 1755 if ((sib_blkno = be32_to_cpu(dead_info->back))) {
1768 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) 1756 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1769 goto done; 1757 goto done;
1770 sib_info = sib_buf->data; 1758 sib_info = sib_buf->b_addr;
1771 if (unlikely( 1759 if (unlikely(
1772 be32_to_cpu(sib_info->forw) != last_blkno || 1760 be32_to_cpu(sib_info->forw) != last_blkno ||
1773 sib_info->magic != dead_info->magic)) { 1761 sib_info->magic != dead_info->magic)) {
@@ -1777,10 +1765,9 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1777 goto done; 1765 goto done;
1778 } 1766 }
1779 sib_info->forw = cpu_to_be32(dead_blkno); 1767 sib_info->forw = cpu_to_be32(dead_blkno);
1780 xfs_da_log_buf(tp, sib_buf, 1768 xfs_trans_log_buf(tp, sib_buf,
1781 XFS_DA_LOGRANGE(sib_info, &sib_info->forw, 1769 XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
1782 sizeof(sib_info->forw))); 1770 sizeof(sib_info->forw)));
1783 xfs_da_buf_done(sib_buf);
1784 sib_buf = NULL; 1771 sib_buf = NULL;
1785 } 1772 }
1786 /* 1773 /*
@@ -1789,7 +1776,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1789 if ((sib_blkno = be32_to_cpu(dead_info->forw))) { 1776 if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
1790 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) 1777 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1791 goto done; 1778 goto done;
1792 sib_info = sib_buf->data; 1779 sib_info = sib_buf->b_addr;
1793 if (unlikely( 1780 if (unlikely(
1794 be32_to_cpu(sib_info->back) != last_blkno || 1781 be32_to_cpu(sib_info->back) != last_blkno ||
1795 sib_info->magic != dead_info->magic)) { 1782 sib_info->magic != dead_info->magic)) {
@@ -1799,10 +1786,9 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1799 goto done; 1786 goto done;
1800 } 1787 }
1801 sib_info->back = cpu_to_be32(dead_blkno); 1788 sib_info->back = cpu_to_be32(dead_blkno);
1802 xfs_da_log_buf(tp, sib_buf, 1789 xfs_trans_log_buf(tp, sib_buf,
1803 XFS_DA_LOGRANGE(sib_info, &sib_info->back, 1790 XFS_DA_LOGRANGE(sib_info, &sib_info->back,
1804 sizeof(sib_info->back))); 1791 sizeof(sib_info->back)));
1805 xfs_da_buf_done(sib_buf);
1806 sib_buf = NULL; 1792 sib_buf = NULL;
1807 } 1793 }
1808 par_blkno = mp->m_dirleafblk; 1794 par_blkno = mp->m_dirleafblk;
@@ -1813,7 +1799,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1813 for (;;) { 1799 for (;;) {
1814 if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w))) 1800 if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
1815 goto done; 1801 goto done;
1816 par_node = par_buf->data; 1802 par_node = par_buf->b_addr;
1817 if (unlikely(par_node->hdr.info.magic != 1803 if (unlikely(par_node->hdr.info.magic !=
1818 cpu_to_be16(XFS_DA_NODE_MAGIC) || 1804 cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1819 (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) { 1805 (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
@@ -1837,7 +1823,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1837 par_blkno = be32_to_cpu(par_node->btree[entno].before); 1823 par_blkno = be32_to_cpu(par_node->btree[entno].before);
1838 if (level == dead_level + 1) 1824 if (level == dead_level + 1)
1839 break; 1825 break;
1840 xfs_da_brelse(tp, par_buf); 1826 xfs_trans_brelse(tp, par_buf);
1841 par_buf = NULL; 1827 par_buf = NULL;
1842 } 1828 }
1843 /* 1829 /*
@@ -1853,7 +1839,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1853 if (entno < be16_to_cpu(par_node->hdr.count)) 1839 if (entno < be16_to_cpu(par_node->hdr.count))
1854 break; 1840 break;
1855 par_blkno = be32_to_cpu(par_node->hdr.info.forw); 1841 par_blkno = be32_to_cpu(par_node->hdr.info.forw);
1856 xfs_da_brelse(tp, par_buf); 1842 xfs_trans_brelse(tp, par_buf);
1857 par_buf = NULL; 1843 par_buf = NULL;
1858 if (unlikely(par_blkno == 0)) { 1844 if (unlikely(par_blkno == 0)) {
1859 XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)", 1845 XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
@@ -1863,7 +1849,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1863 } 1849 }
1864 if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w))) 1850 if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
1865 goto done; 1851 goto done;
1866 par_node = par_buf->data; 1852 par_node = par_buf->b_addr;
1867 if (unlikely( 1853 if (unlikely(
1868 be16_to_cpu(par_node->hdr.level) != level || 1854 be16_to_cpu(par_node->hdr.level) != level ||
1869 par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) { 1855 par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
@@ -1878,20 +1864,18 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1878 * Update the parent entry pointing to the moved block. 1864 * Update the parent entry pointing to the moved block.
1879 */ 1865 */
1880 par_node->btree[entno].before = cpu_to_be32(dead_blkno); 1866 par_node->btree[entno].before = cpu_to_be32(dead_blkno);
1881 xfs_da_log_buf(tp, par_buf, 1867 xfs_trans_log_buf(tp, par_buf,
1882 XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before, 1868 XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
1883 sizeof(par_node->btree[entno].before))); 1869 sizeof(par_node->btree[entno].before)));
1884 xfs_da_buf_done(par_buf);
1885 xfs_da_buf_done(dead_buf);
1886 *dead_blknop = last_blkno; 1870 *dead_blknop = last_blkno;
1887 *dead_bufp = last_buf; 1871 *dead_bufp = last_buf;
1888 return 0; 1872 return 0;
1889done: 1873done:
1890 if (par_buf) 1874 if (par_buf)
1891 xfs_da_brelse(tp, par_buf); 1875 xfs_trans_brelse(tp, par_buf);
1892 if (sib_buf) 1876 if (sib_buf)
1893 xfs_da_brelse(tp, sib_buf); 1877 xfs_trans_brelse(tp, sib_buf);
1894 xfs_da_brelse(tp, last_buf); 1878 xfs_trans_brelse(tp, last_buf);
1895 return error; 1879 return error;
1896} 1880}
1897 1881
@@ -1899,8 +1883,10 @@ done:
1899 * Remove a btree block from a directory or attribute. 1883 * Remove a btree block from a directory or attribute.
1900 */ 1884 */
1901int 1885int
1902xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno, 1886xfs_da_shrink_inode(
1903 xfs_dabuf_t *dead_buf) 1887 xfs_da_args_t *args,
1888 xfs_dablk_t dead_blkno,
1889 struct xfs_buf *dead_buf)
1904{ 1890{
1905 xfs_inode_t *dp; 1891 xfs_inode_t *dp;
1906 int done, error, w, count; 1892 int done, error, w, count;
@@ -1935,7 +1921,7 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
1935 break; 1921 break;
1936 } 1922 }
1937 } 1923 }
1938 xfs_da_binval(tp, dead_buf); 1924 xfs_trans_binval(tp, dead_buf);
1939 return error; 1925 return error;
1940} 1926}
1941 1927
@@ -2099,7 +2085,7 @@ xfs_da_get_buf(
2099 struct xfs_inode *dp, 2085 struct xfs_inode *dp,
2100 xfs_dablk_t bno, 2086 xfs_dablk_t bno,
2101 xfs_daddr_t mappedbno, 2087 xfs_daddr_t mappedbno,
2102 xfs_dabuf_t **bpp, 2088 struct xfs_buf **bpp,
2103 int whichfork) 2089 int whichfork)
2104{ 2090{
2105 struct xfs_buf *bp; 2091 struct xfs_buf *bp;
@@ -2128,7 +2114,7 @@ xfs_da_get_buf(
2128 goto out_free; 2114 goto out_free;
2129 } 2115 }
2130 2116
2131 *bpp = xfs_da_buf_make(bp); 2117 *bpp = bp;
2132 2118
2133out_free: 2119out_free:
2134 if (mapp != &map) 2120 if (mapp != &map)
@@ -2146,7 +2132,7 @@ xfs_da_read_buf(
2146 struct xfs_inode *dp, 2132 struct xfs_inode *dp,
2147 xfs_dablk_t bno, 2133 xfs_dablk_t bno,
2148 xfs_daddr_t mappedbno, 2134 xfs_daddr_t mappedbno,
2149 xfs_dabuf_t **bpp, 2135 struct xfs_buf **bpp,
2150 int whichfork) 2136 int whichfork)
2151{ 2137{
2152 struct xfs_buf *bp; 2138 struct xfs_buf *bp;
@@ -2178,16 +2164,14 @@ xfs_da_read_buf(
2178 else 2164 else
2179 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF); 2165 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
2180 2166
2181 *bpp = xfs_da_buf_make(bp);
2182
2183 /* 2167 /*
2184 * This verification code will be moved to a CRC verification callback 2168 * This verification code will be moved to a CRC verification callback
2185 * function so just leave it here unchanged until then. 2169 * function so just leave it here unchanged until then.
2186 */ 2170 */
2187 { 2171 {
2188 xfs_dir2_data_hdr_t *hdr = (*bpp)->data; 2172 xfs_dir2_data_hdr_t *hdr = bp->b_addr;
2189 xfs_dir2_free_t *free = (*bpp)->data; 2173 xfs_dir2_free_t *free = bp->b_addr;
2190 xfs_da_blkinfo_t *info = (*bpp)->data; 2174 xfs_da_blkinfo_t *info = bp->b_addr;
2191 uint magic, magic1; 2175 uint magic, magic1;
2192 struct xfs_mount *mp = dp->i_mount; 2176 struct xfs_mount *mp = dp->i_mount;
2193 2177
@@ -2207,11 +2191,11 @@ xfs_da_read_buf(
2207 XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)", 2191 XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
2208 XFS_ERRLEVEL_LOW, mp, info); 2192 XFS_ERRLEVEL_LOW, mp, info);
2209 error = XFS_ERROR(EFSCORRUPTED); 2193 error = XFS_ERROR(EFSCORRUPTED);
2210 xfs_da_brelse(trans, *bpp); 2194 xfs_trans_brelse(trans, bp);
2211 goto out_free; 2195 goto out_free;
2212 } 2196 }
2213 } 2197 }
2214 2198 *bpp = bp;
2215out_free: 2199out_free:
2216 if (mapp != &map) 2200 if (mapp != &map)
2217 kmem_free(mapp); 2201 kmem_free(mapp);
@@ -2259,7 +2243,6 @@ out_free:
2259} 2243}
2260 2244
2261kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */ 2245kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
2262kmem_zone_t *xfs_dabuf_zone; /* dabuf zone */
2263 2246
2264/* 2247/*
2265 * Allocate a dir-state structure. 2248 * Allocate a dir-state structure.
@@ -2279,13 +2262,8 @@ xfs_da_state_kill_altpath(xfs_da_state_t *state)
2279{ 2262{
2280 int i; 2263 int i;
2281 2264
2282 for (i = 0; i < state->altpath.active; i++) { 2265 for (i = 0; i < state->altpath.active; i++)
2283 if (state->altpath.blk[i].bp) { 2266 state->altpath.blk[i].bp = NULL;
2284 if (state->altpath.blk[i].bp != state->path.blk[i].bp)
2285 xfs_da_buf_done(state->altpath.blk[i].bp);
2286 state->altpath.blk[i].bp = NULL;
2287 }
2288 }
2289 state->altpath.active = 0; 2267 state->altpath.active = 0;
2290} 2268}
2291 2269
@@ -2295,88 +2273,9 @@ xfs_da_state_kill_altpath(xfs_da_state_t *state)
2295void 2273void
2296xfs_da_state_free(xfs_da_state_t *state) 2274xfs_da_state_free(xfs_da_state_t *state)
2297{ 2275{
2298 int i;
2299
2300 xfs_da_state_kill_altpath(state); 2276 xfs_da_state_kill_altpath(state);
2301 for (i = 0; i < state->path.active; i++) {
2302 if (state->path.blk[i].bp)
2303 xfs_da_buf_done(state->path.blk[i].bp);
2304 }
2305 if (state->extravalid && state->extrablk.bp)
2306 xfs_da_buf_done(state->extrablk.bp);
2307#ifdef DEBUG 2277#ifdef DEBUG
2308 memset((char *)state, 0, sizeof(*state)); 2278 memset((char *)state, 0, sizeof(*state));
2309#endif /* DEBUG */ 2279#endif /* DEBUG */
2310 kmem_zone_free(xfs_da_state_zone, state); 2280 kmem_zone_free(xfs_da_state_zone, state);
2311} 2281}
2312
2313/*
2314 * Create a dabuf.
2315 */
2316/* ARGSUSED */
2317STATIC xfs_dabuf_t *
2318xfs_da_buf_make(xfs_buf_t *bp)
2319{
2320 xfs_dabuf_t *dabuf;
2321
2322 dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
2323 dabuf->bbcount = bp->b_length;
2324 dabuf->data = bp->b_addr;
2325 dabuf->bp = bp;
2326 return dabuf;
2327}
2328
2329/*
2330 * Release a dabuf.
2331 */
2332void
2333xfs_da_buf_done(xfs_dabuf_t *dabuf)
2334{
2335 ASSERT(dabuf->data && dabuf->bbcount && dabuf->bp);
2336 kmem_zone_free(xfs_dabuf_zone, dabuf);
2337}
2338
2339/*
2340 * Log transaction from a dabuf.
2341 */
2342void
2343xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
2344{
2345 ASSERT(dabuf->data && dabuf->bbcount && dabuf->bp);
2346 ASSERT(dabuf->data == dabuf->bp->b_addr);
2347 xfs_trans_log_buf(tp, dabuf->bp, first, last);
2348}
2349
2350/*
2351 * Release dabuf from a transaction.
2352 * Have to free up the dabuf before the buffers are released,
2353 * since the synchronization on the dabuf is really the lock on the buffer.
2354 */
2355void
2356xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
2357{
2358 ASSERT(dabuf->data && dabuf->bbcount && dabuf->bp);
2359 xfs_trans_brelse(tp, dabuf->bp);
2360 xfs_da_buf_done(dabuf);
2361}
2362
2363/*
2364 * Invalidate dabuf from a transaction.
2365 */
2366void
2367xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
2368{
2369 ASSERT(dabuf->data && dabuf->bbcount && dabuf->bp);
2370 xfs_da_buf_done(dabuf);
2371 xfs_trans_binval(tp, dabuf->bp);
2372}
2373
2374/*
2375 * Get the first daddr from a dabuf.
2376 */
2377xfs_daddr_t
2378xfs_da_blkno(xfs_dabuf_t *dabuf)
2379{
2380 ASSERT(dabuf->data);
2381 return XFS_BUF_ADDR(dabuf->bp);
2382}