author     Eric Sandeen <sandeen@sandeen.net>       2006-09-27 21:05:40 -0400
committer  Tim Shimmin <tes@sgi.com>                2006-09-27 21:05:40 -0400
commit     91d87232044c1ceb8371625c27479e982984a848 (patch)
tree       535e6ae2630625624c245dee818523b70ea4fb9f /fs/xfs/xfs_alloc_btree.c
parent     edcd4bce5e58987c8c039bdf7705a22cd229fe96 (diff)
[XFS] Reduce endian flipping in alloc_btree, same as was done for
ialloc_btree.

SGI-PV: 955302
SGI-Modid: xfs-linux-melb:xfs-kern:26910a

Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_alloc_btree.c')
-rw-r--r--   fs/xfs/xfs_alloc_btree.c   108
1 file changed, 57 insertions(+), 51 deletions(-)
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 500a1d75403a..74cadf95d4e8 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -92,6 +92,7 @@ xfs_alloc_delrec(
 	xfs_alloc_key_t	*rkp;	/* right block key pointer */
 	xfs_alloc_ptr_t	*rpp;	/* right block address pointer */
 	int		rrecs=0;	/* number of records in right block */
+	int		numrecs;
 	xfs_alloc_rec_t	*rrp;	/* right block record pointer */
 	xfs_btree_cur_t	*tcur;	/* temporary btree cursor */
 
@@ -115,7 +116,8 @@ xfs_alloc_delrec(
 	/*
 	 * Fail if we're off the end of the block.
 	 */
-	if (ptr > be16_to_cpu(block->bb_numrecs)) {
+	numrecs = be16_to_cpu(block->bb_numrecs);
+	if (ptr > numrecs) {
 		*stat = 0;
 		return 0;
 	}
@@ -129,18 +131,18 @@ xfs_alloc_delrec(
 		lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
 		lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
 #ifdef DEBUG
-		for (i = ptr; i < be16_to_cpu(block->bb_numrecs); i++) {
+		for (i = ptr; i < numrecs; i++) {
 			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
 				return error;
 		}
 #endif
-		if (ptr < be16_to_cpu(block->bb_numrecs)) {
+		if (ptr < numrecs) {
 			memmove(&lkp[ptr - 1], &lkp[ptr],
-				(be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lkp));
+				(numrecs - ptr) * sizeof(*lkp));
 			memmove(&lpp[ptr - 1], &lpp[ptr],
-				(be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lpp));
-			xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1);
-			xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1);
+				(numrecs - ptr) * sizeof(*lpp));
+			xfs_alloc_log_ptrs(cur, bp, ptr, numrecs - 1);
+			xfs_alloc_log_keys(cur, bp, ptr, numrecs - 1);
 		}
 	}
 	/*
@@ -149,10 +151,10 @@ xfs_alloc_delrec(
 	 */
 	else {
 		lrp = XFS_ALLOC_REC_ADDR(block, 1, cur);
-		if (ptr < be16_to_cpu(block->bb_numrecs)) {
+		if (ptr < numrecs) {
 			memmove(&lrp[ptr - 1], &lrp[ptr],
-				(be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lrp));
-			xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1);
+				(numrecs - ptr) * sizeof(*lrp));
+			xfs_alloc_log_recs(cur, bp, ptr, numrecs - 1);
 		}
 		/*
 		 * If it's the first record in the block, we'll need a key
@@ -167,7 +169,8 @@ xfs_alloc_delrec(
 	/*
 	 * Decrement and log the number of entries in the block.
 	 */
-	be16_add(&block->bb_numrecs, -1);
+	numrecs--;
+	block->bb_numrecs = cpu_to_be16(numrecs);
 	xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
 	/*
 	 * See if the longest free extent in the allocation group was
@@ -181,14 +184,14 @@ xfs_alloc_delrec(
 	if (level == 0 &&
 	    cur->bc_btnum == XFS_BTNUM_CNT &&
 	    be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
-	    ptr > be16_to_cpu(block->bb_numrecs)) {
-		ASSERT(ptr == be16_to_cpu(block->bb_numrecs) + 1);
+	    ptr > numrecs) {
+		ASSERT(ptr == numrecs + 1);
 		/*
 		 * There are still records in the block.  Grab the size
 		 * from the last one.
 		 */
-		if (be16_to_cpu(block->bb_numrecs)) {
-			rrp = XFS_ALLOC_REC_ADDR(block, be16_to_cpu(block->bb_numrecs), cur);
+		if (numrecs) {
+			rrp = XFS_ALLOC_REC_ADDR(block, numrecs, cur);
 			agf->agf_longest = rrp->ar_blockcount;
 		}
 		/*
@@ -211,7 +214,7 @@ xfs_alloc_delrec(
 	 * and it's NOT the leaf level,
 	 * then we can get rid of this level.
 	 */
-	if (be16_to_cpu(block->bb_numrecs) == 1 && level > 0) {
+	if (numrecs == 1 && level > 0) {
 		/*
 		 * lpp is still set to the first pointer in the block.
 		 * Make it the new root of the btree.
@@ -267,7 +270,7 @@ xfs_alloc_delrec(
 	 * If the number of records remaining in the block is at least
 	 * the minimum, we're done.
 	 */
-	if (be16_to_cpu(block->bb_numrecs) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
+	if (numrecs >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
 		if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i)))
 			return error;
 		*stat = 1;
@@ -419,19 +422,21 @@ xfs_alloc_delrec(
 	 * See if we can join with the left neighbor block.
 	 */
 	if (lbno != NULLAGBLOCK &&
-	    lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
+	    lrecs + numrecs <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
 		/*
 		 * Set "right" to be the starting block,
 		 * "left" to be the left neighbor.
 		 */
 		rbno = bno;
 		right = block;
+		rrecs = be16_to_cpu(right->bb_numrecs);
 		rbp = bp;
 		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
 				cur->bc_private.a.agno, lbno, 0, &lbp,
 				XFS_ALLOC_BTREE_REF)))
 			return error;
 		left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
+		lrecs = be16_to_cpu(left->bb_numrecs);
 		if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
 			return error;
 	}
@@ -439,20 +444,21 @@ xfs_alloc_delrec(
 	 * If that won't work, see if we can join with the right neighbor block.
 	 */
 	else if (rbno != NULLAGBLOCK &&
-		 rrecs + be16_to_cpu(block->bb_numrecs) <=
-			XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
+		 rrecs + numrecs <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
 		/*
 		 * Set "left" to be the starting block,
 		 * "right" to be the right neighbor.
 		 */
 		lbno = bno;
 		left = block;
+		lrecs = be16_to_cpu(left->bb_numrecs);
 		lbp = bp;
 		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
 				cur->bc_private.a.agno, rbno, 0, &rbp,
 				XFS_ALLOC_BTREE_REF)))
 			return error;
 		right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
+		rrecs = be16_to_cpu(right->bb_numrecs);
 		if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
 			return error;
 	}
@@ -474,34 +480,28 @@ xfs_alloc_delrec(
 		/*
 		 * It's a non-leaf.  Move keys and pointers.
 		 */
-		lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur);
-		lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur);
+		lkp = XFS_ALLOC_KEY_ADDR(left, lrecs + 1, cur);
+		lpp = XFS_ALLOC_PTR_ADDR(left, lrecs + 1, cur);
 		rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
 		rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
 #ifdef DEBUG
-		for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
+		for (i = 0; i < rrecs; i++) {
 			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
 				return error;
 		}
 #endif
-		memcpy(lkp, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*lkp));
-		memcpy(lpp, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*lpp));
-		xfs_alloc_log_keys(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1,
-			be16_to_cpu(left->bb_numrecs) +
-			be16_to_cpu(right->bb_numrecs));
-		xfs_alloc_log_ptrs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1,
-			be16_to_cpu(left->bb_numrecs) +
-			be16_to_cpu(right->bb_numrecs));
+		memcpy(lkp, rkp, rrecs * sizeof(*lkp));
+		memcpy(lpp, rpp, rrecs * sizeof(*lpp));
+		xfs_alloc_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
+		xfs_alloc_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
 	} else {
 		/*
 		 * It's a leaf.  Move records.
 		 */
-		lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur);
+		lrp = XFS_ALLOC_REC_ADDR(left, lrecs + 1, cur);
 		rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
-		memcpy(lrp, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*lrp));
-		xfs_alloc_log_recs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1,
-			be16_to_cpu(left->bb_numrecs) +
-			be16_to_cpu(right->bb_numrecs));
+		memcpy(lrp, rrp, rrecs * sizeof(*lrp));
+		xfs_alloc_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
 	}
 	/*
 	 * If we joined with the left neighbor, set the buffer in the
@@ -509,7 +509,7 @@ xfs_alloc_delrec(
 	 */
 	if (bp != lbp) {
 		xfs_btree_setbuf(cur, level, lbp);
-		cur->bc_ptrs[level] += be16_to_cpu(left->bb_numrecs);
+		cur->bc_ptrs[level] += lrecs;
 	}
 	/*
 	 * If we joined with the right neighbor and there's a level above
@@ -521,7 +521,8 @@ xfs_alloc_delrec(
 	/*
 	 * Fix up the number of records in the surviving block.
 	 */
-	be16_add(&left->bb_numrecs, be16_to_cpu(right->bb_numrecs));
+	lrecs += rrecs;
+	left->bb_numrecs = cpu_to_be16(lrecs);
 	/*
 	 * Fix up the right block pointer in the surviving block, and log it.
 	 */
@@ -608,6 +609,7 @@ xfs_alloc_insrec(
 	xfs_btree_cur_t		*ncur;	/* new cursor to be used at next lvl */
 	xfs_alloc_key_t		nkey;	/* new key value, from split */
 	xfs_alloc_rec_t		nrec;	/* new record value, for caller */
+	int			numrecs;
 	int			optr;	/* old ptr value */
 	xfs_alloc_ptr_t		*pp;	/* pointer to btree addresses */
 	int			ptr;	/* index in btree block for this rec */
@@ -653,13 +655,14 @@ xfs_alloc_insrec(
 	 */
 	bp = cur->bc_bufs[level];
 	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
+	numrecs = be16_to_cpu(block->bb_numrecs);
 #ifdef DEBUG
 	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
 		return error;
 	/*
 	 * Check that the new entry is being inserted in the right place.
 	 */
-	if (ptr <= be16_to_cpu(block->bb_numrecs)) {
+	if (ptr <= numrecs) {
 		if (level == 0) {
 			rp = XFS_ALLOC_REC_ADDR(block, ptr, cur);
 			xfs_btree_check_rec(cur->bc_btnum, recp, rp);
@@ -675,7 +678,7 @@ xfs_alloc_insrec(
 	 * If the block is full, we can't insert the new entry until we
 	 * make the block un-full.
 	 */
-	if (be16_to_cpu(block->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
+	if (numrecs == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
 		/*
 		 * First, try shifting an entry to the right neighbor.
 		 */
@@ -729,6 +732,7 @@ xfs_alloc_insrec(
 	 * At this point we know there's room for our new entry in the block
 	 * we're pointing at.
 	 */
+	numrecs = be16_to_cpu(block->bb_numrecs);
 	if (level > 0) {
 		/*
 		 * It's a non-leaf entry.  Make a hole for the new data
@@ -737,15 +741,15 @@ xfs_alloc_insrec(
 		kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
 		pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
 #ifdef DEBUG
-		for (i = be16_to_cpu(block->bb_numrecs); i >= ptr; i--) {
+		for (i = numrecs; i >= ptr; i--) {
 			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level)))
 				return error;
 		}
 #endif
 		memmove(&kp[ptr], &kp[ptr - 1],
-			(be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*kp));
+			(numrecs - ptr + 1) * sizeof(*kp));
 		memmove(&pp[ptr], &pp[ptr - 1],
-			(be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*pp));
+			(numrecs - ptr + 1) * sizeof(*pp));
 #ifdef DEBUG
 		if ((error = xfs_btree_check_sptr(cur, *bnop, level)))
 			return error;
@@ -755,11 +759,12 @@ xfs_alloc_insrec(
 		 */
 		kp[ptr - 1] = key;
 		pp[ptr - 1] = cpu_to_be32(*bnop);
-		be16_add(&block->bb_numrecs, 1);
-		xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs));
-		xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs));
+		numrecs++;
+		block->bb_numrecs = cpu_to_be16(numrecs);
+		xfs_alloc_log_keys(cur, bp, ptr, numrecs);
+		xfs_alloc_log_ptrs(cur, bp, ptr, numrecs);
 #ifdef DEBUG
-		if (ptr < be16_to_cpu(block->bb_numrecs))
+		if (ptr < numrecs)
 			xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1,
 				kp + ptr);
 #endif
@@ -769,16 +774,17 @@ xfs_alloc_insrec(
 		 */
 		rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
 		memmove(&rp[ptr], &rp[ptr - 1],
-			(be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*rp));
+			(numrecs - ptr + 1) * sizeof(*rp));
 		/*
 		 * Now stuff the new record in, bump numrecs
 		 * and log the new data.
 		 */
 		rp[ptr - 1] = *recp;
-		be16_add(&block->bb_numrecs, 1);
-		xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs));
+		numrecs++;
+		block->bb_numrecs = cpu_to_be16(numrecs);
+		xfs_alloc_log_recs(cur, bp, ptr, numrecs);
 #ifdef DEBUG
-		if (ptr < be16_to_cpu(block->bb_numrecs))
+		if (ptr < numrecs)
			xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1,
 				rp + ptr);
 #endif