Diffstat (limited to 'fs/xfs/xfs_alloc_btree.c')
-rw-r--r-- | fs/xfs/xfs_alloc_btree.c | 413
1 files changed, 208 insertions, 205 deletions
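
Note: the change below is a mechanical endianness conversion. Reads of on-disk fields (the btree block header's bb_numrecs, bb_leftsib and bb_rightsib, the AGF fields, and the key/record contents) move from INT_GET(x, ARCH_CONVERT) to be16_to_cpu()/be32_to_cpu(), writes move from INT_SET()/INT_COPY() to cpu_to_be16()/cpu_to_be32() or plain structure assignment, and in-place adjustments move from INT_MOD() to be16_add()/be32_add(). The sketch below only illustrates that pattern on a simplified stand-in structure; it is not taken from the XFS sources, and demo_be16_add() is merely an assumed equivalent of the be16_add() helper the diff relies on.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Simplified stand-in for the short-form btree block header fields. */
struct demo_block {
	__be16	bb_numrecs;	/* record count, big-endian on disk */
	__be32	bb_rightsib;	/* right sibling block, big-endian on disk */
};

/*
 * Assumed equivalent of the be16_add() helper this diff relies on:
 * read the big-endian field, adjust it in CPU order, write it back.
 */
static inline void demo_be16_add(__be16 *field, int delta)
{
	*field = cpu_to_be16(be16_to_cpu(*field) + delta);
}

static void demo_drop_record(struct demo_block *block)
{
	/* was: if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == 0) */
	if (be16_to_cpu(block->bb_numrecs) == 0)
		return;

	/* was: INT_MOD(block->bb_numrecs, ARCH_CONVERT, -1); */
	demo_be16_add(&block->bb_numrecs, -1);

	/* was: INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); */
	block->bb_rightsib = cpu_to_be32(0xffffffff);	/* NULLAGBLOCK */
}

The same read-modify-write shape is what be32_add() applies to the 32-bit AGF counters in the hunks below.
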
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 7ecc8c0611d1..a1d92da86ccd 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -117,7 +117,7 @@ xfs_alloc_delrec( | |||
117 | /* | 117 | /* |
118 | * Fail if we're off the end of the block. | 118 | * Fail if we're off the end of the block. |
119 | */ | 119 | */ |
120 | if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 120 | if (ptr > be16_to_cpu(block->bb_numrecs)) { |
121 | *stat = 0; | 121 | *stat = 0; |
122 | return 0; | 122 | return 0; |
123 | } | 123 | } |
@@ -131,18 +131,18 @@ xfs_alloc_delrec( | |||
131 | lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur); | 131 | lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur); |
132 | lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur); | 132 | lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur); |
133 | #ifdef DEBUG | 133 | #ifdef DEBUG |
134 | for (i = ptr; i < INT_GET(block->bb_numrecs, ARCH_CONVERT); i++) { | 134 | for (i = ptr; i < be16_to_cpu(block->bb_numrecs); i++) { |
135 | if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) | 135 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level))) |
136 | return error; | 136 | return error; |
137 | } | 137 | } |
138 | #endif | 138 | #endif |
139 | if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 139 | if (ptr < be16_to_cpu(block->bb_numrecs)) { |
140 | memmove(&lkp[ptr - 1], &lkp[ptr], | 140 | memmove(&lkp[ptr - 1], &lkp[ptr], |
141 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lkp)); /* INT_: mem copy */ | 141 | (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lkp)); |
142 | memmove(&lpp[ptr - 1], &lpp[ptr], | 142 | memmove(&lpp[ptr - 1], &lpp[ptr], |
143 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lpp)); /* INT_: mem copy */ | 143 | (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lpp)); |
144 | xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); | 144 | xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); |
145 | xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); | 145 | xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); |
146 | } | 146 | } |
147 | } | 147 | } |
148 | /* | 148 | /* |
@@ -151,25 +151,25 @@ xfs_alloc_delrec( | |||
151 | */ | 151 | */ |
152 | else { | 152 | else { |
153 | lrp = XFS_ALLOC_REC_ADDR(block, 1, cur); | 153 | lrp = XFS_ALLOC_REC_ADDR(block, 1, cur); |
154 | if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 154 | if (ptr < be16_to_cpu(block->bb_numrecs)) { |
155 | memmove(&lrp[ptr - 1], &lrp[ptr], | 155 | memmove(&lrp[ptr - 1], &lrp[ptr], |
156 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lrp)); | 156 | (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lrp)); |
157 | xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); | 157 | xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); |
158 | } | 158 | } |
159 | /* | 159 | /* |
160 | * If it's the first record in the block, we'll need a key | 160 | * If it's the first record in the block, we'll need a key |
161 | * structure to pass up to the next level (updkey). | 161 | * structure to pass up to the next level (updkey). |
162 | */ | 162 | */ |
163 | if (ptr == 1) { | 163 | if (ptr == 1) { |
164 | key.ar_startblock = lrp->ar_startblock; /* INT_: direct copy */ | 164 | key.ar_startblock = lrp->ar_startblock; |
165 | key.ar_blockcount = lrp->ar_blockcount; /* INT_: direct copy */ | 165 | key.ar_blockcount = lrp->ar_blockcount; |
166 | lkp = &key; | 166 | lkp = &key; |
167 | } | 167 | } |
168 | } | 168 | } |
169 | /* | 169 | /* |
170 | * Decrement and log the number of entries in the block. | 170 | * Decrement and log the number of entries in the block. |
171 | */ | 171 | */ |
172 | INT_MOD(block->bb_numrecs, ARCH_CONVERT, -1); | 172 | be16_add(&block->bb_numrecs, -1); |
173 | xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); | 173 | xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); |
174 | /* | 174 | /* |
175 | * See if the longest free extent in the allocation group was | 175 | * See if the longest free extent in the allocation group was |
@@ -182,24 +182,24 @@ xfs_alloc_delrec( | |||
182 | 182 | ||
183 | if (level == 0 && | 183 | if (level == 0 && |
184 | cur->bc_btnum == XFS_BTNUM_CNT && | 184 | cur->bc_btnum == XFS_BTNUM_CNT && |
185 | INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && | 185 | be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && |
186 | ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 186 | ptr > be16_to_cpu(block->bb_numrecs)) { |
187 | ASSERT(ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT) + 1); | 187 | ASSERT(ptr == be16_to_cpu(block->bb_numrecs) + 1); |
188 | /* | 188 | /* |
189 | * There are still records in the block. Grab the size | 189 | * There are still records in the block. Grab the size |
190 | * from the last one. | 190 | * from the last one. |
191 | */ | 191 | */ |
192 | if (INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 192 | if (be16_to_cpu(block->bb_numrecs)) { |
193 | rrp = XFS_ALLOC_REC_ADDR(block, INT_GET(block->bb_numrecs, ARCH_CONVERT), cur); | 193 | rrp = XFS_ALLOC_REC_ADDR(block, be16_to_cpu(block->bb_numrecs), cur); |
194 | INT_COPY(agf->agf_longest, rrp->ar_blockcount, ARCH_CONVERT); | 194 | agf->agf_longest = rrp->ar_blockcount; |
195 | } | 195 | } |
196 | /* | 196 | /* |
197 | * No free extents left. | 197 | * No free extents left. |
198 | */ | 198 | */ |
199 | else | 199 | else |
200 | agf->agf_longest = 0; | 200 | agf->agf_longest = 0; |
201 | mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest = | 201 | mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest = |
202 | INT_GET(agf->agf_longest, ARCH_CONVERT); | 202 | be32_to_cpu(agf->agf_longest); |
203 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 203 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
204 | XFS_AGF_LONGEST); | 204 | XFS_AGF_LONGEST); |
205 | } | 205 | } |
@@ -213,15 +213,15 @@ xfs_alloc_delrec( | |||
213 | * and it's NOT the leaf level, | 213 | * and it's NOT the leaf level, |
214 | * then we can get rid of this level. | 214 | * then we can get rid of this level. |
215 | */ | 215 | */ |
216 | if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == 1 && level > 0) { | 216 | if (be16_to_cpu(block->bb_numrecs) == 1 && level > 0) { |
217 | /* | 217 | /* |
218 | * lpp is still set to the first pointer in the block. | 218 | * lpp is still set to the first pointer in the block. |
219 | * Make it the new root of the btree. | 219 | * Make it the new root of the btree. |
220 | */ | 220 | */ |
221 | bno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT); | 221 | bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]); |
222 | INT_COPY(agf->agf_roots[cur->bc_btnum], *lpp, ARCH_CONVERT); | 222 | agf->agf_roots[cur->bc_btnum] = *lpp; |
223 | INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, -1); | 223 | be32_add(&agf->agf_levels[cur->bc_btnum], -1); |
224 | mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_levels[cur->bc_btnum]--; | 224 | mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--; |
225 | /* | 225 | /* |
226 | * Put this buffer/block on the ag's freelist. | 226 | * Put this buffer/block on the ag's freelist. |
227 | */ | 227 | */ |
@@ -243,7 +243,7 @@ xfs_alloc_delrec( | |||
243 | * that freed the block. | 243 | * that freed the block. |
244 | */ | 244 | */ |
245 | xfs_alloc_mark_busy(cur->bc_tp, | 245 | xfs_alloc_mark_busy(cur->bc_tp, |
246 | INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); | 246 | be32_to_cpu(agf->agf_seqno), bno, 1); |
247 | 247 | ||
248 | xfs_trans_agbtree_delta(cur->bc_tp, -1); | 248 | xfs_trans_agbtree_delta(cur->bc_tp, -1); |
249 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 249 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
@@ -269,7 +269,7 @@ xfs_alloc_delrec( | |||
269 | * If the number of records remaining in the block is at least | 269 | * If the number of records remaining in the block is at least |
270 | * the minimum, we're done. | 270 | * the minimum, we're done. |
271 | */ | 271 | */ |
272 | if (INT_GET(block->bb_numrecs, ARCH_CONVERT) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { | 272 | if (be16_to_cpu(block->bb_numrecs) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { |
273 | if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i))) | 273 | if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i))) |
274 | return error; | 274 | return error; |
275 | *stat = 1; | 275 | *stat = 1; |
@@ -280,8 +280,8 @@ xfs_alloc_delrec( | |||
280 | * tree balanced. Look at the left and right sibling blocks to | 280 | * tree balanced. Look at the left and right sibling blocks to |
281 | * see if we can re-balance by moving only one record. | 281 | * see if we can re-balance by moving only one record. |
282 | */ | 282 | */ |
283 | rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); | 283 | rbno = be32_to_cpu(block->bb_rightsib); |
284 | lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT); | 284 | lbno = be32_to_cpu(block->bb_leftsib); |
285 | bno = NULLAGBLOCK; | 285 | bno = NULLAGBLOCK; |
286 | ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); | 286 | ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); |
287 | /* | 287 | /* |
@@ -318,18 +318,18 @@ xfs_alloc_delrec( | |||
318 | /* | 318 | /* |
319 | * Grab the current block number, for future use. | 319 | * Grab the current block number, for future use. |
320 | */ | 320 | */ |
321 | bno = INT_GET(right->bb_leftsib, ARCH_CONVERT); | 321 | bno = be32_to_cpu(right->bb_leftsib); |
322 | /* | 322 | /* |
323 | * If right block is full enough so that removing one entry | 323 | * If right block is full enough so that removing one entry |
324 | * won't make it too empty, and left-shifting an entry out | 324 | * won't make it too empty, and left-shifting an entry out |
325 | * of right to us works, we're done. | 325 | * of right to us works, we're done. |
326 | */ | 326 | */ |
327 | if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >= | 327 | if (be16_to_cpu(right->bb_numrecs) - 1 >= |
328 | XFS_ALLOC_BLOCK_MINRECS(level, cur)) { | 328 | XFS_ALLOC_BLOCK_MINRECS(level, cur)) { |
329 | if ((error = xfs_alloc_lshift(tcur, level, &i))) | 329 | if ((error = xfs_alloc_lshift(tcur, level, &i))) |
330 | goto error0; | 330 | goto error0; |
331 | if (i) { | 331 | if (i) { |
332 | ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= | 332 | ASSERT(be16_to_cpu(block->bb_numrecs) >= |
333 | XFS_ALLOC_BLOCK_MINRECS(level, cur)); | 333 | XFS_ALLOC_BLOCK_MINRECS(level, cur)); |
334 | xfs_btree_del_cursor(tcur, | 334 | xfs_btree_del_cursor(tcur, |
335 | XFS_BTREE_NOERROR); | 335 | XFS_BTREE_NOERROR); |
@@ -346,7 +346,7 @@ xfs_alloc_delrec( | |||
346 | * future reference, and fix up the temp cursor to point | 346 | * future reference, and fix up the temp cursor to point |
347 | * to our block again (last record). | 347 | * to our block again (last record). |
348 | */ | 348 | */ |
349 | rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); | 349 | rrecs = be16_to_cpu(right->bb_numrecs); |
350 | if (lbno != NULLAGBLOCK) { | 350 | if (lbno != NULLAGBLOCK) { |
351 | i = xfs_btree_firstrec(tcur, level); | 351 | i = xfs_btree_firstrec(tcur, level); |
352 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | 352 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); |
@@ -382,18 +382,18 @@ xfs_alloc_delrec( | |||
382 | /* | 382 | /* |
383 | * Grab the current block number, for future use. | 383 | * Grab the current block number, for future use. |
384 | */ | 384 | */ |
385 | bno = INT_GET(left->bb_rightsib, ARCH_CONVERT); | 385 | bno = be32_to_cpu(left->bb_rightsib); |
386 | /* | 386 | /* |
387 | * If left block is full enough so that removing one entry | 387 | * If left block is full enough so that removing one entry |
388 | * won't make it too empty, and right-shifting an entry out | 388 | * won't make it too empty, and right-shifting an entry out |
389 | * of left to us works, we're done. | 389 | * of left to us works, we're done. |
390 | */ | 390 | */ |
391 | if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >= | 391 | if (be16_to_cpu(left->bb_numrecs) - 1 >= |
392 | XFS_ALLOC_BLOCK_MINRECS(level, cur)) { | 392 | XFS_ALLOC_BLOCK_MINRECS(level, cur)) { |
393 | if ((error = xfs_alloc_rshift(tcur, level, &i))) | 393 | if ((error = xfs_alloc_rshift(tcur, level, &i))) |
394 | goto error0; | 394 | goto error0; |
395 | if (i) { | 395 | if (i) { |
396 | ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= | 396 | ASSERT(be16_to_cpu(block->bb_numrecs) >= |
397 | XFS_ALLOC_BLOCK_MINRECS(level, cur)); | 397 | XFS_ALLOC_BLOCK_MINRECS(level, cur)); |
398 | xfs_btree_del_cursor(tcur, | 398 | xfs_btree_del_cursor(tcur, |
399 | XFS_BTREE_NOERROR); | 399 | XFS_BTREE_NOERROR); |
@@ -407,7 +407,7 @@ xfs_alloc_delrec( | |||
407 | * Otherwise, grab the number of records in right for | 407 | * Otherwise, grab the number of records in right for |
408 | * future reference. | 408 | * future reference. |
409 | */ | 409 | */ |
410 | lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); | 410 | lrecs = be16_to_cpu(left->bb_numrecs); |
411 | } | 411 | } |
412 | /* | 412 | /* |
413 | * Delete the temp cursor, we're done with it. | 413 | * Delete the temp cursor, we're done with it. |
@@ -421,7 +421,7 @@ xfs_alloc_delrec( | |||
421 | * See if we can join with the left neighbor block. | 421 | * See if we can join with the left neighbor block. |
422 | */ | 422 | */ |
423 | if (lbno != NULLAGBLOCK && | 423 | if (lbno != NULLAGBLOCK && |
424 | lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 424 | lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
425 | /* | 425 | /* |
426 | * Set "right" to be the starting block, | 426 | * Set "right" to be the starting block, |
427 | * "left" to be the left neighbor. | 427 | * "left" to be the left neighbor. |
@@ -441,7 +441,7 @@ xfs_alloc_delrec( | |||
441 | * If that won't work, see if we can join with the right neighbor block. | 441 | * If that won't work, see if we can join with the right neighbor block. |
442 | */ | 442 | */ |
443 | else if (rbno != NULLAGBLOCK && | 443 | else if (rbno != NULLAGBLOCK && |
444 | rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= | 444 | rrecs + be16_to_cpu(block->bb_numrecs) <= |
445 | XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 445 | XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
446 | /* | 446 | /* |
447 | * Set "left" to be the starting block, | 447 | * Set "left" to be the starting block, |
@@ -476,31 +476,34 @@ xfs_alloc_delrec( | |||
476 | /* | 476 | /* |
477 | * It's a non-leaf. Move keys and pointers. | 477 | * It's a non-leaf. Move keys and pointers. |
478 | */ | 478 | */ |
479 | lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); | 479 | lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); |
480 | lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); | 480 | lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); |
481 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); | 481 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); |
482 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); | 482 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); |
483 | #ifdef DEBUG | 483 | #ifdef DEBUG |
484 | for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { | 484 | for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { |
485 | if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) | 485 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) |
486 | return error; | 486 | return error; |
487 | } | 487 | } |
488 | #endif | 488 | #endif |
489 | memcpy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lkp)); /* INT_: structure copy */ | 489 | memcpy(lkp, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*lkp)); |
490 | memcpy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lpp)); /* INT_: structure copy */ | 490 | memcpy(lpp, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*lpp)); |
491 | xfs_alloc_log_keys(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, | 491 | xfs_alloc_log_keys(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1, |
492 | INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 492 | be16_to_cpu(left->bb_numrecs) + |
493 | xfs_alloc_log_ptrs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, | 493 | be16_to_cpu(right->bb_numrecs)); |
494 | INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 494 | xfs_alloc_log_ptrs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1, |
| | 495 | be16_to_cpu(left->bb_numrecs) + |
| | 496 | be16_to_cpu(right->bb_numrecs)); |
495 | } else { | 497 | } else { |
496 | /* | 498 | /* |
497 | * It's a leaf. Move records. | 499 | * It's a leaf. Move records. |
498 | */ | 500 | */ |
499 | lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); | 501 | lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); |
500 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); | 502 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); |
501 | memcpy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lrp)); | 503 | memcpy(lrp, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*lrp)); |
502 | xfs_alloc_log_recs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, | 504 | xfs_alloc_log_recs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1, |
503 | INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 505 | be16_to_cpu(left->bb_numrecs) + |
| | 506 | be16_to_cpu(right->bb_numrecs)); |
504 | } | 507 | } |
505 | /* | 508 | /* |
506 | * If we joined with the left neighbor, set the buffer in the | 509 | * If we joined with the left neighbor, set the buffer in the |
@@ -508,7 +511,7 @@ xfs_alloc_delrec( | |||
508 | */ | 511 | */ |
509 | if (bp != lbp) { | 512 | if (bp != lbp) { |
510 | xfs_btree_setbuf(cur, level, lbp); | 513 | xfs_btree_setbuf(cur, level, lbp); |
511 | cur->bc_ptrs[level] += INT_GET(left->bb_numrecs, ARCH_CONVERT); | 514 | cur->bc_ptrs[level] += be16_to_cpu(left->bb_numrecs); |
512 | } | 515 | } |
513 | /* | 516 | /* |
514 | * If we joined with the right neighbor and there's a level above | 517 | * If we joined with the right neighbor and there's a level above |
@@ -520,28 +523,28 @@ xfs_alloc_delrec( | |||
520 | /* | 523 | /* |
521 | * Fix up the number of records in the surviving block. | 524 | * Fix up the number of records in the surviving block. |
522 | */ | 525 | */ |
523 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 526 | be16_add(&left->bb_numrecs, be16_to_cpu(right->bb_numrecs)); |
524 | /* | 527 | /* |
525 | * Fix up the right block pointer in the surviving block, and log it. | 528 | * Fix up the right block pointer in the surviving block, and log it. |
526 | */ | 529 | */ |
527 | left->bb_rightsib = right->bb_rightsib; /* INT_: direct copy */ | 530 | left->bb_rightsib = right->bb_rightsib; |
528 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); | 531 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); |
529 | /* | 532 | /* |
530 | * If there is a right sibling now, make it point to the | 533 | * If there is a right sibling now, make it point to the |
531 | * remaining block. | 534 | * remaining block. |
532 | */ | 535 | */ |
533 | if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 536 | if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) { |
534 | xfs_alloc_block_t *rrblock; | 537 | xfs_alloc_block_t *rrblock; |
535 | xfs_buf_t *rrbp; | 538 | xfs_buf_t *rrbp; |
536 | 539 | ||
537 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 540 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
538 | cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, | 541 | cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0, |
539 | &rrbp, XFS_ALLOC_BTREE_REF))) | 542 | &rrbp, XFS_ALLOC_BTREE_REF))) |
540 | return error; | 543 | return error; |
541 | rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); | 544 | rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); |
542 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) | 545 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) |
543 | return error; | 546 | return error; |
544 | INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno); | 547 | rrblock->bb_leftsib = cpu_to_be32(lbno); |
545 | xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); | 548 | xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); |
546 | } | 549 | } |
547 | /* | 550 | /* |
@@ -562,10 +565,9 @@ xfs_alloc_delrec( | |||
562 | * busy block is allocated, the iclog is pushed up to the | 565 | * busy block is allocated, the iclog is pushed up to the |
563 | * LSN that freed the block. | 566 | * LSN that freed the block. |
564 | */ | 567 | */ |
565 | xfs_alloc_mark_busy(cur->bc_tp, | 568 | xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1); |
566 | INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); | ||
567 | |||
568 | xfs_trans_agbtree_delta(cur->bc_tp, -1); | 569 | xfs_trans_agbtree_delta(cur->bc_tp, -1); |
570 | |||
569 | /* | 571 | /* |
570 | * Adjust the current level's cursor so that we're left referring | 572 | * Adjust the current level's cursor so that we're left referring |
571 | * to the right node, after we're done. | 573 | * to the right node, after we're done. |
@@ -613,7 +615,7 @@ xfs_alloc_insrec( | |||
613 | int ptr; /* index in btree block for this rec */ | 615 | int ptr; /* index in btree block for this rec */ |
614 | xfs_alloc_rec_t *rp; /* pointer to btree records */ | 616 | xfs_alloc_rec_t *rp; /* pointer to btree records */ |
615 | 617 | ||
616 | ASSERT(INT_GET(recp->ar_blockcount, ARCH_CONVERT) > 0); | 618 | ASSERT(be32_to_cpu(recp->ar_blockcount) > 0); |
617 | 619 | ||
618 | /* | 620 | /* |
619 | * GCC doesn't understand the (arguably complex) control flow in | 621 | * GCC doesn't understand the (arguably complex) control flow in |
@@ -637,8 +639,8 @@ xfs_alloc_insrec( | |||
637 | /* | 639 | /* |
638 | * Make a key out of the record data to be inserted, and save it. | 640 | * Make a key out of the record data to be inserted, and save it. |
639 | */ | 641 | */ |
640 | key.ar_startblock = recp->ar_startblock; /* INT_: direct copy */ | 642 | key.ar_startblock = recp->ar_startblock; |
641 | key.ar_blockcount = recp->ar_blockcount; /* INT_: direct copy */ | 643 | key.ar_blockcount = recp->ar_blockcount; |
642 | optr = ptr = cur->bc_ptrs[level]; | 644 | optr = ptr = cur->bc_ptrs[level]; |
643 | /* | 645 | /* |
644 | * If we're off the left edge, return failure. | 646 | * If we're off the left edge, return failure. |
@@ -659,7 +661,7 @@ xfs_alloc_insrec( | |||
659 | /* | 661 | /* |
660 | * Check that the new entry is being inserted in the right place. | 662 | * Check that the new entry is being inserted in the right place. |
661 | */ | 663 | */ |
662 | if (ptr <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 664 | if (ptr <= be16_to_cpu(block->bb_numrecs)) { |
663 | if (level == 0) { | 665 | if (level == 0) { |
664 | rp = XFS_ALLOC_REC_ADDR(block, ptr, cur); | 666 | rp = XFS_ALLOC_REC_ADDR(block, ptr, cur); |
665 | xfs_btree_check_rec(cur->bc_btnum, recp, rp); | 667 | xfs_btree_check_rec(cur->bc_btnum, recp, rp); |
@@ -675,7 +677,7 @@ xfs_alloc_insrec( | |||
675 | * If the block is full, we can't insert the new entry until we | 677 | * If the block is full, we can't insert the new entry until we |
676 | * make the block un-full. | 678 | * make the block un-full. |
677 | */ | 679 | */ |
678 | if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 680 | if (be16_to_cpu(block->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
679 | /* | 681 | /* |
680 | * First, try shifting an entry to the right neighbor. | 682 | * First, try shifting an entry to the right neighbor. |
681 | */ | 683 | */ |
@@ -712,8 +714,8 @@ xfs_alloc_insrec( | |||
712 | return error; | 714 | return error; |
713 | #endif | 715 | #endif |
714 | ptr = cur->bc_ptrs[level]; | 716 | ptr = cur->bc_ptrs[level]; |
715 | nrec.ar_startblock = nkey.ar_startblock; /* INT_: direct copy */ | 717 | nrec.ar_startblock = nkey.ar_startblock; |
716 | nrec.ar_blockcount = nkey.ar_blockcount; /* INT_: direct copy */ | 718 | nrec.ar_blockcount = nkey.ar_blockcount; |
717 | } | 719 | } |
718 | /* | 720 | /* |
719 | * Otherwise the insert fails. | 721 | * Otherwise the insert fails. |
@@ -737,15 +739,15 @@ xfs_alloc_insrec( | |||
737 | kp = XFS_ALLOC_KEY_ADDR(block, 1, cur); | 739 | kp = XFS_ALLOC_KEY_ADDR(block, 1, cur); |
738 | pp = XFS_ALLOC_PTR_ADDR(block, 1, cur); | 740 | pp = XFS_ALLOC_PTR_ADDR(block, 1, cur); |
739 | #ifdef DEBUG | 741 | #ifdef DEBUG |
740 | for (i = INT_GET(block->bb_numrecs, ARCH_CONVERT); i >= ptr; i--) { | 742 | for (i = be16_to_cpu(block->bb_numrecs); i >= ptr; i--) { |
741 | if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), level))) | 743 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level))) |
742 | return error; | 744 | return error; |
743 | } | 745 | } |
744 | #endif | 746 | #endif |
745 | memmove(&kp[ptr], &kp[ptr - 1], | 747 | memmove(&kp[ptr], &kp[ptr - 1], |
746 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*kp)); /* INT_: copy */ | 748 | (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*kp)); |
747 | memmove(&pp[ptr], &pp[ptr - 1], | 749 | memmove(&pp[ptr], &pp[ptr - 1], |
748 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*pp)); /* INT_: copy */ | 750 | (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*pp)); |
749 | #ifdef DEBUG | 751 | #ifdef DEBUG |
750 | if ((error = xfs_btree_check_sptr(cur, *bnop, level))) | 752 | if ((error = xfs_btree_check_sptr(cur, *bnop, level))) |
751 | return error; | 753 | return error; |
@@ -754,12 +756,12 @@ xfs_alloc_insrec( | |||
754 | * Now stuff the new data in, bump numrecs and log the new data. | 756 | * Now stuff the new data in, bump numrecs and log the new data. |
755 | */ | 757 | */ |
756 | kp[ptr - 1] = key; | 758 | kp[ptr - 1] = key; |
757 | INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); | 759 | pp[ptr - 1] = cpu_to_be32(*bnop); |
758 | INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); | 760 | be16_add(&block->bb_numrecs, 1); |
759 | xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); | 761 | xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); |
760 | xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); | 762 | xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); |
761 | #ifdef DEBUG | 763 | #ifdef DEBUG |
762 | if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 764 | if (ptr < be16_to_cpu(block->bb_numrecs)) |
763 | xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1, | 765 | xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1, |
764 | kp + ptr); | 766 | kp + ptr); |
765 | #endif | 767 | #endif |
@@ -769,16 +771,16 @@ xfs_alloc_insrec( | |||
769 | */ | 771 | */ |
770 | rp = XFS_ALLOC_REC_ADDR(block, 1, cur); | 772 | rp = XFS_ALLOC_REC_ADDR(block, 1, cur); |
771 | memmove(&rp[ptr], &rp[ptr - 1], | 773 | memmove(&rp[ptr], &rp[ptr - 1], |
772 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*rp)); | 774 | (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*rp)); |
773 | /* | 775 | /* |
774 | * Now stuff the new record in, bump numrecs | 776 | * Now stuff the new record in, bump numrecs |
775 | * and log the new data. | 777 | * and log the new data. |
776 | */ | 778 | */ |
777 | rp[ptr - 1] = *recp; /* INT_: struct copy */ | 779 | rp[ptr - 1] = *recp; /* INT_: struct copy */ |
778 | INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); | 780 | be16_add(&block->bb_numrecs, 1); |
779 | xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); | 781 | xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); |
780 | #ifdef DEBUG | 782 | #ifdef DEBUG |
781 | if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 783 | if (ptr < be16_to_cpu(block->bb_numrecs)) |
782 | xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1, | 784 | xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1, |
783 | rp + ptr); | 785 | rp + ptr); |
784 | #endif | 786 | #endif |
@@ -800,16 +802,16 @@ xfs_alloc_insrec( | |||
800 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 802 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
801 | if (level == 0 && | 803 | if (level == 0 && |
802 | cur->bc_btnum == XFS_BTNUM_CNT && | 804 | cur->bc_btnum == XFS_BTNUM_CNT && |
803 | INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && | 805 | be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && |
804 | INT_GET(recp->ar_blockcount, ARCH_CONVERT) > INT_GET(agf->agf_longest, ARCH_CONVERT)) { | 806 | be32_to_cpu(recp->ar_blockcount) > be32_to_cpu(agf->agf_longest)) { |
805 | /* | 807 | /* |
806 | * If this is a leaf in the by-size btree and there | 808 | * If this is a leaf in the by-size btree and there |
807 | * is no right sibling block and this block is bigger | 809 | * is no right sibling block and this block is bigger |
808 | * than the previous longest block, update it. | 810 | * than the previous longest block, update it. |
809 | */ | 811 | */ |
810 | INT_COPY(agf->agf_longest, recp->ar_blockcount, ARCH_CONVERT); | 812 | agf->agf_longest = recp->ar_blockcount; |
811 | cur->bc_mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest | 813 | cur->bc_mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest |
812 | = INT_GET(recp->ar_blockcount, ARCH_CONVERT); | 814 | = be32_to_cpu(recp->ar_blockcount); |
813 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 815 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
814 | XFS_AGF_LONGEST); | 816 | XFS_AGF_LONGEST); |
815 | } | 817 | } |
@@ -919,8 +921,9 @@ xfs_alloc_log_recs( | |||
919 | 921 | ||
920 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 922 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
921 | for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++) | 923 | for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++) |
922 | ASSERT(INT_GET(p->ar_startblock, ARCH_CONVERT) + INT_GET(p->ar_blockcount, ARCH_CONVERT) <= | 924 | ASSERT(be32_to_cpu(p->ar_startblock) + |
923 | INT_GET(agf->agf_length, ARCH_CONVERT)); | 925 | be32_to_cpu(p->ar_blockcount) <= |
| | 926 | be32_to_cpu(agf->agf_length)); |
924 | } | 927 | } |
925 | #endif | 928 | #endif |
926 | first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); | 929 | first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); |
@@ -957,8 +960,8 @@ xfs_alloc_lookup( | |||
957 | xfs_agf_t *agf; /* a.g. freespace header */ | 960 | xfs_agf_t *agf; /* a.g. freespace header */ |
958 | 961 | ||
959 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 962 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
960 | agno = INT_GET(agf->agf_seqno, ARCH_CONVERT); | 963 | agno = be32_to_cpu(agf->agf_seqno); |
961 | agbno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT); | 964 | agbno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]); |
962 | } | 965 | } |
963 | /* | 966 | /* |
964 | * Iterate over each level in the btree, starting at the root. | 967 | * Iterate over each level in the btree, starting at the root. |
@@ -1025,7 +1028,7 @@ xfs_alloc_lookup( | |||
1025 | * Set low and high entry numbers, 1-based. | 1028 | * Set low and high entry numbers, 1-based. |
1026 | */ | 1029 | */ |
1027 | low = 1; | 1030 | low = 1; |
1028 | if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) { | 1031 | if (!(high = be16_to_cpu(block->bb_numrecs))) { |
1029 | /* | 1032 | /* |
1030 | * If the block is empty, the tree must | 1033 | * If the block is empty, the tree must |
1031 | * be an empty leaf. | 1034 | * be an empty leaf. |
@@ -1054,14 +1057,14 @@ xfs_alloc_lookup( | |||
1054 | xfs_alloc_key_t *kkp; | 1057 | xfs_alloc_key_t *kkp; |
1055 | 1058 | ||
1056 | kkp = kkbase + keyno - 1; | 1059 | kkp = kkbase + keyno - 1; |
1057 | startblock = INT_GET(kkp->ar_startblock, ARCH_CONVERT); | 1060 | startblock = be32_to_cpu(kkp->ar_startblock); |
1058 | blockcount = INT_GET(kkp->ar_blockcount, ARCH_CONVERT); | 1061 | blockcount = be32_to_cpu(kkp->ar_blockcount); |
1059 | } else { | 1062 | } else { |
1060 | xfs_alloc_rec_t *krp; | 1063 | xfs_alloc_rec_t *krp; |
1061 | 1064 | ||
1062 | krp = krbase + keyno - 1; | 1065 | krp = krbase + keyno - 1; |
1063 | startblock = INT_GET(krp->ar_startblock, ARCH_CONVERT); | 1066 | startblock = be32_to_cpu(krp->ar_startblock); |
1064 | blockcount = INT_GET(krp->ar_blockcount, ARCH_CONVERT); | 1067 | blockcount = be32_to_cpu(krp->ar_blockcount); |
1065 | } | 1068 | } |
1066 | /* | 1069 | /* |
1067 | * Compute difference to get next direction. | 1070 | * Compute difference to get next direction. |
@@ -1101,7 +1104,7 @@ xfs_alloc_lookup( | |||
1101 | */ | 1104 | */ |
1102 | if (diff > 0 && --keyno < 1) | 1105 | if (diff > 0 && --keyno < 1) |
1103 | keyno = 1; | 1106 | keyno = 1; |
1104 | agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, keyno, cur), ARCH_CONVERT); | 1107 | agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, keyno, cur)); |
1105 | #ifdef DEBUG | 1108 | #ifdef DEBUG |
1106 | if ((error = xfs_btree_check_sptr(cur, agbno, level))) | 1109 | if ((error = xfs_btree_check_sptr(cur, agbno, level))) |
1107 | return error; | 1110 | return error; |
@@ -1120,8 +1123,8 @@ xfs_alloc_lookup( | |||
1120 | * not the last block, we're in the wrong block. | 1123 | * not the last block, we're in the wrong block. |
1121 | */ | 1124 | */ |
1122 | if (dir == XFS_LOOKUP_GE && | 1125 | if (dir == XFS_LOOKUP_GE && |
1123 | keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) && | 1126 | keyno > be16_to_cpu(block->bb_numrecs) && |
1124 | INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 1127 | be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) { |
1125 | int i; | 1128 | int i; |
1126 | 1129 | ||
1127 | cur->bc_ptrs[0] = keyno; | 1130 | cur->bc_ptrs[0] = keyno; |
@@ -1138,7 +1141,7 @@ xfs_alloc_lookup( | |||
1138 | /* | 1141 | /* |
1139 | * Return if we succeeded or not. | 1142 | * Return if we succeeded or not. |
1140 | */ | 1143 | */ |
1141 | if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 1144 | if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) |
1142 | *stat = 0; | 1145 | *stat = 0; |
1143 | else | 1146 | else |
1144 | *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); | 1147 | *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); |
@@ -1181,7 +1184,7 @@ xfs_alloc_lshift( | |||
1181 | /* | 1184 | /* |
1182 | * If we've got no left sibling then we can't shift an entry left. | 1185 | * If we've got no left sibling then we can't shift an entry left. |
1183 | */ | 1186 | */ |
1184 | if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1187 | if (be32_to_cpu(right->bb_leftsib) == NULLAGBLOCK) { |
1185 | *stat = 0; | 1188 | *stat = 0; |
1186 | return 0; | 1189 | return 0; |
1187 | } | 1190 | } |
@@ -1197,8 +1200,8 @@ xfs_alloc_lshift( | |||
1197 | * Set up the left neighbor as "left". | 1200 | * Set up the left neighbor as "left". |
1198 | */ | 1201 | */ |
1199 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1202 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1200 | cur->bc_private.a.agno, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, &lbp, | 1203 | cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib), |
1201 | XFS_ALLOC_BTREE_REF))) | 1204 | 0, &lbp, XFS_ALLOC_BTREE_REF))) |
1202 | return error; | 1205 | return error; |
1203 | left = XFS_BUF_TO_ALLOC_BLOCK(lbp); | 1206 | left = XFS_BUF_TO_ALLOC_BLOCK(lbp); |
1204 | if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) | 1207 | if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) |
@@ -1206,11 +1209,11 @@ xfs_alloc_lshift( | |||
1206 | /* | 1209 | /* |
1207 | * If it's full, it can't take another entry. | 1210 | * If it's full, it can't take another entry. |
1208 | */ | 1211 | */ |
1209 | if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 1212 | if (be16_to_cpu(left->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
1210 | *stat = 0; | 1213 | *stat = 0; |
1211 | return 0; | 1214 | return 0; |
1212 | } | 1215 | } |
1213 | nrec = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1; | 1216 | nrec = be16_to_cpu(left->bb_numrecs) + 1; |
1214 | /* | 1217 | /* |
1215 | * If non-leaf, copy a key and a ptr to the left block. | 1218 | * If non-leaf, copy a key and a ptr to the left block. |
1216 | */ | 1219 | */ |
@@ -1225,7 +1228,7 @@ xfs_alloc_lshift( | |||
1225 | lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur); | 1228 | lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur); |
1226 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); | 1229 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); |
1227 | #ifdef DEBUG | 1230 | #ifdef DEBUG |
1228 | if ((error = xfs_btree_check_sptr(cur, INT_GET(*rpp, ARCH_CONVERT), level))) | 1231 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level))) |
1229 | return error; | 1232 | return error; |
1230 | #endif | 1233 | #endif |
1231 | *lpp = *rpp; /* INT_: copy */ | 1234 | *lpp = *rpp; /* INT_: copy */ |
@@ -1247,30 +1250,30 @@ xfs_alloc_lshift( | |||
1247 | /* | 1250 | /* |
1248 | * Bump and log left's numrecs, decrement and log right's numrecs. | 1251 | * Bump and log left's numrecs, decrement and log right's numrecs. |
1249 | */ | 1252 | */ |
1250 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, +1); | 1253 | be16_add(&left->bb_numrecs, 1); |
1251 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); | 1254 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); |
1252 | INT_MOD(right->bb_numrecs, ARCH_CONVERT, -1); | 1255 | be16_add(&right->bb_numrecs, -1); |
1253 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); | 1256 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); |
1254 | /* | 1257 | /* |
1255 | * Slide the contents of right down one entry. | 1258 | * Slide the contents of right down one entry. |
1256 | */ | 1259 | */ |
1257 | if (level > 0) { | 1260 | if (level > 0) { |
1258 | #ifdef DEBUG | 1261 | #ifdef DEBUG |
1259 | for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { | 1262 | for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { |
1260 | if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT), | 1263 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i + 1]), |
1261 | level))) | 1264 | level))) |
1262 | return error; | 1265 | return error; |
1263 | } | 1266 | } |
1264 | #endif | 1267 | #endif |
1265 | memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); | 1268 | memmove(rkp, rkp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); |
1266 | memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); | 1269 | memmove(rpp, rpp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); |
1267 | xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1270 | xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1268 | xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1271 | xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1269 | } else { | 1272 | } else { |
1270 | memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); | 1273 | memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); |
1271 | xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1274 | xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1272 | key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ | 1275 | key.ar_startblock = rrp->ar_startblock; |
1273 | key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ | 1276 | key.ar_blockcount = rrp->ar_blockcount; |
1274 | rkp = &key; | 1277 | rkp = &key; |
1275 | } | 1278 | } |
1276 | /* | 1279 | /* |
@@ -1335,9 +1338,9 @@ xfs_alloc_newroot( | |||
1335 | xfs_agnumber_t seqno; | 1338 | xfs_agnumber_t seqno; |
1336 | 1339 | ||
1337 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 1340 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
1338 | INT_SET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT, nbno); | 1341 | agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno); |
1339 | INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, 1); | 1342 | be32_add(&agf->agf_levels[cur->bc_btnum], 1); |
1340 | seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT); | 1343 | seqno = be32_to_cpu(agf->agf_seqno); |
1341 | mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++; | 1344 | mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++; |
1342 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 1345 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
1343 | XFS_AGF_ROOTS | XFS_AGF_LEVELS); | 1346 | XFS_AGF_ROOTS | XFS_AGF_LEVELS); |
@@ -1354,12 +1357,12 @@ xfs_alloc_newroot( | |||
1354 | if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp))) | 1357 | if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp))) |
1355 | return error; | 1358 | return error; |
1356 | #endif | 1359 | #endif |
1357 | if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 1360 | if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) { |
1358 | /* | 1361 | /* |
1359 | * Our block is left, pick up the right block. | 1362 | * Our block is left, pick up the right block. |
1360 | */ | 1363 | */ |
1361 | lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp)); | 1364 | lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp)); |
1362 | rbno = INT_GET(left->bb_rightsib, ARCH_CONVERT); | 1365 | rbno = be32_to_cpu(left->bb_rightsib); |
1363 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 1366 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
1364 | cur->bc_private.a.agno, rbno, 0, &rbp, | 1367 | cur->bc_private.a.agno, rbno, 0, &rbp, |
1365 | XFS_ALLOC_BTREE_REF))) | 1368 | XFS_ALLOC_BTREE_REF))) |
@@ -1376,7 +1379,7 @@ xfs_alloc_newroot( | |||
1376 | rbp = lbp; | 1379 | rbp = lbp; |
1377 | right = left; | 1380 | right = left; |
1378 | rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp)); | 1381 | rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp)); |
1379 | lbno = INT_GET(right->bb_leftsib, ARCH_CONVERT); | 1382 | lbno = be32_to_cpu(right->bb_leftsib); |
1380 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 1383 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
1381 | cur->bc_private.a.agno, lbno, 0, &lbp, | 1384 | cur->bc_private.a.agno, lbno, 0, &lbp, |
1382 | XFS_ALLOC_BTREE_REF))) | 1385 | XFS_ALLOC_BTREE_REF))) |
@@ -1390,11 +1393,11 @@ xfs_alloc_newroot( | |||
1390 | /* | 1393 | /* |
1391 | * Fill in the new block's btree header and log it. | 1394 | * Fill in the new block's btree header and log it. |
1392 | */ | 1395 | */ |
1393 | INT_SET(new->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); | 1396 | new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); |
1394 | INT_SET(new->bb_level, ARCH_CONVERT, (__uint16_t)cur->bc_nlevels); | 1397 | new->bb_level = cpu_to_be16(cur->bc_nlevels); |
1395 | INT_SET(new->bb_numrecs, ARCH_CONVERT, 2); | 1398 | new->bb_numrecs = cpu_to_be16(2); |
1396 | INT_SET(new->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); | 1399 | new->bb_leftsib = cpu_to_be32(NULLAGBLOCK); |
1397 | INT_SET(new->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); | 1400 | new->bb_rightsib = cpu_to_be32(NULLAGBLOCK); |
1398 | xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS); | 1401 | xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS); |
1399 | ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); | 1402 | ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); |
1400 | /* | 1403 | /* |
@@ -1404,18 +1407,18 @@ xfs_alloc_newroot( | |||
1404 | xfs_alloc_key_t *kp; /* btree key pointer */ | 1407 | xfs_alloc_key_t *kp; /* btree key pointer */ |
1405 | 1408 | ||
1406 | kp = XFS_ALLOC_KEY_ADDR(new, 1, cur); | 1409 | kp = XFS_ALLOC_KEY_ADDR(new, 1, cur); |
1407 | if (INT_GET(left->bb_level, ARCH_CONVERT) > 0) { | 1410 | if (be16_to_cpu(left->bb_level) > 0) { |
1408 | kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur); /* INT_: structure copy */ | 1411 | kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur); /* INT_: structure copy */ |
1409 | kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);/* INT_: structure copy */ | 1412 | kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);/* INT_: structure copy */ |
1410 | } else { | 1413 | } else { |
1411 | xfs_alloc_rec_t *rp; /* btree record pointer */ | 1414 | xfs_alloc_rec_t *rp; /* btree record pointer */ |
1412 | 1415 | ||
1413 | rp = XFS_ALLOC_REC_ADDR(left, 1, cur); | 1416 | rp = XFS_ALLOC_REC_ADDR(left, 1, cur); |
1414 | kp[0].ar_startblock = rp->ar_startblock; /* INT_: direct copy */ | 1417 | kp[0].ar_startblock = rp->ar_startblock; |
1415 | kp[0].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */ | 1418 | kp[0].ar_blockcount = rp->ar_blockcount; |
1416 | rp = XFS_ALLOC_REC_ADDR(right, 1, cur); | 1419 | rp = XFS_ALLOC_REC_ADDR(right, 1, cur); |
1417 | kp[1].ar_startblock = rp->ar_startblock; /* INT_: direct copy */ | 1420 | kp[1].ar_startblock = rp->ar_startblock; |
1418 | kp[1].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */ | 1421 | kp[1].ar_blockcount = rp->ar_blockcount; |
1419 | } | 1422 | } |
1420 | } | 1423 | } |
1421 | xfs_alloc_log_keys(cur, nbp, 1, 2); | 1424 | xfs_alloc_log_keys(cur, nbp, 1, 2); |
@@ -1426,8 +1429,8 @@ xfs_alloc_newroot( | |||
1426 | xfs_alloc_ptr_t *pp; /* btree address pointer */ | 1429 | xfs_alloc_ptr_t *pp; /* btree address pointer */ |
1427 | 1430 | ||
1428 | pp = XFS_ALLOC_PTR_ADDR(new, 1, cur); | 1431 | pp = XFS_ALLOC_PTR_ADDR(new, 1, cur); |
1429 | INT_SET(pp[0], ARCH_CONVERT, lbno); | 1432 | pp[0] = cpu_to_be32(lbno); |
1430 | INT_SET(pp[1], ARCH_CONVERT, rbno); | 1433 | pp[1] = cpu_to_be32(rbno); |
1431 | } | 1434 | } |
1432 | xfs_alloc_log_ptrs(cur, nbp, 1, 2); | 1435 | xfs_alloc_log_ptrs(cur, nbp, 1, 2); |
1433 | /* | 1436 | /* |
@@ -1472,7 +1475,7 @@ xfs_alloc_rshift( | |||
1472 | /* | 1475 | /* |
1473 | * If we've got no right sibling then we can't shift an entry right. | 1476 | * If we've got no right sibling then we can't shift an entry right. |
1474 | */ | 1477 | */ |
1475 | if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1478 | if (be32_to_cpu(left->bb_rightsib) == NULLAGBLOCK) { |
1476 | *stat = 0; | 1479 | *stat = 0; |
1477 | return 0; | 1480 | return 0; |
1478 | } | 1481 | } |
@@ -1480,7 +1483,7 @@ xfs_alloc_rshift( | |||
1480 | * If the cursor entry is the one that would be moved, don't | 1483 | * If the cursor entry is the one that would be moved, don't |
1481 | * do it... it's too complicated. | 1484 | * do it... it's too complicated. |
1482 | */ | 1485 | */ |
1483 | if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) { | 1486 | if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) { |
1484 | *stat = 0; | 1487 | *stat = 0; |
1485 | return 0; | 1488 | return 0; |
1486 | } | 1489 | } |
@@ -1488,8 +1491,8 @@ xfs_alloc_rshift( | |||
1488 | * Set up the right neighbor as "right". | 1491 | * Set up the right neighbor as "right". |
1489 | */ | 1492 | */ |
1490 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1493 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1491 | cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rbp, | 1494 | cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), |
1492 | XFS_ALLOC_BTREE_REF))) | 1495 | 0, &rbp, XFS_ALLOC_BTREE_REF))) |
1493 | return error; | 1496 | return error; |
1494 | right = XFS_BUF_TO_ALLOC_BLOCK(rbp); | 1497 | right = XFS_BUF_TO_ALLOC_BLOCK(rbp); |
1495 | if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) | 1498 | if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) |
@@ -1497,7 +1500,7 @@ xfs_alloc_rshift( | |||
1497 | /* | 1500 | /* |
1498 | * If it's full, it can't take another entry. | 1501 | * If it's full, it can't take another entry. |
1499 | */ | 1502 | */ |
1500 | if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 1503 | if (be16_to_cpu(right->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
1501 | *stat = 0; | 1504 | *stat = 0; |
1502 | return 0; | 1505 | return 0; |
1503 | } | 1506 | } |
@@ -1510,47 +1513,47 @@ xfs_alloc_rshift( | |||
1510 | xfs_alloc_ptr_t *lpp; /* address pointer for left block */ | 1513 | xfs_alloc_ptr_t *lpp; /* address pointer for left block */ |
1511 | xfs_alloc_ptr_t *rpp; /* address pointer for right block */ | 1514 | xfs_alloc_ptr_t *rpp; /* address pointer for right block */ |
1512 | 1515 | ||
1513 | lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1516 | lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1514 | lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1517 | lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1515 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); | 1518 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); |
1516 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); | 1519 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); |
1517 | #ifdef DEBUG | 1520 | #ifdef DEBUG |
1518 | for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) { | 1521 | for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) { |
1519 | if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) | 1522 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) |
1520 | return error; | 1523 | return error; |
1521 | } | 1524 | } |
1522 | #endif | 1525 | #endif |
1523 | memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); | 1526 | memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); |
1524 | memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); | 1527 | memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); |
1525 | #ifdef DEBUG | 1528 | #ifdef DEBUG |
1526 | if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) | 1529 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level))) |
1527 | return error; | 1530 | return error; |
1528 | #endif | 1531 | #endif |
1529 | *rkp = *lkp; /* INT_: copy */ | 1532 | *rkp = *lkp; /* INT_: copy */ |
1530 | *rpp = *lpp; /* INT_: copy */ | 1533 | *rpp = *lpp; /* INT_: copy */ |
1531 | xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1534 | xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1532 | xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1535 | xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1533 | xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); | 1536 | xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); |
1534 | } else { | 1537 | } else { |
1535 | xfs_alloc_rec_t *lrp; /* record pointer for left block */ | 1538 | xfs_alloc_rec_t *lrp; /* record pointer for left block */ |
1536 | xfs_alloc_rec_t *rrp; /* record pointer for right block */ | 1539 | xfs_alloc_rec_t *rrp; /* record pointer for right block */ |
1537 | 1540 | ||
1538 | lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1541 | lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1539 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); | 1542 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); |
1540 | memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); | 1543 | memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); |
1541 | *rrp = *lrp; | 1544 | *rrp = *lrp; |
1542 | xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1545 | xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1543 | key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ | 1546 | key.ar_startblock = rrp->ar_startblock; |
1544 | key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ | 1547 | key.ar_blockcount = rrp->ar_blockcount; |
1545 | rkp = &key; | 1548 | rkp = &key; |
1546 | xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1); | 1549 | xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1); |
1547 | } | 1550 | } |
1548 | /* | 1551 | /* |
1549 | * Decrement and log left's numrecs, bump and log right's numrecs. | 1552 | * Decrement and log left's numrecs, bump and log right's numrecs. |
1550 | */ | 1553 | */ |
1551 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1); | 1554 | be16_add(&left->bb_numrecs, -1); |
1552 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); | 1555 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); |
1553 | INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); | 1556 | be16_add(&right->bb_numrecs, 1); |
1554 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); | 1557 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); |
1555 | /* | 1558 | /* |
1556 | * Using a temporary cursor, update the parent key values of the | 1559 | * Using a temporary cursor, update the parent key values of the |
@@ -1623,17 +1626,17 @@ xfs_alloc_split( | |||
1623 | /* | 1626 | /* |
1624 | * Fill in the btree header for the new block. | 1627 | * Fill in the btree header for the new block. |
1625 | */ | 1628 | */ |
1626 | INT_SET(right->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); | 1629 | right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); |
1627 | right->bb_level = left->bb_level; /* INT_: direct copy */ | 1630 | right->bb_level = left->bb_level; |
1628 | INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2)); | 1631 | right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2); |
1629 | /* | 1632 | /* |
1630 | * Make sure that if there's an odd number of entries now, that | 1633 | * Make sure that if there's an odd number of entries now, that |
1631 | * each new block will have the same number of entries. | 1634 | * each new block will have the same number of entries. |
1632 | */ | 1635 | */ |
1633 | if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) && | 1636 | if ((be16_to_cpu(left->bb_numrecs) & 1) && |
1634 | cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1) | 1637 | cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) |
1635 | INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); | 1638 | be16_add(&right->bb_numrecs, 1); |
1636 | i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1; | 1639 | i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; |
1637 | /* | 1640 | /* |
1638 | * For non-leaf blocks, copy keys and addresses over to the new block. | 1641 | * For non-leaf blocks, copy keys and addresses over to the new block. |
1639 | */ | 1642 | */ |
@@ -1648,15 +1651,15 @@ xfs_alloc_split( | |||
1648 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); | 1651 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); |
1649 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); | 1652 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); |
1650 | #ifdef DEBUG | 1653 | #ifdef DEBUG |
1651 | for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { | 1654 | for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { |
1652 | if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) | 1655 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level))) |
1653 | return error; | 1656 | return error; |
1654 | } | 1657 | } |
1655 | #endif | 1658 | #endif |
1656 | memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); /* INT_: copy */ | 1659 | memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); |
1657 | memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); /* INT_: copy */ | 1660 | memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); |
1658 | xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1661 | xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1659 | xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1662 | xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1660 | *keyp = *rkp; | 1663 | *keyp = *rkp; |
1661 | } | 1664 | } |
1662 | /* | 1665 | /* |
@@ -1668,38 +1671,38 @@ xfs_alloc_split( | |||
1668 | 1671 | ||
1669 | lrp = XFS_ALLOC_REC_ADDR(left, i, cur); | 1672 | lrp = XFS_ALLOC_REC_ADDR(left, i, cur); |
1670 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); | 1673 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); |
1671 | memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); | 1674 | memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); |
1672 | xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1675 | xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1673 | keyp->ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ | 1676 | keyp->ar_startblock = rrp->ar_startblock; |
1674 | keyp->ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ | 1677 | keyp->ar_blockcount = rrp->ar_blockcount; |
1675 | } | 1678 | } |
1676 | /* | 1679 | /* |
1677 | * Find the left block number by looking in the buffer. | 1680 | * Find the left block number by looking in the buffer. |
1678 | * Adjust numrecs, sibling pointers. | 1681 | * Adjust numrecs, sibling pointers. |
1679 | */ | 1682 | */ |
1680 | lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp)); | 1683 | lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp)); |
1681 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT))); | 1684 | be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); |
1682 | right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */ | 1685 | right->bb_rightsib = left->bb_rightsib; |
1683 | INT_SET(left->bb_rightsib, ARCH_CONVERT, rbno); | 1686 | left->bb_rightsib = cpu_to_be32(rbno); |
1684 | INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno); | 1687 | right->bb_leftsib = cpu_to_be32(lbno); |
1685 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS); | 1688 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS); |
1686 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); | 1689 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); |
1687 | /* | 1690 | /* |
1688 | * If there's a block to the new block's right, make that block | 1691 | * If there's a block to the new block's right, make that block |
1689 | * point back to right instead of to left. | 1692 | * point back to right instead of to left. |
1690 | */ | 1693 | */ |
1691 | if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 1694 | if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) { |
1692 | xfs_alloc_block_t *rrblock; /* rr btree block */ | 1695 | xfs_alloc_block_t *rrblock; /* rr btree block */ |
1693 | xfs_buf_t *rrbp; /* buffer for rrblock */ | 1696 | xfs_buf_t *rrbp; /* buffer for rrblock */ |
1694 | 1697 | ||
1695 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1698 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1696 | cur->bc_private.a.agno, INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, | 1699 | cur->bc_private.a.agno, be32_to_cpu(right->bb_rightsib), 0, |
1697 | &rrbp, XFS_ALLOC_BTREE_REF))) | 1700 | &rrbp, XFS_ALLOC_BTREE_REF))) |
1698 | return error; | 1701 | return error; |
1699 | rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); | 1702 | rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); |
1700 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) | 1703 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) |
1701 | return error; | 1704 | return error; |
1702 | INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, rbno); | 1705 | rrblock->bb_leftsib = cpu_to_be32(rbno); |
1703 | xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); | 1706 | xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); |
1704 | } | 1707 | } |
1705 | /* | 1708 | /* |
@@ -1707,9 +1710,9 @@ xfs_alloc_split( | |||
1707 | * If it's just pointing past the last entry in left, then we'll | 1710 | * If it's just pointing past the last entry in left, then we'll |
1708 | * insert there, so don't change anything in that case. | 1711 | * insert there, so don't change anything in that case. |
1709 | */ | 1712 | */ |
1710 | if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) { | 1713 | if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) { |
1711 | xfs_btree_setbuf(cur, level, rbp); | 1714 | xfs_btree_setbuf(cur, level, rbp); |
1712 | cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT); | 1715 | cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs); |
1713 | } | 1716 | } |
1714 | /* | 1717 | /* |
1715 | * If there are more levels, we'll need another cursor which refers to | 1718 | * If there are more levels, we'll need another cursor which refers to |
@@ -1807,7 +1810,7 @@ xfs_alloc_decrement( | |||
1807 | /* | 1810 | /* |
1808 | * If we just went off the left edge of the tree, return failure. | 1811 | * If we just went off the left edge of the tree, return failure. |
1809 | */ | 1812 | */ |
1810 | if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1813 | if (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK) { |
1811 | *stat = 0; | 1814 | *stat = 0; |
1812 | return 0; | 1815 | return 0; |
1813 | } | 1816 | } |
@@ -1836,7 +1839,7 @@ xfs_alloc_decrement( | |||
1836 | xfs_agblock_t agbno; /* block number of btree block */ | 1839 | xfs_agblock_t agbno; /* block number of btree block */ |
1837 | xfs_buf_t *bp; /* buffer pointer for block */ | 1840 | xfs_buf_t *bp; /* buffer pointer for block */ |
1838 | 1841 | ||
1839 | agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); | 1842 | agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); |
1840 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1843 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1841 | cur->bc_private.a.agno, agbno, 0, &bp, | 1844 | cur->bc_private.a.agno, agbno, 0, &bp, |
1842 | XFS_ALLOC_BTREE_REF))) | 1845 | XFS_ALLOC_BTREE_REF))) |
@@ -1846,7 +1849,7 @@ xfs_alloc_decrement( | |||
1846 | block = XFS_BUF_TO_ALLOC_BLOCK(bp); | 1849 | block = XFS_BUF_TO_ALLOC_BLOCK(bp); |
1847 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) | 1850 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) |
1848 | return error; | 1851 | return error; |
1849 | cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT); | 1852 | cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs); |
1850 | } | 1853 | } |
1851 | *stat = 1; | 1854 | *stat = 1; |
1852 | return 0; | 1855 | return 0; |
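The decrement path above follows the usual btree-cursor pattern: a leftsib of NULLAGBLOCK means the cursor just ran off the left edge; otherwise, after stepping to a sibling higher up, the cursor walks back down, pointing each lower level at the last entry of the child block it descends into. A rough host-endian sketch of that walk-down, where read_child() is a hypothetical stand-in for xfs_btree_read_bufs() followed by xfs_btree_check_sblock() and the sk* types are not the kernel structures:

	struct skcur { int ptrs[8]; };			/* 1-based entry index per level */
	struct skblk { int numrecs; int kids[64]; };	/* child block number per entry */

	/* descend from 'lev' to 'level', landing on the rightmost entry each time */
	void walk_down_rightmost(struct skcur *cur, struct skblk *block,
				 int lev, int level,
				 struct skblk *(*read_child)(int agbno))
	{
		while (lev > level) {
			int agbno = block->kids[cur->ptrs[lev] - 1];	/* pointer at current entry */

			block = read_child(agbno);	/* read + verify the child block */
			lev--;
			cur->ptrs[lev] = block->numrecs;	/* its last entry */
		}
	}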
@@ -1913,7 +1916,7 @@ xfs_alloc_get_rec( | |||
1913 | /* | 1916 | /* |
1914 | * Off the right end or left end, return failure. | 1917 | * Off the right end or left end, return failure. |
1915 | */ | 1918 | */ |
1916 | if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) { | 1919 | if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) { |
1917 | *stat = 0; | 1920 | *stat = 0; |
1918 | return 0; | 1921 | return 0; |
1919 | } | 1922 | } |
@@ -1924,8 +1927,8 @@ xfs_alloc_get_rec( | |||
1924 | xfs_alloc_rec_t *rec; /* record data */ | 1927 | xfs_alloc_rec_t *rec; /* record data */ |
1925 | 1928 | ||
1926 | rec = XFS_ALLOC_REC_ADDR(block, ptr, cur); | 1929 | rec = XFS_ALLOC_REC_ADDR(block, ptr, cur); |
1927 | *bno = INT_GET(rec->ar_startblock, ARCH_CONVERT); | 1930 | *bno = be32_to_cpu(rec->ar_startblock); |
1928 | *len = INT_GET(rec->ar_blockcount, ARCH_CONVERT); | 1931 | *len = be32_to_cpu(rec->ar_blockcount); |
1929 | } | 1932 | } |
1930 | *stat = 1; | 1933 | *stat = 1; |
1931 | return 0; | 1934 | return 0; |
@@ -1964,14 +1967,14 @@ xfs_alloc_increment( | |||
1964 | * Increment the ptr at this level. If we're still in the block | 1967 | * Increment the ptr at this level. If we're still in the block |
1965 | * then we're done. | 1968 | * then we're done. |
1966 | */ | 1969 | */ |
1967 | if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 1970 | if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) { |
1968 | *stat = 1; | 1971 | *stat = 1; |
1969 | return 0; | 1972 | return 0; |
1970 | } | 1973 | } |
1971 | /* | 1974 | /* |
1972 | * If we just went off the right edge of the tree, return failure. | 1975 | * If we just went off the right edge of the tree, return failure. |
1973 | */ | 1976 | */ |
1974 | if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1977 | if (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK) { |
1975 | *stat = 0; | 1978 | *stat = 0; |
1976 | return 0; | 1979 | return 0; |
1977 | } | 1980 | } |
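Increment is the mirror image: bump the 1-based entry index at this level, succeed while it stays within bb_numrecs, and fail outright only once the rightmost block is exhausted, where rightsib == NULLAGBLOCK (the all-ones AG block number) marks "no sibling". A small host-endian sketch of just that test, with SK_NULLAGBLOCK and the enum invented for the illustration:

	#include <stdint.h>

	#define SK_NULLAGBLOCK	((uint32_t)-1)

	enum sk_step { SK_DONE, SK_OFF_EDGE, SK_CLIMB };

	enum sk_step increment_step(int *ptr, int numrecs, uint32_t rightsib)
	{
		if (++(*ptr) <= numrecs)
			return SK_DONE;		/* still inside this block */
		if (rightsib == SK_NULLAGBLOCK)
			return SK_OFF_EDGE;	/* no block to the right */
		return SK_CLIMB;		/* walk up, then back down */
	}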
@@ -1986,7 +1989,7 @@ xfs_alloc_increment( | |||
1986 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) | 1989 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) |
1987 | return error; | 1990 | return error; |
1988 | #endif | 1991 | #endif |
1989 | if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 1992 | if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs)) |
1990 | break; | 1993 | break; |
1991 | /* | 1994 | /* |
1992 | * Read-ahead the right block, we're going to read it | 1995 | * Read-ahead the right block, we're going to read it |
@@ -2006,7 +2009,7 @@ xfs_alloc_increment( | |||
2006 | lev > level; ) { | 2009 | lev > level; ) { |
2007 | xfs_agblock_t agbno; /* block number of btree block */ | 2010 | xfs_agblock_t agbno; /* block number of btree block */ |
2008 | 2011 | ||
2009 | agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); | 2012 | agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); |
2010 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 2013 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
2011 | cur->bc_private.a.agno, agbno, 0, &bp, | 2014 | cur->bc_private.a.agno, agbno, 0, &bp, |
2012 | XFS_ALLOC_BTREE_REF))) | 2015 | XFS_ALLOC_BTREE_REF))) |
@@ -2041,8 +2044,8 @@ xfs_alloc_insert( | |||
2041 | 2044 | ||
2042 | level = 0; | 2045 | level = 0; |
2043 | nbno = NULLAGBLOCK; | 2046 | nbno = NULLAGBLOCK; |
2044 | INT_SET(nrec.ar_startblock, ARCH_CONVERT, cur->bc_rec.a.ar_startblock); | 2047 | nrec.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock); |
2045 | INT_SET(nrec.ar_blockcount, ARCH_CONVERT, cur->bc_rec.a.ar_blockcount); | 2048 | nrec.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount); |
2046 | ncur = (xfs_btree_cur_t *)0; | 2049 | ncur = (xfs_btree_cur_t *)0; |
2047 | pcur = cur; | 2050 | pcur = cur; |
2048 | /* | 2051 | /* |
@@ -2163,8 +2166,8 @@ xfs_alloc_update( | |||
2163 | /* | 2166 | /* |
2164 | * Fill in the new contents and log them. | 2167 | * Fill in the new contents and log them. |
2165 | */ | 2168 | */ |
2166 | INT_SET(rp->ar_startblock, ARCH_CONVERT, bno); | 2169 | rp->ar_startblock = cpu_to_be32(bno); |
2167 | INT_SET(rp->ar_blockcount, ARCH_CONVERT, len); | 2170 | rp->ar_blockcount = cpu_to_be32(len); |
2168 | xfs_alloc_log_recs(cur, cur->bc_bufs[0], ptr, ptr); | 2171 | xfs_alloc_log_recs(cur, cur->bc_bufs[0], ptr, ptr); |
2169 | } | 2172 | } |
2170 | /* | 2173 | /* |
@@ -2173,15 +2176,15 @@ xfs_alloc_update( | |||
2173 | * extent in the a.g., which we cache in the a.g. freelist header. | 2176 | * extent in the a.g., which we cache in the a.g. freelist header. |
2174 | */ | 2177 | */ |
2175 | if (cur->bc_btnum == XFS_BTNUM_CNT && | 2178 | if (cur->bc_btnum == XFS_BTNUM_CNT && |
2176 | INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && | 2179 | be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && |
2177 | ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 2180 | ptr == be16_to_cpu(block->bb_numrecs)) { |
2178 | xfs_agf_t *agf; /* a.g. freespace header */ | 2181 | xfs_agf_t *agf; /* a.g. freespace header */ |
2179 | xfs_agnumber_t seqno; | 2182 | xfs_agnumber_t seqno; |
2180 | 2183 | ||
2181 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 2184 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
2182 | seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT); | 2185 | seqno = be32_to_cpu(agf->agf_seqno); |
2183 | cur->bc_mp->m_perag[seqno].pagf_longest = len; | 2186 | cur->bc_mp->m_perag[seqno].pagf_longest = len; |
2184 | INT_SET(agf->agf_longest, ARCH_CONVERT, len); | 2187 | agf->agf_longest = cpu_to_be32(len); |
2185 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 2188 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
2186 | XFS_AGF_LONGEST); | 2189 | XFS_AGF_LONGEST); |
2187 | } | 2190 | } |
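The hunk above handles the by-size (cnt) btree's special case: the last record of the rightmost block is by definition the largest free extent in the allocation group, so when that record is updated its new length is mirrored into the on-disk agf_longest field and the in-core pagf_longest cache. A host-endian sketch of the qualifying condition, with illustrative parameters rather than the kernel cursor/AGF structures:

	#include <stdbool.h>
	#include <stdint.h>

	#define SK_NULLAGBLOCK	((uint32_t)-1)

	/* true when the record just updated is the AG's longest free extent */
	bool is_longest_extent_record(bool is_cnt_btree, uint32_t rightsib,
				      int ptr, int numrecs)
	{
		return is_cnt_btree &&
		       rightsib == SK_NULLAGBLOCK &&	/* rightmost block of the tree */
		       ptr == numrecs;			/* last record in that block */
	}

When the test holds, the length is written both to the big-endian agf_longest (via cpu_to_be32) and to the native-endian per-AG copy, and the AGF buffer is logged with XFS_AGF_LONGEST.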
@@ -2191,8 +2194,8 @@ xfs_alloc_update( | |||
2191 | if (ptr == 1) { | 2194 | if (ptr == 1) { |
2192 | xfs_alloc_key_t key; /* key containing [bno, len] */ | 2195 | xfs_alloc_key_t key; /* key containing [bno, len] */ |
2193 | 2196 | ||
2194 | INT_SET(key.ar_startblock, ARCH_CONVERT, bno); | 2197 | key.ar_startblock = cpu_to_be32(bno); |
2195 | INT_SET(key.ar_blockcount, ARCH_CONVERT, len); | 2198 | key.ar_blockcount = cpu_to_be32(len); |
2196 | if ((error = xfs_alloc_updkey(cur, &key, 1))) | 2199 | if ((error = xfs_alloc_updkey(cur, &key, 1))) |
2197 | return error; | 2200 | return error; |
2198 | } | 2201 | } |
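The whole diff follows one mechanical pattern: on-disk btree fields are big-endian, and every INT_GET/INT_SET/INT_MOD(..., ARCH_CONVERT) access becomes a typed byteorder helper (be16_to_cpu/be32_to_cpu for reads, cpu_to_be16/cpu_to_be32 for writes, be16_add for an in-place adjustment). A standalone sketch of what those helpers amount to, for readers without the kernel's byteorder headers at hand; it byte-swaps by hand purely for illustration, and the sk_* names are invented here, while the real helpers come from the kernel's byteorder headers and reduce to no-ops on big-endian hosts:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint16_t sk_be16;	/* holds a big-endian 16-bit on-disk value */

	static uint16_t sk_be16_to_cpu(sk_be16 v)
	{
		const uint8_t *p = (const uint8_t *)&v;

		return (uint16_t)((p[0] << 8) | p[1]);	/* byte 0 is the MSB on disk */
	}

	static sk_be16 sk_cpu_to_be16(uint16_t v)
	{
		sk_be16 out;
		uint8_t *p = (uint8_t *)&out;

		p[0] = (uint8_t)(v >> 8);
		p[1] = (uint8_t)(v & 0xff);
		return out;
	}

	/* analogue of the be16_add() used above: adjust a big-endian field in place */
	static void sk_be16_add(sk_be16 *field, int delta)
	{
		*field = sk_cpu_to_be16((uint16_t)(sk_be16_to_cpu(*field) + delta));
	}

	int main(void)
	{
		sk_be16 numrecs = sk_cpu_to_be16(7);	/* pretend on-disk bb_numrecs */

		sk_be16_add(&numrecs, 1);		/* like be16_add(&right->bb_numrecs, 1) */
		printf("numrecs = %u\n", sk_be16_to_cpu(numrecs));	/* prints 8 */
		return 0;
	}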