author     Linus Torvalds <torvalds@g5.osdl.org>   2005-11-03 19:25:58 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>   2005-11-03 19:25:58 -0500
commit     0f3278d14f0255e4cd9e07ccefc33ff12d8bb59c (patch)
tree       9bbb209cab841f700162a96e158dfa3ecd361f46 /fs/xfs/xfs_alloc_btree.c
parent     06024f217d607369f0ee0071034ebb03071d5fb2 (diff)
parent     15c84a470116b2a3b58a7353a6cf711c29a91854 (diff)
Merge git://oss.sgi.com:8090/oss/git/xfs-2.6
Diffstat (limited to 'fs/xfs/xfs_alloc_btree.c')
-rw-r--r--  fs/xfs/xfs_alloc_btree.c  475
1 file changed, 237 insertions(+), 238 deletions(-)
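
The bulk of this diff is mechanical: the SGI-era INT_GET()/INT_SET()/INT_MOD()/INT_COPY() macros, which took an explicit ARCH_CONVERT argument, are replaced with the kernel's native be16_to_cpu()/be32_to_cpu()/cpu_to_be16()/cpu_to_be32() accessors, and read-modify-write updates of on-disk counters now go through a be16_add()/be32_add() style helper. Below is a minimal kernel-style sketch of the before/after pattern; the trimmed block-header struct and the body of be16_add() are assumptions made for illustration, not copies of the XFS headers.

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Trimmed stand-in for the short-form btree block header (assumption). */
    typedef struct xfs_btree_sblock {
    	__be32	bb_magic;	/* magic number of the btree type */
    	__be16	bb_level;	/* level: 0 is a leaf */
    	__be16	bb_numrecs;	/* number of records in this block */
    	__be32	bb_leftsib;	/* left sibling, or NULLAGBLOCK */
    	__be32	bb_rightsib;	/* right sibling, or NULLAGBLOCK */
    } xfs_btree_sblock_t;

    #define NULLAGBLOCK	((__u32)-1)	/* "no block" sentinel */

    /* Assumed shape of the be16_add() helper the converted code calls. */
    static inline void be16_add(__be16 *a, __s16 b)
    {
    	*a = cpu_to_be16(be16_to_cpu(*a) + b);
    }

    static void endian_pattern_example(xfs_btree_sblock_t *block, int ptr)
    {
    	/* was: if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) */
    	if (ptr > be16_to_cpu(block->bb_numrecs))
    		return;

    	/* was: INT_MOD(block->bb_numrecs, ARCH_CONVERT, -1) */
    	be16_add(&block->bb_numrecs, -1);

    	/* was: INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK) */
    	block->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
    }

The behaviour is unchanged; only the spelling of the endianness conversions differs, which is why the diffstat is an almost exactly balanced 237 insertions against 238 deletions.
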
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index e0355a12d946..a1d92da86ccd 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -1,53 +1,41 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. | 2 | * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. |
3 | * All Rights Reserved. | ||
3 | * | 4 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or |
5 | * under the terms of version 2 of the GNU General Public License as | 6 | * modify it under the terms of the GNU General Public License as |
6 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
7 | * | 8 | * |
8 | * This program is distributed in the hope that it would be useful, but | 9 | * This program is distributed in the hope that it would be useful, |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | ||
11 | * | 13 | * |
12 | * Further, this software is distributed without any warranty that it is | 14 | * You should have received a copy of the GNU General Public License |
13 | * free of the rightful claim of any third person regarding infringement | 15 | * along with this program; if not, write the Free Software Foundation, |
14 | * or the like. Any license provided herein, whether implied or | 16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
15 | * otherwise, applies only to this software file. Patent licenses, if | ||
16 | * any, provided herein do not apply to combinations of this program with | ||
17 | * other software, or any other product whatsoever. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along | ||
20 | * with this program; if not, write the Free Software Foundation, Inc., 59 | ||
21 | * Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
22 | * | ||
23 | * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, | ||
24 | * Mountain View, CA 94043, or: | ||
25 | * | ||
26 | * http://www.sgi.com | ||
27 | * | ||
28 | * For further information regarding this notice, see: | ||
29 | * | ||
30 | * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ | ||
31 | */ | 17 | */ |
32 | |||
33 | /* | ||
34 | * Free space allocation for XFS. | ||
35 | */ | ||
36 | |||
37 | #include "xfs.h" | 18 | #include "xfs.h" |
38 | #include "xfs_macros.h" | 19 | #include "xfs_fs.h" |
39 | #include "xfs_types.h" | 20 | #include "xfs_types.h" |
40 | #include "xfs_inum.h" | 21 | #include "xfs_bit.h" |
41 | #include "xfs_log.h" | 22 | #include "xfs_log.h" |
23 | #include "xfs_inum.h" | ||
42 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
43 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
44 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
45 | #include "xfs_dir.h" | 27 | #include "xfs_dir.h" |
28 | #include "xfs_dir2.h" | ||
46 | #include "xfs_dmapi.h" | 29 | #include "xfs_dmapi.h" |
47 | #include "xfs_mount.h" | 30 | #include "xfs_mount.h" |
31 | #include "xfs_bmap_btree.h" | ||
48 | #include "xfs_alloc_btree.h" | 32 | #include "xfs_alloc_btree.h" |
49 | #include "xfs_ialloc_btree.h" | 33 | #include "xfs_ialloc_btree.h" |
50 | #include "xfs_bmap_btree.h" | 34 | #include "xfs_dir_sf.h" |
35 | #include "xfs_dir2_sf.h" | ||
36 | #include "xfs_attr_sf.h" | ||
37 | #include "xfs_dinode.h" | ||
38 | #include "xfs_inode.h" | ||
51 | #include "xfs_btree.h" | 39 | #include "xfs_btree.h" |
52 | #include "xfs_ialloc.h" | 40 | #include "xfs_ialloc.h" |
53 | #include "xfs_alloc.h" | 41 | #include "xfs_alloc.h" |
@@ -129,7 +117,7 @@ xfs_alloc_delrec( | |||
129 | /* | 117 | /* |
130 | * Fail if we're off the end of the block. | 118 | * Fail if we're off the end of the block. |
131 | */ | 119 | */ |
132 | if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 120 | if (ptr > be16_to_cpu(block->bb_numrecs)) { |
133 | *stat = 0; | 121 | *stat = 0; |
134 | return 0; | 122 | return 0; |
135 | } | 123 | } |
@@ -143,18 +131,18 @@ xfs_alloc_delrec( | |||
143 | lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur); | 131 | lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur); |
144 | lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur); | 132 | lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur); |
145 | #ifdef DEBUG | 133 | #ifdef DEBUG |
146 | for (i = ptr; i < INT_GET(block->bb_numrecs, ARCH_CONVERT); i++) { | 134 | for (i = ptr; i < be16_to_cpu(block->bb_numrecs); i++) { |
147 | if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) | 135 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level))) |
148 | return error; | 136 | return error; |
149 | } | 137 | } |
150 | #endif | 138 | #endif |
151 | if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 139 | if (ptr < be16_to_cpu(block->bb_numrecs)) { |
152 | memmove(&lkp[ptr - 1], &lkp[ptr], | 140 | memmove(&lkp[ptr - 1], &lkp[ptr], |
153 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lkp)); /* INT_: mem copy */ | 141 | (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lkp)); |
154 | memmove(&lpp[ptr - 1], &lpp[ptr], | 142 | memmove(&lpp[ptr - 1], &lpp[ptr], |
155 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lpp)); /* INT_: mem copy */ | 143 | (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lpp)); |
156 | xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); | 144 | xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); |
157 | xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); | 145 | xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); |
158 | } | 146 | } |
159 | } | 147 | } |
160 | /* | 148 | /* |
@@ -163,25 +151,25 @@ xfs_alloc_delrec( | |||
163 | */ | 151 | */ |
164 | else { | 152 | else { |
165 | lrp = XFS_ALLOC_REC_ADDR(block, 1, cur); | 153 | lrp = XFS_ALLOC_REC_ADDR(block, 1, cur); |
166 | if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 154 | if (ptr < be16_to_cpu(block->bb_numrecs)) { |
167 | memmove(&lrp[ptr - 1], &lrp[ptr], | 155 | memmove(&lrp[ptr - 1], &lrp[ptr], |
168 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lrp)); | 156 | (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lrp)); |
169 | xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); | 157 | xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); |
170 | } | 158 | } |
171 | /* | 159 | /* |
172 | * If it's the first record in the block, we'll need a key | 160 | * If it's the first record in the block, we'll need a key |
173 | * structure to pass up to the next level (updkey). | 161 | * structure to pass up to the next level (updkey). |
174 | */ | 162 | */ |
175 | if (ptr == 1) { | 163 | if (ptr == 1) { |
176 | key.ar_startblock = lrp->ar_startblock; /* INT_: direct copy */ | 164 | key.ar_startblock = lrp->ar_startblock; |
177 | key.ar_blockcount = lrp->ar_blockcount; /* INT_: direct copy */ | 165 | key.ar_blockcount = lrp->ar_blockcount; |
178 | lkp = &key; | 166 | lkp = &key; |
179 | } | 167 | } |
180 | } | 168 | } |
181 | /* | 169 | /* |
182 | * Decrement and log the number of entries in the block. | 170 | * Decrement and log the number of entries in the block. |
183 | */ | 171 | */ |
184 | INT_MOD(block->bb_numrecs, ARCH_CONVERT, -1); | 172 | be16_add(&block->bb_numrecs, -1); |
185 | xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); | 173 | xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); |
186 | /* | 174 | /* |
187 | * See if the longest free extent in the allocation group was | 175 | * See if the longest free extent in the allocation group was |
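
The two hunks above are the entry-removal path of xfs_alloc_delrec(): the keys and pointers (non-leaf) or records (leaf) after the deleted slot are slid down one position with memmove(), then bb_numrecs is decremented and logged. A self-contained, hypothetical sketch of that shift on a leaf, keeping the 1-based "ptr" convention of the XFS code:

    #include <linux/types.h>
    #include <linux/string.h>

    typedef struct xfs_alloc_rec {
    	__be32	ar_startblock;	/* starting block number */
    	__be32	ar_blockcount;	/* count of free blocks */
    } xfs_alloc_rec_t;

    /*
     * Remove the 1-based entry "ptr" from a leaf holding "numrecs" records
     * by sliding the tail of the array down one slot, as the leaf branch of
     * xfs_alloc_delrec() does with memmove(). Returns the new record count.
     */
    static int delrec_shift(xfs_alloc_rec_t *rp, int numrecs, int ptr)
    {
    	if (ptr < 1 || ptr > numrecs)
    		return numrecs;		/* off the end of the block */
    	if (ptr < numrecs)
    		memmove(&rp[ptr - 1], &rp[ptr],
    			(numrecs - ptr) * sizeof(*rp));
    	return numrecs - 1;
    }
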
@@ -194,24 +182,24 @@ xfs_alloc_delrec( | |||
194 | 182 | ||
195 | if (level == 0 && | 183 | if (level == 0 && |
196 | cur->bc_btnum == XFS_BTNUM_CNT && | 184 | cur->bc_btnum == XFS_BTNUM_CNT && |
197 | INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && | 185 | be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && |
198 | ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 186 | ptr > be16_to_cpu(block->bb_numrecs)) { |
199 | ASSERT(ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT) + 1); | 187 | ASSERT(ptr == be16_to_cpu(block->bb_numrecs) + 1); |
200 | /* | 188 | /* |
201 | * There are still records in the block. Grab the size | 189 | * There are still records in the block. Grab the size |
202 | * from the last one. | 190 | * from the last one. |
203 | */ | 191 | */ |
204 | if (INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 192 | if (be16_to_cpu(block->bb_numrecs)) { |
205 | rrp = XFS_ALLOC_REC_ADDR(block, INT_GET(block->bb_numrecs, ARCH_CONVERT), cur); | 193 | rrp = XFS_ALLOC_REC_ADDR(block, be16_to_cpu(block->bb_numrecs), cur); |
206 | INT_COPY(agf->agf_longest, rrp->ar_blockcount, ARCH_CONVERT); | 194 | agf->agf_longest = rrp->ar_blockcount; |
207 | } | 195 | } |
208 | /* | 196 | /* |
209 | * No free extents left. | 197 | * No free extents left. |
210 | */ | 198 | */ |
211 | else | 199 | else |
212 | agf->agf_longest = 0; | 200 | agf->agf_longest = 0; |
213 | mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest = | 201 | mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest = |
214 | INT_GET(agf->agf_longest, ARCH_CONVERT); | 202 | be32_to_cpu(agf->agf_longest); |
215 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 203 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
216 | XFS_AGF_LONGEST); | 204 | XFS_AGF_LONGEST); |
217 | } | 205 | } |
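
This hunk keeps the AGF's cached "longest free extent" in sync: the by-size (cnt) btree stores records ordered by length, so when the deleted entry was the last one in the rightmost leaf, the new longest value is simply whatever record is now last, or zero if the leaf emptied. A small illustrative helper capturing that invariant (the two-field record layout matches the converted on-disk format; the function itself is hypothetical):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    typedef struct xfs_alloc_rec {
    	__be32	ar_startblock;	/* starting block number */
    	__be32	ar_blockcount;	/* count of free blocks */
    } xfs_alloc_rec_t;

    /*
     * Recompute the AG's longest-free-extent cache from the rightmost leaf
     * of the by-size btree: its records are sorted by length, so the last
     * record (if any) is the longest free extent in the allocation group.
     */
    static __u32 cntbt_longest(const xfs_alloc_rec_t *recs, __u16 numrecs)
    {
    	return numrecs ? be32_to_cpu(recs[numrecs - 1].ar_blockcount) : 0;
    }

The same value is mirrored into mp->m_perag[agno].pagf_longest so allocation decisions can be made without re-reading the AGF buffer.
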
@@ -225,15 +213,15 @@ xfs_alloc_delrec( | |||
225 | * and it's NOT the leaf level, | 213 | * and it's NOT the leaf level, |
226 | * then we can get rid of this level. | 214 | * then we can get rid of this level. |
227 | */ | 215 | */ |
228 | if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == 1 && level > 0) { | 216 | if (be16_to_cpu(block->bb_numrecs) == 1 && level > 0) { |
229 | /* | 217 | /* |
230 | * lpp is still set to the first pointer in the block. | 218 | * lpp is still set to the first pointer in the block. |
231 | * Make it the new root of the btree. | 219 | * Make it the new root of the btree. |
232 | */ | 220 | */ |
233 | bno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT); | 221 | bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]); |
234 | INT_COPY(agf->agf_roots[cur->bc_btnum], *lpp, ARCH_CONVERT); | 222 | agf->agf_roots[cur->bc_btnum] = *lpp; |
235 | INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, -1); | 223 | be32_add(&agf->agf_levels[cur->bc_btnum], -1); |
236 | mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_levels[cur->bc_btnum]--; | 224 | mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--; |
237 | /* | 225 | /* |
238 | * Put this buffer/block on the ag's freelist. | 226 | * Put this buffer/block on the ag's freelist. |
239 | */ | 227 | */ |
@@ -255,7 +243,7 @@ xfs_alloc_delrec( | |||
255 | * that freed the block. | 243 | * that freed the block. |
256 | */ | 244 | */ |
257 | xfs_alloc_mark_busy(cur->bc_tp, | 245 | xfs_alloc_mark_busy(cur->bc_tp, |
258 | INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); | 246 | be32_to_cpu(agf->agf_seqno), bno, 1); |
259 | 247 | ||
260 | xfs_trans_agbtree_delta(cur->bc_tp, -1); | 248 | xfs_trans_agbtree_delta(cur->bc_tp, -1); |
261 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 249 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
@@ -281,7 +269,7 @@ xfs_alloc_delrec( | |||
281 | * If the number of records remaining in the block is at least | 269 | * If the number of records remaining in the block is at least |
282 | * the minimum, we're done. | 270 | * the minimum, we're done. |
283 | */ | 271 | */ |
284 | if (INT_GET(block->bb_numrecs, ARCH_CONVERT) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { | 272 | if (be16_to_cpu(block->bb_numrecs) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { |
285 | if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i))) | 273 | if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i))) |
286 | return error; | 274 | return error; |
287 | *stat = 1; | 275 | *stat = 1; |
@@ -292,8 +280,8 @@ xfs_alloc_delrec( | |||
292 | * tree balanced. Look at the left and right sibling blocks to | 280 | * tree balanced. Look at the left and right sibling blocks to |
293 | * see if we can re-balance by moving only one record. | 281 | * see if we can re-balance by moving only one record. |
294 | */ | 282 | */ |
295 | rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); | 283 | rbno = be32_to_cpu(block->bb_rightsib); |
296 | lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT); | 284 | lbno = be32_to_cpu(block->bb_leftsib); |
297 | bno = NULLAGBLOCK; | 285 | bno = NULLAGBLOCK; |
298 | ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); | 286 | ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); |
299 | /* | 287 | /* |
@@ -330,18 +318,18 @@ xfs_alloc_delrec( | |||
330 | /* | 318 | /* |
331 | * Grab the current block number, for future use. | 319 | * Grab the current block number, for future use. |
332 | */ | 320 | */ |
333 | bno = INT_GET(right->bb_leftsib, ARCH_CONVERT); | 321 | bno = be32_to_cpu(right->bb_leftsib); |
334 | /* | 322 | /* |
335 | * If right block is full enough so that removing one entry | 323 | * If right block is full enough so that removing one entry |
336 | * won't make it too empty, and left-shifting an entry out | 324 | * won't make it too empty, and left-shifting an entry out |
337 | * of right to us works, we're done. | 325 | * of right to us works, we're done. |
338 | */ | 326 | */ |
339 | if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >= | 327 | if (be16_to_cpu(right->bb_numrecs) - 1 >= |
340 | XFS_ALLOC_BLOCK_MINRECS(level, cur)) { | 328 | XFS_ALLOC_BLOCK_MINRECS(level, cur)) { |
341 | if ((error = xfs_alloc_lshift(tcur, level, &i))) | 329 | if ((error = xfs_alloc_lshift(tcur, level, &i))) |
342 | goto error0; | 330 | goto error0; |
343 | if (i) { | 331 | if (i) { |
344 | ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= | 332 | ASSERT(be16_to_cpu(block->bb_numrecs) >= |
345 | XFS_ALLOC_BLOCK_MINRECS(level, cur)); | 333 | XFS_ALLOC_BLOCK_MINRECS(level, cur)); |
346 | xfs_btree_del_cursor(tcur, | 334 | xfs_btree_del_cursor(tcur, |
347 | XFS_BTREE_NOERROR); | 335 | XFS_BTREE_NOERROR); |
@@ -358,7 +346,7 @@ xfs_alloc_delrec( | |||
358 | * future reference, and fix up the temp cursor to point | 346 | * future reference, and fix up the temp cursor to point |
359 | * to our block again (last record). | 347 | * to our block again (last record). |
360 | */ | 348 | */ |
361 | rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); | 349 | rrecs = be16_to_cpu(right->bb_numrecs); |
362 | if (lbno != NULLAGBLOCK) { | 350 | if (lbno != NULLAGBLOCK) { |
363 | i = xfs_btree_firstrec(tcur, level); | 351 | i = xfs_btree_firstrec(tcur, level); |
364 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | 352 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); |
@@ -394,18 +382,18 @@ xfs_alloc_delrec( | |||
394 | /* | 382 | /* |
395 | * Grab the current block number, for future use. | 383 | * Grab the current block number, for future use. |
396 | */ | 384 | */ |
397 | bno = INT_GET(left->bb_rightsib, ARCH_CONVERT); | 385 | bno = be32_to_cpu(left->bb_rightsib); |
398 | /* | 386 | /* |
399 | * If left block is full enough so that removing one entry | 387 | * If left block is full enough so that removing one entry |
400 | * won't make it too empty, and right-shifting an entry out | 388 | * won't make it too empty, and right-shifting an entry out |
401 | * of left to us works, we're done. | 389 | * of left to us works, we're done. |
402 | */ | 390 | */ |
403 | if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >= | 391 | if (be16_to_cpu(left->bb_numrecs) - 1 >= |
404 | XFS_ALLOC_BLOCK_MINRECS(level, cur)) { | 392 | XFS_ALLOC_BLOCK_MINRECS(level, cur)) { |
405 | if ((error = xfs_alloc_rshift(tcur, level, &i))) | 393 | if ((error = xfs_alloc_rshift(tcur, level, &i))) |
406 | goto error0; | 394 | goto error0; |
407 | if (i) { | 395 | if (i) { |
408 | ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= | 396 | ASSERT(be16_to_cpu(block->bb_numrecs) >= |
409 | XFS_ALLOC_BLOCK_MINRECS(level, cur)); | 397 | XFS_ALLOC_BLOCK_MINRECS(level, cur)); |
410 | xfs_btree_del_cursor(tcur, | 398 | xfs_btree_del_cursor(tcur, |
411 | XFS_BTREE_NOERROR); | 399 | XFS_BTREE_NOERROR); |
@@ -419,7 +407,7 @@ xfs_alloc_delrec( | |||
419 | * Otherwise, grab the number of records in right for | 407 | * Otherwise, grab the number of records in right for |
420 | * future reference. | 408 | * future reference. |
421 | */ | 409 | */ |
422 | lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); | 410 | lrecs = be16_to_cpu(left->bb_numrecs); |
423 | } | 411 | } |
424 | /* | 412 | /* |
425 | * Delete the temp cursor, we're done with it. | 413 | * Delete the temp cursor, we're done with it. |
@@ -433,7 +421,7 @@ xfs_alloc_delrec( | |||
433 | * See if we can join with the left neighbor block. | 421 | * See if we can join with the left neighbor block. |
434 | */ | 422 | */ |
435 | if (lbno != NULLAGBLOCK && | 423 | if (lbno != NULLAGBLOCK && |
436 | lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 424 | lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
437 | /* | 425 | /* |
438 | * Set "right" to be the starting block, | 426 | * Set "right" to be the starting block, |
439 | * "left" to be the left neighbor. | 427 | * "left" to be the left neighbor. |
@@ -453,7 +441,7 @@ xfs_alloc_delrec( | |||
453 | * If that won't work, see if we can join with the right neighbor block. | 441 | * If that won't work, see if we can join with the right neighbor block. |
454 | */ | 442 | */ |
455 | else if (rbno != NULLAGBLOCK && | 443 | else if (rbno != NULLAGBLOCK && |
456 | rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= | 444 | rrecs + be16_to_cpu(block->bb_numrecs) <= |
457 | XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 445 | XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
458 | /* | 446 | /* |
459 | * Set "left" to be the starting block, | 447 | * Set "left" to be the starting block, |
@@ -488,31 +476,34 @@ xfs_alloc_delrec( | |||
488 | /* | 476 | /* |
489 | * It's a non-leaf. Move keys and pointers. | 477 | * It's a non-leaf. Move keys and pointers. |
490 | */ | 478 | */ |
491 | lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); | 479 | lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); |
492 | lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); | 480 | lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); |
493 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); | 481 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); |
494 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); | 482 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); |
495 | #ifdef DEBUG | 483 | #ifdef DEBUG |
496 | for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { | 484 | for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { |
497 | if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) | 485 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) |
498 | return error; | 486 | return error; |
499 | } | 487 | } |
500 | #endif | 488 | #endif |
501 | memcpy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lkp)); /* INT_: structure copy */ | 489 | memcpy(lkp, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*lkp)); |
502 | memcpy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lpp)); /* INT_: structure copy */ | 490 | memcpy(lpp, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*lpp)); |
503 | xfs_alloc_log_keys(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, | 491 | xfs_alloc_log_keys(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1, |
504 | INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 492 | be16_to_cpu(left->bb_numrecs) + |
505 | xfs_alloc_log_ptrs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, | 493 | be16_to_cpu(right->bb_numrecs)); |
506 | INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 494 | xfs_alloc_log_ptrs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1, |
495 | be16_to_cpu(left->bb_numrecs) + | ||
496 | be16_to_cpu(right->bb_numrecs)); | ||
507 | } else { | 497 | } else { |
508 | /* | 498 | /* |
509 | * It's a leaf. Move records. | 499 | * It's a leaf. Move records. |
510 | */ | 500 | */ |
511 | lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); | 501 | lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); |
512 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); | 502 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); |
513 | memcpy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lrp)); | 503 | memcpy(lrp, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*lrp)); |
514 | xfs_alloc_log_recs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, | 504 | xfs_alloc_log_recs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1, |
515 | INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 505 | be16_to_cpu(left->bb_numrecs) + |
506 | be16_to_cpu(right->bb_numrecs)); | ||
516 | } | 507 | } |
517 | /* | 508 | /* |
518 | * If we joined with the left neighbor, set the buffer in the | 509 | * If we joined with the left neighbor, set the buffer in the |
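
At this point the two sibling blocks are joined: all of "right"'s keys and pointers (or records, on a leaf) are copied onto the end of "left" with memcpy(), and left's bb_numrecs is then bumped by right's count in the following hunk. A hypothetical leaf-level sketch of that append, using the same big-endian record layout:

    #include <linux/types.h>
    #include <linux/string.h>
    #include <asm/byteorder.h>

    typedef struct xfs_alloc_rec {
    	__be32	ar_startblock;	/* starting block number */
    	__be32	ar_blockcount;	/* count of free blocks */
    } xfs_alloc_rec_t;

    /*
     * Append all of the right leaf's records after the left leaf's records
     * and fold the count into the left block's record counter, mirroring
     * the memcpy() + be16_add() pair in the join path of xfs_alloc_delrec().
     */
    static void join_leaf_recs(xfs_alloc_rec_t *lrecs, __be16 *lnumrecs,
    			   const xfs_alloc_rec_t *rrecs, __u16 rcount)
    {
    	__u16 lcount = be16_to_cpu(*lnumrecs);

    	memcpy(&lrecs[lcount], rrecs, rcount * sizeof(*rrecs));
    	*lnumrecs = cpu_to_be16(lcount + rcount);
    }
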
@@ -520,7 +511,7 @@ xfs_alloc_delrec( | |||
520 | */ | 511 | */ |
521 | if (bp != lbp) { | 512 | if (bp != lbp) { |
522 | xfs_btree_setbuf(cur, level, lbp); | 513 | xfs_btree_setbuf(cur, level, lbp); |
523 | cur->bc_ptrs[level] += INT_GET(left->bb_numrecs, ARCH_CONVERT); | 514 | cur->bc_ptrs[level] += be16_to_cpu(left->bb_numrecs); |
524 | } | 515 | } |
525 | /* | 516 | /* |
526 | * If we joined with the right neighbor and there's a level above | 517 | * If we joined with the right neighbor and there's a level above |
@@ -532,28 +523,28 @@ xfs_alloc_delrec( | |||
532 | /* | 523 | /* |
533 | * Fix up the number of records in the surviving block. | 524 | * Fix up the number of records in the surviving block. |
534 | */ | 525 | */ |
535 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 526 | be16_add(&left->bb_numrecs, be16_to_cpu(right->bb_numrecs)); |
536 | /* | 527 | /* |
537 | * Fix up the right block pointer in the surviving block, and log it. | 528 | * Fix up the right block pointer in the surviving block, and log it. |
538 | */ | 529 | */ |
539 | left->bb_rightsib = right->bb_rightsib; /* INT_: direct copy */ | 530 | left->bb_rightsib = right->bb_rightsib; |
540 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); | 531 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); |
541 | /* | 532 | /* |
542 | * If there is a right sibling now, make it point to the | 533 | * If there is a right sibling now, make it point to the |
543 | * remaining block. | 534 | * remaining block. |
544 | */ | 535 | */ |
545 | if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 536 | if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) { |
546 | xfs_alloc_block_t *rrblock; | 537 | xfs_alloc_block_t *rrblock; |
547 | xfs_buf_t *rrbp; | 538 | xfs_buf_t *rrbp; |
548 | 539 | ||
549 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 540 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
550 | cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, | 541 | cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0, |
551 | &rrbp, XFS_ALLOC_BTREE_REF))) | 542 | &rrbp, XFS_ALLOC_BTREE_REF))) |
552 | return error; | 543 | return error; |
553 | rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); | 544 | rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); |
554 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) | 545 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) |
555 | return error; | 546 | return error; |
556 | INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno); | 547 | rrblock->bb_leftsib = cpu_to_be32(lbno); |
557 | xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); | 548 | xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); |
558 | } | 549 | } |
559 | /* | 550 | /* |
@@ -574,10 +565,9 @@ xfs_alloc_delrec( | |||
574 | * busy block is allocated, the iclog is pushed up to the | 565 | * busy block is allocated, the iclog is pushed up to the |
575 | * LSN that freed the block. | 566 | * LSN that freed the block. |
576 | */ | 567 | */ |
577 | xfs_alloc_mark_busy(cur->bc_tp, | 568 | xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1); |
578 | INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); | ||
579 | |||
580 | xfs_trans_agbtree_delta(cur->bc_tp, -1); | 569 | xfs_trans_agbtree_delta(cur->bc_tp, -1); |
570 | |||
581 | /* | 571 | /* |
582 | * Adjust the current level's cursor so that we're left referring | 572 | * Adjust the current level's cursor so that we're left referring |
583 | * to the right node, after we're done. | 573 | * to the right node, after we're done. |
@@ -625,7 +615,15 @@ xfs_alloc_insrec( | |||
625 | int ptr; /* index in btree block for this rec */ | 615 | int ptr; /* index in btree block for this rec */ |
626 | xfs_alloc_rec_t *rp; /* pointer to btree records */ | 616 | xfs_alloc_rec_t *rp; /* pointer to btree records */ |
627 | 617 | ||
628 | ASSERT(INT_GET(recp->ar_blockcount, ARCH_CONVERT) > 0); | 618 | ASSERT(be32_to_cpu(recp->ar_blockcount) > 0); |
619 | |||
620 | /* | ||
621 | * GCC doesn't understand the (arguably complex) control flow in | ||
622 | * this function and complains about uninitialized structure fields | ||
623 | * without this. | ||
624 | */ | ||
625 | memset(&nrec, 0, sizeof(nrec)); | ||
626 | |||
629 | /* | 627 | /* |
630 | * If we made it to the root level, allocate a new root block | 628 | * If we made it to the root level, allocate a new root block |
631 | * and we're done. | 629 | * and we're done. |
@@ -641,8 +639,8 @@ xfs_alloc_insrec( | |||
641 | /* | 639 | /* |
642 | * Make a key out of the record data to be inserted, and save it. | 640 | * Make a key out of the record data to be inserted, and save it. |
643 | */ | 641 | */ |
644 | key.ar_startblock = recp->ar_startblock; /* INT_: direct copy */ | 642 | key.ar_startblock = recp->ar_startblock; |
645 | key.ar_blockcount = recp->ar_blockcount; /* INT_: direct copy */ | 643 | key.ar_blockcount = recp->ar_blockcount; |
646 | optr = ptr = cur->bc_ptrs[level]; | 644 | optr = ptr = cur->bc_ptrs[level]; |
647 | /* | 645 | /* |
648 | * If we're off the left edge, return failure. | 646 | * If we're off the left edge, return failure. |
@@ -663,7 +661,7 @@ xfs_alloc_insrec( | |||
663 | /* | 661 | /* |
664 | * Check that the new entry is being inserted in the right place. | 662 | * Check that the new entry is being inserted in the right place. |
665 | */ | 663 | */ |
666 | if (ptr <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 664 | if (ptr <= be16_to_cpu(block->bb_numrecs)) { |
667 | if (level == 0) { | 665 | if (level == 0) { |
668 | rp = XFS_ALLOC_REC_ADDR(block, ptr, cur); | 666 | rp = XFS_ALLOC_REC_ADDR(block, ptr, cur); |
669 | xfs_btree_check_rec(cur->bc_btnum, recp, rp); | 667 | xfs_btree_check_rec(cur->bc_btnum, recp, rp); |
@@ -679,7 +677,7 @@ xfs_alloc_insrec( | |||
679 | * If the block is full, we can't insert the new entry until we | 677 | * If the block is full, we can't insert the new entry until we |
680 | * make the block un-full. | 678 | * make the block un-full. |
681 | */ | 679 | */ |
682 | if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 680 | if (be16_to_cpu(block->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
683 | /* | 681 | /* |
684 | * First, try shifting an entry to the right neighbor. | 682 | * First, try shifting an entry to the right neighbor. |
685 | */ | 683 | */ |
@@ -716,8 +714,8 @@ xfs_alloc_insrec( | |||
716 | return error; | 714 | return error; |
717 | #endif | 715 | #endif |
718 | ptr = cur->bc_ptrs[level]; | 716 | ptr = cur->bc_ptrs[level]; |
719 | nrec.ar_startblock = nkey.ar_startblock; /* INT_: direct copy */ | 717 | nrec.ar_startblock = nkey.ar_startblock; |
720 | nrec.ar_blockcount = nkey.ar_blockcount; /* INT_: direct copy */ | 718 | nrec.ar_blockcount = nkey.ar_blockcount; |
721 | } | 719 | } |
722 | /* | 720 | /* |
723 | * Otherwise the insert fails. | 721 | * Otherwise the insert fails. |
@@ -741,15 +739,15 @@ xfs_alloc_insrec( | |||
741 | kp = XFS_ALLOC_KEY_ADDR(block, 1, cur); | 739 | kp = XFS_ALLOC_KEY_ADDR(block, 1, cur); |
742 | pp = XFS_ALLOC_PTR_ADDR(block, 1, cur); | 740 | pp = XFS_ALLOC_PTR_ADDR(block, 1, cur); |
743 | #ifdef DEBUG | 741 | #ifdef DEBUG |
744 | for (i = INT_GET(block->bb_numrecs, ARCH_CONVERT); i >= ptr; i--) { | 742 | for (i = be16_to_cpu(block->bb_numrecs); i >= ptr; i--) { |
745 | if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), level))) | 743 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level))) |
746 | return error; | 744 | return error; |
747 | } | 745 | } |
748 | #endif | 746 | #endif |
749 | memmove(&kp[ptr], &kp[ptr - 1], | 747 | memmove(&kp[ptr], &kp[ptr - 1], |
750 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*kp)); /* INT_: copy */ | 748 | (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*kp)); |
751 | memmove(&pp[ptr], &pp[ptr - 1], | 749 | memmove(&pp[ptr], &pp[ptr - 1], |
752 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*pp)); /* INT_: copy */ | 750 | (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*pp)); |
753 | #ifdef DEBUG | 751 | #ifdef DEBUG |
754 | if ((error = xfs_btree_check_sptr(cur, *bnop, level))) | 752 | if ((error = xfs_btree_check_sptr(cur, *bnop, level))) |
755 | return error; | 753 | return error; |
@@ -758,12 +756,12 @@ xfs_alloc_insrec( | |||
758 | * Now stuff the new data in, bump numrecs and log the new data. | 756 | * Now stuff the new data in, bump numrecs and log the new data. |
759 | */ | 757 | */ |
760 | kp[ptr - 1] = key; | 758 | kp[ptr - 1] = key; |
761 | INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); | 759 | pp[ptr - 1] = cpu_to_be32(*bnop); |
762 | INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); | 760 | be16_add(&block->bb_numrecs, 1); |
763 | xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); | 761 | xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); |
764 | xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); | 762 | xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); |
765 | #ifdef DEBUG | 763 | #ifdef DEBUG |
766 | if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 764 | if (ptr < be16_to_cpu(block->bb_numrecs)) |
767 | xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1, | 765 | xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1, |
768 | kp + ptr); | 766 | kp + ptr); |
769 | #endif | 767 | #endif |
@@ -773,16 +771,16 @@ xfs_alloc_insrec( | |||
773 | */ | 771 | */ |
774 | rp = XFS_ALLOC_REC_ADDR(block, 1, cur); | 772 | rp = XFS_ALLOC_REC_ADDR(block, 1, cur); |
775 | memmove(&rp[ptr], &rp[ptr - 1], | 773 | memmove(&rp[ptr], &rp[ptr - 1], |
776 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*rp)); | 774 | (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*rp)); |
777 | /* | 775 | /* |
778 | * Now stuff the new record in, bump numrecs | 776 | * Now stuff the new record in, bump numrecs |
779 | * and log the new data. | 777 | * and log the new data. |
780 | */ | 778 | */ |
781 | rp[ptr - 1] = *recp; /* INT_: struct copy */ | 779 | rp[ptr - 1] = *recp; /* INT_: struct copy */ |
782 | INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); | 780 | be16_add(&block->bb_numrecs, 1); |
783 | xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); | 781 | xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); |
784 | #ifdef DEBUG | 782 | #ifdef DEBUG |
785 | if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 783 | if (ptr < be16_to_cpu(block->bb_numrecs)) |
786 | xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1, | 784 | xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1, |
787 | rp + ptr); | 785 | rp + ptr); |
788 | #endif | 786 | #endif |
@@ -804,16 +802,16 @@ xfs_alloc_insrec( | |||
804 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 802 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
805 | if (level == 0 && | 803 | if (level == 0 && |
806 | cur->bc_btnum == XFS_BTNUM_CNT && | 804 | cur->bc_btnum == XFS_BTNUM_CNT && |
807 | INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && | 805 | be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && |
808 | INT_GET(recp->ar_blockcount, ARCH_CONVERT) > INT_GET(agf->agf_longest, ARCH_CONVERT)) { | 806 | be32_to_cpu(recp->ar_blockcount) > be32_to_cpu(agf->agf_longest)) { |
809 | /* | 807 | /* |
810 | * If this is a leaf in the by-size btree and there | 808 | * If this is a leaf in the by-size btree and there |
811 | * is no right sibling block and this block is bigger | 809 | * is no right sibling block and this block is bigger |
812 | * than the previous longest block, update it. | 810 | * than the previous longest block, update it. |
813 | */ | 811 | */ |
814 | INT_COPY(agf->agf_longest, recp->ar_blockcount, ARCH_CONVERT); | 812 | agf->agf_longest = recp->ar_blockcount; |
815 | cur->bc_mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest | 813 | cur->bc_mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest |
816 | = INT_GET(recp->ar_blockcount, ARCH_CONVERT); | 814 | = be32_to_cpu(recp->ar_blockcount); |
817 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 815 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
818 | XFS_AGF_LONGEST); | 816 | XFS_AGF_LONGEST); |
819 | } | 817 | } |
@@ -923,8 +921,9 @@ xfs_alloc_log_recs( | |||
923 | 921 | ||
924 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 922 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
925 | for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++) | 923 | for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++) |
926 | ASSERT(INT_GET(p->ar_startblock, ARCH_CONVERT) + INT_GET(p->ar_blockcount, ARCH_CONVERT) <= | 924 | ASSERT(be32_to_cpu(p->ar_startblock) + |
927 | INT_GET(agf->agf_length, ARCH_CONVERT)); | 925 | be32_to_cpu(p->ar_blockcount) <= |
926 | be32_to_cpu(agf->agf_length)); | ||
928 | } | 927 | } |
929 | #endif | 928 | #endif |
930 | first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); | 929 | first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); |
@@ -961,8 +960,8 @@ xfs_alloc_lookup( | |||
961 | xfs_agf_t *agf; /* a.g. freespace header */ | 960 | xfs_agf_t *agf; /* a.g. freespace header */ |
962 | 961 | ||
963 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 962 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
964 | agno = INT_GET(agf->agf_seqno, ARCH_CONVERT); | 963 | agno = be32_to_cpu(agf->agf_seqno); |
965 | agbno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT); | 964 | agbno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]); |
966 | } | 965 | } |
967 | /* | 966 | /* |
968 | * Iterate over each level in the btree, starting at the root. | 967 | * Iterate over each level in the btree, starting at the root. |
@@ -1029,7 +1028,7 @@ xfs_alloc_lookup( | |||
1029 | * Set low and high entry numbers, 1-based. | 1028 | * Set low and high entry numbers, 1-based. |
1030 | */ | 1029 | */ |
1031 | low = 1; | 1030 | low = 1; |
1032 | if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) { | 1031 | if (!(high = be16_to_cpu(block->bb_numrecs))) { |
1033 | /* | 1032 | /* |
1034 | * If the block is empty, the tree must | 1033 | * If the block is empty, the tree must |
1035 | * be an empty leaf. | 1034 | * be an empty leaf. |
@@ -1058,14 +1057,14 @@ xfs_alloc_lookup( | |||
1058 | xfs_alloc_key_t *kkp; | 1057 | xfs_alloc_key_t *kkp; |
1059 | 1058 | ||
1060 | kkp = kkbase + keyno - 1; | 1059 | kkp = kkbase + keyno - 1; |
1061 | startblock = INT_GET(kkp->ar_startblock, ARCH_CONVERT); | 1060 | startblock = be32_to_cpu(kkp->ar_startblock); |
1062 | blockcount = INT_GET(kkp->ar_blockcount, ARCH_CONVERT); | 1061 | blockcount = be32_to_cpu(kkp->ar_blockcount); |
1063 | } else { | 1062 | } else { |
1064 | xfs_alloc_rec_t *krp; | 1063 | xfs_alloc_rec_t *krp; |
1065 | 1064 | ||
1066 | krp = krbase + keyno - 1; | 1065 | krp = krbase + keyno - 1; |
1067 | startblock = INT_GET(krp->ar_startblock, ARCH_CONVERT); | 1066 | startblock = be32_to_cpu(krp->ar_startblock); |
1068 | blockcount = INT_GET(krp->ar_blockcount, ARCH_CONVERT); | 1067 | blockcount = be32_to_cpu(krp->ar_blockcount); |
1069 | } | 1068 | } |
1070 | /* | 1069 | /* |
1071 | * Compute difference to get next direction. | 1070 | * Compute difference to get next direction. |
@@ -1105,7 +1104,7 @@ xfs_alloc_lookup( | |||
1105 | */ | 1104 | */ |
1106 | if (diff > 0 && --keyno < 1) | 1105 | if (diff > 0 && --keyno < 1) |
1107 | keyno = 1; | 1106 | keyno = 1; |
1108 | agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, keyno, cur), ARCH_CONVERT); | 1107 | agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, keyno, cur)); |
1109 | #ifdef DEBUG | 1108 | #ifdef DEBUG |
1110 | if ((error = xfs_btree_check_sptr(cur, agbno, level))) | 1109 | if ((error = xfs_btree_check_sptr(cur, agbno, level))) |
1111 | return error; | 1110 | return error; |
@@ -1124,8 +1123,8 @@ xfs_alloc_lookup( | |||
1124 | * not the last block, we're in the wrong block. | 1123 | * not the last block, we're in the wrong block. |
1125 | */ | 1124 | */ |
1126 | if (dir == XFS_LOOKUP_GE && | 1125 | if (dir == XFS_LOOKUP_GE && |
1127 | keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) && | 1126 | keyno > be16_to_cpu(block->bb_numrecs) && |
1128 | INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 1127 | be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) { |
1129 | int i; | 1128 | int i; |
1130 | 1129 | ||
1131 | cur->bc_ptrs[0] = keyno; | 1130 | cur->bc_ptrs[0] = keyno; |
@@ -1142,7 +1141,7 @@ xfs_alloc_lookup( | |||
1142 | /* | 1141 | /* |
1143 | * Return if we succeeded or not. | 1142 | * Return if we succeeded or not. |
1144 | */ | 1143 | */ |
1145 | if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 1144 | if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) |
1146 | *stat = 0; | 1145 | *stat = 0; |
1147 | else | 1146 | else |
1148 | *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); | 1147 | *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); |
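
The lookup hunks above are the per-level binary search: low and high entry numbers are 1-based, the key (or record) at keyno is decoded with be32_to_cpu(), and the comparison orders either by startblock (bno btree) or by blockcount with startblock as tie-break (cnt btree). A self-contained sketch of that comparison loop over a single block follows; the helper is hypothetical and omits the child-pointer descent and the GE-off-the-end handling shown above:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    typedef struct xfs_alloc_key {
    	__be32	ar_startblock;	/* starting block number */
    	__be32	ar_blockcount;	/* count of free blocks */
    } xfs_alloc_key_t;

    /*
     * Binary-search one block's keys for (bno, len), 1-based like the XFS
     * code. "by_size" selects cnt-btree ordering (blockcount first,
     * startblock as tie-break); otherwise bno-btree ordering (startblock).
     * Returns the last entry examined; *diffp tells which way it compared.
     */
    static int lookup_block(const xfs_alloc_key_t *keys, int numrecs,
    			__u32 bno, __u32 len, int by_size, __s64 *diffp)
    {
    	int low = 1, high = numrecs, keyno = 0;
    	__s64 diff = 0;

    	while (low <= high) {
    		__u32 startblock, blockcount;

    		keyno = (low + high) >> 1;
    		startblock = be32_to_cpu(keys[keyno - 1].ar_startblock);
    		blockcount = be32_to_cpu(keys[keyno - 1].ar_blockcount);
    		if (by_size) {
    			diff = (__s64)blockcount - (__s64)len;
    			if (!diff)
    				diff = (__s64)startblock - (__s64)bno;
    		} else {
    			diff = (__s64)startblock - (__s64)bno;
    		}
    		if (diff < 0)
    			low = keyno + 1;	/* key too small, go right */
    		else if (diff > 0)
    			high = keyno - 1;	/* key too big, go left */
    		else
    			break;			/* exact match */
    	}
    	*diffp = diff;
    	return keyno;
    }
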
@@ -1185,7 +1184,7 @@ xfs_alloc_lshift( | |||
1185 | /* | 1184 | /* |
1186 | * If we've got no left sibling then we can't shift an entry left. | 1185 | * If we've got no left sibling then we can't shift an entry left. |
1187 | */ | 1186 | */ |
1188 | if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1187 | if (be32_to_cpu(right->bb_leftsib) == NULLAGBLOCK) { |
1189 | *stat = 0; | 1188 | *stat = 0; |
1190 | return 0; | 1189 | return 0; |
1191 | } | 1190 | } |
@@ -1201,8 +1200,8 @@ xfs_alloc_lshift( | |||
1201 | * Set up the left neighbor as "left". | 1200 | * Set up the left neighbor as "left". |
1202 | */ | 1201 | */ |
1203 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1202 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1204 | cur->bc_private.a.agno, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, &lbp, | 1203 | cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib), |
1205 | XFS_ALLOC_BTREE_REF))) | 1204 | 0, &lbp, XFS_ALLOC_BTREE_REF))) |
1206 | return error; | 1205 | return error; |
1207 | left = XFS_BUF_TO_ALLOC_BLOCK(lbp); | 1206 | left = XFS_BUF_TO_ALLOC_BLOCK(lbp); |
1208 | if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) | 1207 | if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) |
@@ -1210,11 +1209,11 @@ xfs_alloc_lshift( | |||
1210 | /* | 1209 | /* |
1211 | * If it's full, it can't take another entry. | 1210 | * If it's full, it can't take another entry. |
1212 | */ | 1211 | */ |
1213 | if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 1212 | if (be16_to_cpu(left->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
1214 | *stat = 0; | 1213 | *stat = 0; |
1215 | return 0; | 1214 | return 0; |
1216 | } | 1215 | } |
1217 | nrec = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1; | 1216 | nrec = be16_to_cpu(left->bb_numrecs) + 1; |
1218 | /* | 1217 | /* |
1219 | * If non-leaf, copy a key and a ptr to the left block. | 1218 | * If non-leaf, copy a key and a ptr to the left block. |
1220 | */ | 1219 | */ |
@@ -1229,7 +1228,7 @@ xfs_alloc_lshift( | |||
1229 | lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur); | 1228 | lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur); |
1230 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); | 1229 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); |
1231 | #ifdef DEBUG | 1230 | #ifdef DEBUG |
1232 | if ((error = xfs_btree_check_sptr(cur, INT_GET(*rpp, ARCH_CONVERT), level))) | 1231 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level))) |
1233 | return error; | 1232 | return error; |
1234 | #endif | 1233 | #endif |
1235 | *lpp = *rpp; /* INT_: copy */ | 1234 | *lpp = *rpp; /* INT_: copy */ |
@@ -1251,30 +1250,30 @@ xfs_alloc_lshift( | |||
1251 | /* | 1250 | /* |
1252 | * Bump and log left's numrecs, decrement and log right's numrecs. | 1251 | * Bump and log left's numrecs, decrement and log right's numrecs. |
1253 | */ | 1252 | */ |
1254 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, +1); | 1253 | be16_add(&left->bb_numrecs, 1); |
1255 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); | 1254 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); |
1256 | INT_MOD(right->bb_numrecs, ARCH_CONVERT, -1); | 1255 | be16_add(&right->bb_numrecs, -1); |
1257 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); | 1256 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); |
1258 | /* | 1257 | /* |
1259 | * Slide the contents of right down one entry. | 1258 | * Slide the contents of right down one entry. |
1260 | */ | 1259 | */ |
1261 | if (level > 0) { | 1260 | if (level > 0) { |
1262 | #ifdef DEBUG | 1261 | #ifdef DEBUG |
1263 | for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { | 1262 | for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { |
1264 | if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT), | 1263 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i + 1]), |
1265 | level))) | 1264 | level))) |
1266 | return error; | 1265 | return error; |
1267 | } | 1266 | } |
1268 | #endif | 1267 | #endif |
1269 | memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); | 1268 | memmove(rkp, rkp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); |
1270 | memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); | 1269 | memmove(rpp, rpp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); |
1271 | xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1270 | xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1272 | xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1271 | xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1273 | } else { | 1272 | } else { |
1274 | memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); | 1273 | memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); |
1275 | xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1274 | xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1276 | key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ | 1275 | key.ar_startblock = rrp->ar_startblock; |
1277 | key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ | 1276 | key.ar_blockcount = rrp->ar_blockcount; |
1278 | rkp = &key; | 1277 | rkp = &key; |
1279 | } | 1278 | } |
1280 | /* | 1279 | /* |
@@ -1339,9 +1338,9 @@ xfs_alloc_newroot( | |||
1339 | xfs_agnumber_t seqno; | 1338 | xfs_agnumber_t seqno; |
1340 | 1339 | ||
1341 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 1340 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
1342 | INT_SET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT, nbno); | 1341 | agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno); |
1343 | INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, 1); | 1342 | be32_add(&agf->agf_levels[cur->bc_btnum], 1); |
1344 | seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT); | 1343 | seqno = be32_to_cpu(agf->agf_seqno); |
1345 | mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++; | 1344 | mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++; |
1346 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 1345 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
1347 | XFS_AGF_ROOTS | XFS_AGF_LEVELS); | 1346 | XFS_AGF_ROOTS | XFS_AGF_LEVELS); |
@@ -1358,12 +1357,12 @@ xfs_alloc_newroot( | |||
1358 | if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp))) | 1357 | if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp))) |
1359 | return error; | 1358 | return error; |
1360 | #endif | 1359 | #endif |
1361 | if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 1360 | if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) { |
1362 | /* | 1361 | /* |
1363 | * Our block is left, pick up the right block. | 1362 | * Our block is left, pick up the right block. |
1364 | */ | 1363 | */ |
1365 | lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp)); | 1364 | lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp)); |
1366 | rbno = INT_GET(left->bb_rightsib, ARCH_CONVERT); | 1365 | rbno = be32_to_cpu(left->bb_rightsib); |
1367 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 1366 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
1368 | cur->bc_private.a.agno, rbno, 0, &rbp, | 1367 | cur->bc_private.a.agno, rbno, 0, &rbp, |
1369 | XFS_ALLOC_BTREE_REF))) | 1368 | XFS_ALLOC_BTREE_REF))) |
@@ -1380,7 +1379,7 @@ xfs_alloc_newroot( | |||
1380 | rbp = lbp; | 1379 | rbp = lbp; |
1381 | right = left; | 1380 | right = left; |
1382 | rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp)); | 1381 | rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp)); |
1383 | lbno = INT_GET(right->bb_leftsib, ARCH_CONVERT); | 1382 | lbno = be32_to_cpu(right->bb_leftsib); |
1384 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 1383 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
1385 | cur->bc_private.a.agno, lbno, 0, &lbp, | 1384 | cur->bc_private.a.agno, lbno, 0, &lbp, |
1386 | XFS_ALLOC_BTREE_REF))) | 1385 | XFS_ALLOC_BTREE_REF))) |
@@ -1394,11 +1393,11 @@ xfs_alloc_newroot( | |||
1394 | /* | 1393 | /* |
1395 | * Fill in the new block's btree header and log it. | 1394 | * Fill in the new block's btree header and log it. |
1396 | */ | 1395 | */ |
1397 | INT_SET(new->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); | 1396 | new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); |
1398 | INT_SET(new->bb_level, ARCH_CONVERT, (__uint16_t)cur->bc_nlevels); | 1397 | new->bb_level = cpu_to_be16(cur->bc_nlevels); |
1399 | INT_SET(new->bb_numrecs, ARCH_CONVERT, 2); | 1398 | new->bb_numrecs = cpu_to_be16(2); |
1400 | INT_SET(new->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); | 1399 | new->bb_leftsib = cpu_to_be32(NULLAGBLOCK); |
1401 | INT_SET(new->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); | 1400 | new->bb_rightsib = cpu_to_be32(NULLAGBLOCK); |
1402 | xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS); | 1401 | xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS); |
1403 | ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); | 1402 | ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); |
1404 | /* | 1403 | /* |
@@ -1408,18 +1407,18 @@ xfs_alloc_newroot( | |||
1408 | xfs_alloc_key_t *kp; /* btree key pointer */ | 1407 | xfs_alloc_key_t *kp; /* btree key pointer */ |
1409 | 1408 | ||
1410 | kp = XFS_ALLOC_KEY_ADDR(new, 1, cur); | 1409 | kp = XFS_ALLOC_KEY_ADDR(new, 1, cur); |
1411 | if (INT_GET(left->bb_level, ARCH_CONVERT) > 0) { | 1410 | if (be16_to_cpu(left->bb_level) > 0) { |
1412 | kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur); /* INT_: structure copy */ | 1411 | kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur); /* INT_: structure copy */ |
1413 | kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);/* INT_: structure copy */ | 1412 | kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);/* INT_: structure copy */ |
1414 | } else { | 1413 | } else { |
1415 | xfs_alloc_rec_t *rp; /* btree record pointer */ | 1414 | xfs_alloc_rec_t *rp; /* btree record pointer */ |
1416 | 1415 | ||
1417 | rp = XFS_ALLOC_REC_ADDR(left, 1, cur); | 1416 | rp = XFS_ALLOC_REC_ADDR(left, 1, cur); |
1418 | kp[0].ar_startblock = rp->ar_startblock; /* INT_: direct copy */ | 1417 | kp[0].ar_startblock = rp->ar_startblock; |
1419 | kp[0].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */ | 1418 | kp[0].ar_blockcount = rp->ar_blockcount; |
1420 | rp = XFS_ALLOC_REC_ADDR(right, 1, cur); | 1419 | rp = XFS_ALLOC_REC_ADDR(right, 1, cur); |
1421 | kp[1].ar_startblock = rp->ar_startblock; /* INT_: direct copy */ | 1420 | kp[1].ar_startblock = rp->ar_startblock; |
1422 | kp[1].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */ | 1421 | kp[1].ar_blockcount = rp->ar_blockcount; |
1423 | } | 1422 | } |
1424 | } | 1423 | } |
1425 | xfs_alloc_log_keys(cur, nbp, 1, 2); | 1424 | xfs_alloc_log_keys(cur, nbp, 1, 2); |
@@ -1430,8 +1429,8 @@ xfs_alloc_newroot( | |||
1430 | xfs_alloc_ptr_t *pp; /* btree address pointer */ | 1429 | xfs_alloc_ptr_t *pp; /* btree address pointer */ |
1431 | 1430 | ||
1432 | pp = XFS_ALLOC_PTR_ADDR(new, 1, cur); | 1431 | pp = XFS_ALLOC_PTR_ADDR(new, 1, cur); |
1433 | INT_SET(pp[0], ARCH_CONVERT, lbno); | 1432 | pp[0] = cpu_to_be32(lbno); |
1434 | INT_SET(pp[1], ARCH_CONVERT, rbno); | 1433 | pp[1] = cpu_to_be32(rbno); |
1435 | } | 1434 | } |
1436 | xfs_alloc_log_ptrs(cur, nbp, 1, 2); | 1435 | xfs_alloc_log_ptrs(cur, nbp, 1, 2); |
1437 | /* | 1436 | /* |
@@ -1476,7 +1475,7 @@ xfs_alloc_rshift( | |||
1476 | /* | 1475 | /* |
1477 | * If we've got no right sibling then we can't shift an entry right. | 1476 | * If we've got no right sibling then we can't shift an entry right. |
1478 | */ | 1477 | */ |
1479 | if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1478 | if (be32_to_cpu(left->bb_rightsib) == NULLAGBLOCK) { |
1480 | *stat = 0; | 1479 | *stat = 0; |
1481 | return 0; | 1480 | return 0; |
1482 | } | 1481 | } |
@@ -1484,7 +1483,7 @@ xfs_alloc_rshift( | |||
1484 | * If the cursor entry is the one that would be moved, don't | 1483 | * If the cursor entry is the one that would be moved, don't |
1485 | * do it... it's too complicated. | 1484 | * do it... it's too complicated. |
1486 | */ | 1485 | */ |
1487 | if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) { | 1486 | if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) { |
1488 | *stat = 0; | 1487 | *stat = 0; |
1489 | return 0; | 1488 | return 0; |
1490 | } | 1489 | } |
@@ -1492,8 +1491,8 @@ xfs_alloc_rshift( | |||
1492 | * Set up the right neighbor as "right". | 1491 | * Set up the right neighbor as "right". |
1493 | */ | 1492 | */ |
1494 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1493 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1495 | cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rbp, | 1494 | cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), |
1496 | XFS_ALLOC_BTREE_REF))) | 1495 | 0, &rbp, XFS_ALLOC_BTREE_REF))) |
1497 | return error; | 1496 | return error; |
1498 | right = XFS_BUF_TO_ALLOC_BLOCK(rbp); | 1497 | right = XFS_BUF_TO_ALLOC_BLOCK(rbp); |
1499 | if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) | 1498 | if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) |
@@ -1501,7 +1500,7 @@ xfs_alloc_rshift( | |||
1501 | /* | 1500 | /* |
1502 | * If it's full, it can't take another entry. | 1501 | * If it's full, it can't take another entry. |
1503 | */ | 1502 | */ |
1504 | if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 1503 | if (be16_to_cpu(right->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
1505 | *stat = 0; | 1504 | *stat = 0; |
1506 | return 0; | 1505 | return 0; |
1507 | } | 1506 | } |
@@ -1514,47 +1513,47 @@ xfs_alloc_rshift( | |||
1514 | xfs_alloc_ptr_t *lpp; /* address pointer for left block */ | 1513 | xfs_alloc_ptr_t *lpp; /* address pointer for left block */ |
1515 | xfs_alloc_ptr_t *rpp; /* address pointer for right block */ | 1514 | xfs_alloc_ptr_t *rpp; /* address pointer for right block */ |
1516 | 1515 | ||
1517 | lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1516 | lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1518 | lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1517 | lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1519 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); | 1518 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); |
1520 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); | 1519 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); |
1521 | #ifdef DEBUG | 1520 | #ifdef DEBUG |
1522 | for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) { | 1521 | for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) { |
1523 | if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) | 1522 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) |
1524 | return error; | 1523 | return error; |
1525 | } | 1524 | } |
1526 | #endif | 1525 | #endif |
1527 | memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); | 1526 | memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); |
1528 | memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); | 1527 | memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); |
1529 | #ifdef DEBUG | 1528 | #ifdef DEBUG |
1530 | if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) | 1529 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level))) |
1531 | return error; | 1530 | return error; |
1532 | #endif | 1531 | #endif |
1533 | *rkp = *lkp; /* INT_: copy */ | 1532 | *rkp = *lkp; /* INT_: copy */ |
1534 | *rpp = *lpp; /* INT_: copy */ | 1533 | *rpp = *lpp; /* INT_: copy */ |
1535 | xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1534 | xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1536 | xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1535 | xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1537 | xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); | 1536 | xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); |
1538 | } else { | 1537 | } else { |
1539 | xfs_alloc_rec_t *lrp; /* record pointer for left block */ | 1538 | xfs_alloc_rec_t *lrp; /* record pointer for left block */ |
1540 | xfs_alloc_rec_t *rrp; /* record pointer for right block */ | 1539 | xfs_alloc_rec_t *rrp; /* record pointer for right block */ |
1541 | 1540 | ||
1542 | lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1541 | lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1543 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); | 1542 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); |
1544 | memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); | 1543 | memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); |
1545 | *rrp = *lrp; | 1544 | *rrp = *lrp; |
1546 | xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1545 | xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1547 | key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ | 1546 | key.ar_startblock = rrp->ar_startblock; |
1548 | key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ | 1547 | key.ar_blockcount = rrp->ar_blockcount; |
1549 | rkp = &key; | 1548 | rkp = &key; |
1550 | xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1); | 1549 | xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1); |
1551 | } | 1550 | } |
1552 | /* | 1551 | /* |
1553 | * Decrement and log left's numrecs, bump and log right's numrecs. | 1552 | * Decrement and log left's numrecs, bump and log right's numrecs. |
1554 | */ | 1553 | */ |
1555 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1); | 1554 | be16_add(&left->bb_numrecs, -1); |
1556 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); | 1555 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); |
1557 | INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); | 1556 | be16_add(&right->bb_numrecs, 1); |
1558 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); | 1557 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); |
1559 | /* | 1558 | /* |
1560 | * Using a temporary cursor, update the parent key values of the | 1559 | * Using a temporary cursor, update the parent key values of the |
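The hunk above is typical of the whole conversion: every access to an on-disk field now goes through an explicit endianness helper (be16_to_cpu/cpu_to_be16/be16_add) instead of the old INT_GET/INT_SET/INT_MOD macros with ARCH_CONVERT. A minimal sketch of that pattern follows; it is illustrative only, not code from this commit. The demo_* names are made up, and demo_be16_add is shown with the read-modify-write semantics the callers above rely on.

#include <linux/types.h>	/* __be16 */
#include <asm/byteorder.h>	/* be16_to_cpu(), cpu_to_be16() */

/* Hypothetical on-disk header; the field is stored big-endian on disk. */
typedef struct demo_hdr {
	__be16	numrecs;
} demo_hdr_t;

/* Read-modify-write a big-endian 16-bit field, as be16_add() is used above. */
static inline void demo_be16_add(__be16 *field, int delta)
{
	*field = cpu_to_be16(be16_to_cpu(*field) + delta);
}

/* The decrement/bump pair from the hunk above, applied to the demo header. */
static void demo_shift_one_record(demo_hdr_t *left, demo_hdr_t *right)
{
	demo_be16_add(&left->numrecs, -1);
	demo_be16_add(&right->numrecs, 1);
}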
@@ -1627,17 +1626,17 @@ xfs_alloc_split( | |||
1627 | /* | 1626 | /* |
1628 | * Fill in the btree header for the new block. | 1627 | * Fill in the btree header for the new block. |
1629 | */ | 1628 | */ |
1630 | INT_SET(right->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); | 1629 | right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); |
1631 | right->bb_level = left->bb_level; /* INT_: direct copy */ | 1630 | right->bb_level = left->bb_level; |
1632 | INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2)); | 1631 | right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2); |
1633 | /* | 1632 | /* |
1634 | * Make sure that if there's an odd number of entries now, that | 1633 | * Make sure that if there's an odd number of entries now, that |
1635 | * each new block will have the same number of entries. | 1634 | * each new block will have the same number of entries. |
1636 | */ | 1635 | */ |
1637 | if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) && | 1636 | if ((be16_to_cpu(left->bb_numrecs) & 1) && |
1638 | cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1) | 1637 | cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) |
1639 | INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); | 1638 | be16_add(&right->bb_numrecs, 1); |
1640 | i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1; | 1639 | i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; |
1641 | /* | 1640 | /* |
1642 | * For non-leaf blocks, copy keys and addresses over to the new block. | 1641 | * For non-leaf blocks, copy keys and addresses over to the new block. |
1643 | */ | 1642 | */ |
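The split above gives the new right block half of left's records, rounding down, then adds one more when left held an odd count and the cursor points at or before the would-be split point, so both blocks carry the same number of entries once the pending insert lands. A hedged sketch of that arithmetic with plain integers (function and parameter names here are illustrative, not the on-disk layout):

/*
 * How many records the new right block receives when splitting a block
 * that currently holds left_numrecs records; cursor_ptr is the 1-based
 * cursor position at this level. Mirrors the arithmetic in the hunk above.
 */
static int demo_split_count(int left_numrecs, int cursor_ptr)
{
	int right_numrecs = left_numrecs / 2;

	/*
	 * If left held an odd number of records, give the extra one to
	 * whichever side the pending insert will land in, so both blocks
	 * end up equal once the new record goes in.
	 */
	if ((left_numrecs & 1) && cursor_ptr <= right_numrecs + 1)
		right_numrecs++;
	return right_numrecs;
}

The first record copied out of left is then index left_numrecs - right_numrecs + 1 (with right_numrecs already adjusted), which is the value computed into i above.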
@@ -1652,15 +1651,15 @@ xfs_alloc_split( | |||
1652 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); | 1651 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); |
1653 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); | 1652 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); |
1654 | #ifdef DEBUG | 1653 | #ifdef DEBUG |
1655 | for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { | 1654 | for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { |
1656 | if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) | 1655 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level))) |
1657 | return error; | 1656 | return error; |
1658 | } | 1657 | } |
1659 | #endif | 1658 | #endif |
1660 | memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); /* INT_: copy */ | 1659 | memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); |
1661 | memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); /* INT_: copy */ | 1660 | memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); |
1662 | xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1661 | xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1663 | xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1662 | xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1664 | *keyp = *rkp; | 1663 | *keyp = *rkp; |
1665 | } | 1664 | } |
1666 | /* | 1665 | /* |
@@ -1672,38 +1671,38 @@ xfs_alloc_split( | |||
1672 | 1671 | ||
1673 | lrp = XFS_ALLOC_REC_ADDR(left, i, cur); | 1672 | lrp = XFS_ALLOC_REC_ADDR(left, i, cur); |
1674 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); | 1673 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); |
1675 | memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); | 1674 | memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); |
1676 | xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1675 | xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1677 | keyp->ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ | 1676 | keyp->ar_startblock = rrp->ar_startblock; |
1678 | keyp->ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ | 1677 | keyp->ar_blockcount = rrp->ar_blockcount; |
1679 | } | 1678 | } |
1680 | /* | 1679 | /* |
1681 | * Find the left block number by looking in the buffer. | 1680 | * Find the left block number by looking in the buffer. |
1682 | * Adjust numrecs, sibling pointers. | 1681 | * Adjust numrecs, sibling pointers. |
1683 | */ | 1682 | */ |
1684 | lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp)); | 1683 | lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp)); |
1685 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT))); | 1684 | be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); |
1686 | right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */ | 1685 | right->bb_rightsib = left->bb_rightsib; |
1687 | INT_SET(left->bb_rightsib, ARCH_CONVERT, rbno); | 1686 | left->bb_rightsib = cpu_to_be32(rbno); |
1688 | INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno); | 1687 | right->bb_leftsib = cpu_to_be32(lbno); |
1689 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS); | 1688 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS); |
1690 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); | 1689 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); |
1691 | /* | 1690 | /* |
1692 | * If there's a block to the new block's right, make that block | 1691 | * If there's a block to the new block's right, make that block |
1693 | * point back to right instead of to left. | 1692 | * point back to right instead of to left. |
1694 | */ | 1693 | */ |
1695 | if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 1694 | if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) { |
1696 | xfs_alloc_block_t *rrblock; /* rr btree block */ | 1695 | xfs_alloc_block_t *rrblock; /* rr btree block */ |
1697 | xfs_buf_t *rrbp; /* buffer for rrblock */ | 1696 | xfs_buf_t *rrbp; /* buffer for rrblock */ |
1698 | 1697 | ||
1699 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1698 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1700 | cur->bc_private.a.agno, INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, | 1699 | cur->bc_private.a.agno, be32_to_cpu(right->bb_rightsib), 0, |
1701 | &rrbp, XFS_ALLOC_BTREE_REF))) | 1700 | &rrbp, XFS_ALLOC_BTREE_REF))) |
1702 | return error; | 1701 | return error; |
1703 | rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); | 1702 | rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); |
1704 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) | 1703 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) |
1705 | return error; | 1704 | return error; |
1706 | INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, rbno); | 1705 | rrblock->bb_leftsib = cpu_to_be32(rbno); |
1707 | xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); | 1706 | xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); |
1708 | } | 1707 | } |
1709 | /* | 1708 | /* |
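The sibling fixup above threads the new block into the doubly linked list of blocks at this level: right inherits left's old right sibling, left now points at right, right points back at left, and if a further-right block exists its left-sibling pointer is redirected to the new block. A minimal sketch with an in-memory list node standing in for the on-disk big-endian AG block numbers (demo_* names are illustrative):

/* Illustrative in-memory version of the sibling links; the real code
 * stores big-endian AG block numbers and uses NULLAGBLOCK for "none". */
typedef struct demo_block {
	struct demo_block	*leftsib;
	struct demo_block	*rightsib;
} demo_block_t;

/* Thread a freshly split right block in after left, as the hunk above
 * does with bb_leftsib/bb_rightsib. */
static void demo_link_after(demo_block_t *left, demo_block_t *right)
{
	right->rightsib = left->rightsib;	/* right inherits the old neighbour */
	right->leftsib = left;
	left->rightsib = right;
	if (right->rightsib)			/* NULLAGBLOCK test in the hunk above */
		right->rightsib->leftsib = right;	/* old neighbour points back at right */
}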
@@ -1711,9 +1710,9 @@ xfs_alloc_split( | |||
1711 | * If it's just pointing past the last entry in left, then we'll | 1710 | * If it's just pointing past the last entry in left, then we'll |
1712 | * insert there, so don't change anything in that case. | 1711 | * insert there, so don't change anything in that case. |
1713 | */ | 1712 | */ |
1714 | if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) { | 1713 | if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) { |
1715 | xfs_btree_setbuf(cur, level, rbp); | 1714 | xfs_btree_setbuf(cur, level, rbp); |
1716 | cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT); | 1715 | cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs); |
1717 | } | 1716 | } |
1718 | /* | 1717 | /* |
1719 | * If there are more levels, we'll need another cursor which refers to | 1718 | * If there are more levels, we'll need another cursor which refers to |
@@ -1811,7 +1810,7 @@ xfs_alloc_decrement( | |||
1811 | /* | 1810 | /* |
1812 | * If we just went off the left edge of the tree, return failure. | 1811 | * If we just went off the left edge of the tree, return failure. |
1813 | */ | 1812 | */ |
1814 | if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1813 | if (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK) { |
1815 | *stat = 0; | 1814 | *stat = 0; |
1816 | return 0; | 1815 | return 0; |
1817 | } | 1816 | } |
@@ -1840,7 +1839,7 @@ xfs_alloc_decrement( | |||
1840 | xfs_agblock_t agbno; /* block number of btree block */ | 1839 | xfs_agblock_t agbno; /* block number of btree block */ |
1841 | xfs_buf_t *bp; /* buffer pointer for block */ | 1840 | xfs_buf_t *bp; /* buffer pointer for block */ |
1842 | 1841 | ||
1843 | agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); | 1842 | agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); |
1844 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1843 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1845 | cur->bc_private.a.agno, agbno, 0, &bp, | 1844 | cur->bc_private.a.agno, agbno, 0, &bp, |
1846 | XFS_ALLOC_BTREE_REF))) | 1845 | XFS_ALLOC_BTREE_REF))) |
@@ -1850,7 +1849,7 @@ xfs_alloc_decrement( | |||
1850 | block = XFS_BUF_TO_ALLOC_BLOCK(bp); | 1849 | block = XFS_BUF_TO_ALLOC_BLOCK(bp); |
1851 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) | 1850 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) |
1852 | return error; | 1851 | return error; |
1853 | cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT); | 1852 | cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs); |
1854 | } | 1853 | } |
1855 | *stat = 1; | 1854 | *stat = 1; |
1856 | return 0; | 1855 | return 0; |
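Once xfs_alloc_decrement has found a level where it can step left, it walks back down; at each lower level it fetches the child's AG block number from the node's big-endian pointer array, reads and verifies that block, and parks the cursor on the child's last entry (bc_ptrs[lev] = numrecs, as above). A stripped-down sketch of just the pointer fetch, with hypothetical names and the buffer handling elided:

/* Fetch a child's AG block number from a node's pointer array; the
 * on-disk pointers are big-endian 32-bit values, hence be32_to_cpu()
 * in the hunks above. ptr is 1-based, matching bc_ptrs[]. */
static __u32 demo_child_agbno(const __be32 *ptrs, int ptr)
{
	return be32_to_cpu(ptrs[ptr - 1]);
}

The mirror-image walk in xfs_alloc_increment, further down in this diff, positions the lower levels at their first entry instead.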
@@ -1917,7 +1916,7 @@ xfs_alloc_get_rec( | |||
1917 | /* | 1916 | /* |
1918 | * Off the right end or left end, return failure. | 1917 | * Off the right end or left end, return failure. |
1919 | */ | 1918 | */ |
1920 | if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) { | 1919 | if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) { |
1921 | *stat = 0; | 1920 | *stat = 0; |
1922 | return 0; | 1921 | return 0; |
1923 | } | 1922 | } |
@@ -1928,8 +1927,8 @@ xfs_alloc_get_rec( | |||
1928 | xfs_alloc_rec_t *rec; /* record data */ | 1927 | xfs_alloc_rec_t *rec; /* record data */ |
1929 | 1928 | ||
1930 | rec = XFS_ALLOC_REC_ADDR(block, ptr, cur); | 1929 | rec = XFS_ALLOC_REC_ADDR(block, ptr, cur); |
1931 | *bno = INT_GET(rec->ar_startblock, ARCH_CONVERT); | 1930 | *bno = be32_to_cpu(rec->ar_startblock); |
1932 | *len = INT_GET(rec->ar_blockcount, ARCH_CONVERT); | 1931 | *len = be32_to_cpu(rec->ar_blockcount); |
1933 | } | 1932 | } |
1934 | *stat = 1; | 1933 | *stat = 1; |
1935 | return 0; | 1934 | return 0; |
@@ -1968,14 +1967,14 @@ xfs_alloc_increment( | |||
1968 | * Increment the ptr at this level. If we're still in the block | 1967 | * Increment the ptr at this level. If we're still in the block |
1969 | * then we're done. | 1968 | * then we're done. |
1970 | */ | 1969 | */ |
1971 | if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 1970 | if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) { |
1972 | *stat = 1; | 1971 | *stat = 1; |
1973 | return 0; | 1972 | return 0; |
1974 | } | 1973 | } |
1975 | /* | 1974 | /* |
1976 | * If we just went off the right edge of the tree, return failure. | 1975 | * If we just went off the right edge of the tree, return failure. |
1977 | */ | 1976 | */ |
1978 | if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1977 | if (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK) { |
1979 | *stat = 0; | 1978 | *stat = 0; |
1980 | return 0; | 1979 | return 0; |
1981 | } | 1980 | } |
@@ -1990,7 +1989,7 @@ xfs_alloc_increment( | |||
1990 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) | 1989 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) |
1991 | return error; | 1990 | return error; |
1992 | #endif | 1991 | #endif |
1993 | if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 1992 | if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs)) |
1994 | break; | 1993 | break; |
1995 | /* | 1994 | /* |
1996 | * Read-ahead the right block, we're going to read it | 1995 | * Read-ahead the right block, we're going to read it |
@@ -2010,7 +2009,7 @@ xfs_alloc_increment( | |||
2010 | lev > level; ) { | 2009 | lev > level; ) { |
2011 | xfs_agblock_t agbno; /* block number of btree block */ | 2010 | xfs_agblock_t agbno; /* block number of btree block */ |
2012 | 2011 | ||
2013 | agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); | 2012 | agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); |
2014 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 2013 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
2015 | cur->bc_private.a.agno, agbno, 0, &bp, | 2014 | cur->bc_private.a.agno, agbno, 0, &bp, |
2016 | XFS_ALLOC_BTREE_REF))) | 2015 | XFS_ALLOC_BTREE_REF))) |
@@ -2045,8 +2044,8 @@ xfs_alloc_insert( | |||
2045 | 2044 | ||
2046 | level = 0; | 2045 | level = 0; |
2047 | nbno = NULLAGBLOCK; | 2046 | nbno = NULLAGBLOCK; |
2048 | INT_SET(nrec.ar_startblock, ARCH_CONVERT, cur->bc_rec.a.ar_startblock); | 2047 | nrec.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock); |
2049 | INT_SET(nrec.ar_blockcount, ARCH_CONVERT, cur->bc_rec.a.ar_blockcount); | 2048 | nrec.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount); |
2050 | ncur = (xfs_btree_cur_t *)0; | 2049 | ncur = (xfs_btree_cur_t *)0; |
2051 | pcur = cur; | 2050 | pcur = cur; |
2052 | /* | 2051 | /* |
@@ -2167,8 +2166,8 @@ xfs_alloc_update( | |||
2167 | /* | 2166 | /* |
2168 | * Fill in the new contents and log them. | 2167 | * Fill in the new contents and log them. |
2169 | */ | 2168 | */ |
2170 | INT_SET(rp->ar_startblock, ARCH_CONVERT, bno); | 2169 | rp->ar_startblock = cpu_to_be32(bno); |
2171 | INT_SET(rp->ar_blockcount, ARCH_CONVERT, len); | 2170 | rp->ar_blockcount = cpu_to_be32(len); |
2172 | xfs_alloc_log_recs(cur, cur->bc_bufs[0], ptr, ptr); | 2171 | xfs_alloc_log_recs(cur, cur->bc_bufs[0], ptr, ptr); |
2173 | } | 2172 | } |
2174 | /* | 2173 | /* |
@@ -2177,15 +2176,15 @@ xfs_alloc_update( | |||
2177 | * extent in the a.g., which we cache in the a.g. freelist header. | 2176 | * extent in the a.g., which we cache in the a.g. freelist header. |
2178 | */ | 2177 | */ |
2179 | if (cur->bc_btnum == XFS_BTNUM_CNT && | 2178 | if (cur->bc_btnum == XFS_BTNUM_CNT && |
2180 | INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && | 2179 | be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && |
2181 | ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 2180 | ptr == be16_to_cpu(block->bb_numrecs)) { |
2182 | xfs_agf_t *agf; /* a.g. freespace header */ | 2181 | xfs_agf_t *agf; /* a.g. freespace header */ |
2183 | xfs_agnumber_t seqno; | 2182 | xfs_agnumber_t seqno; |
2184 | 2183 | ||
2185 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 2184 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
2186 | seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT); | 2185 | seqno = be32_to_cpu(agf->agf_seqno); |
2187 | cur->bc_mp->m_perag[seqno].pagf_longest = len; | 2186 | cur->bc_mp->m_perag[seqno].pagf_longest = len; |
2188 | INT_SET(agf->agf_longest, ARCH_CONVERT, len); | 2187 | agf->agf_longest = cpu_to_be32(len); |
2189 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 2188 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
2190 | XFS_AGF_LONGEST); | 2189 | XFS_AGF_LONGEST); |
2191 | } | 2190 | } |
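In the by-count (XFS_BTNUM_CNT) tree the records are ordered by extent length, so the record just updated is the allocation group's longest free extent exactly when its block has no right sibling and ptr names the block's last record; the hunk above then refreshes both the in-core per-AG cache (pagf_longest) and the on-disk AGF field. A small sketch of that test, with hypothetical names (NULLAGBLOCK is assumed to be (xfs_agblock_t)-1):

/* True when the record at ptr (1-based) is the last record of the
 * rightmost leaf, i.e. the longest free extent in the by-count tree. */
static int demo_is_longest_extent(__be32 rightsib, __be16 numrecs, int ptr)
{
	return be32_to_cpu(rightsib) == (__u32)-1 /* NULLAGBLOCK */ &&
	       ptr == be16_to_cpu(numrecs);
}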
@@ -2195,8 +2194,8 @@ xfs_alloc_update( | |||
2195 | if (ptr == 1) { | 2194 | if (ptr == 1) { |
2196 | xfs_alloc_key_t key; /* key containing [bno, len] */ | 2195 | xfs_alloc_key_t key; /* key containing [bno, len] */ |
2197 | 2196 | ||
2198 | INT_SET(key.ar_startblock, ARCH_CONVERT, bno); | 2197 | key.ar_startblock = cpu_to_be32(bno); |
2199 | INT_SET(key.ar_blockcount, ARCH_CONVERT, len); | 2198 | key.ar_blockcount = cpu_to_be32(len); |
2200 | if ((error = xfs_alloc_updkey(cur, &key, 1))) | 2199 | if ((error = xfs_alloc_updkey(cur, &key, 1))) |
2201 | return error; | 2200 | return error; |
2202 | } | 2201 | } |
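When the updated record is the first one in its block (ptr == 1), the key the parent holds for this block is stale, so the code above builds a [bno, len] key in on-disk (big-endian) form and propagates it upward through xfs_alloc_updkey. A hedged sketch of the key construction; demo_* names are made up, while the field names and byte order match the hunk above (the real code fills an xfs_alloc_key_t on the stack):

/* The key pushed up to the parent mirrors the record's start block and
 * length, stored big-endian like the records themselves. */
typedef struct demo_key {
	__be32	ar_startblock;
	__be32	ar_blockcount;
} demo_key_t;

static demo_key_t demo_make_key(__u32 bno, __u32 len)
{
	demo_key_t key = {
		.ar_startblock = cpu_to_be32(bno),
		.ar_blockcount = cpu_to_be32(len),
	};
	return key;
}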