author    | Christoph Hellwig <hch@sgi.com> | 2005-11-01 23:11:25 -0500
committer | Nathan Scott <nathans@sgi.com> | 2005-11-01 23:11:25 -0500
commit    | 16259e7d952e26e949cc2c8c68b74f34b293935d (patch)
tree      | a016791ecb67761236d32b9915efa9a92f6f3767 /fs
parent    | e2ed81fbbb7c76e0a1b3e2f1b5a7414f4d66a559 (diff)
[XFS] Endianess annotations for various allocator data structures
SGI-PV: 943272
SGI-Modid: xfs-linux:xfs-kern:201006a
Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/xfs/xfs_ag.h           |  58
-rw-r--r-- | fs/xfs/xfs_alloc.c        | 146
-rw-r--r-- | fs/xfs/xfs_alloc_btree.c  | 413
-rw-r--r-- | fs/xfs/xfs_alloc_btree.h  |  15
-rw-r--r-- | fs/xfs/xfs_bmap.c         |  62
-rw-r--r-- | fs/xfs/xfs_bmap_btree.c   | 230
-rw-r--r-- | fs/xfs/xfs_bmap_btree.h   |  44
-rw-r--r-- | fs/xfs/xfs_btree.c        | 104
-rw-r--r-- | fs/xfs/xfs_btree.h        |  55
-rw-r--r-- | fs/xfs/xfs_fsops.c        |  94
-rw-r--r-- | fs/xfs/xfs_ialloc.c       |  74
-rw-r--r-- | fs/xfs/xfs_ialloc_btree.c | 236
-rw-r--r-- | fs/xfs/xfs_ialloc_btree.h |   8
-rw-r--r-- | fs/xfs/xfs_inode.c        |  22
-rw-r--r-- | fs/xfs/xfs_itable.c       |   2
-rw-r--r-- | fs/xfs/xfs_log_recover.c  |  42
16 files changed, 794 insertions, 811 deletions
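
The pattern repeated throughout the diff below is mechanical: on-disk fields that used to be declared with host-order types (__uint32_t, xfs_agblock_t, xfs_agino_t, ...) and accessed through INT_GET()/INT_SET()/INT_MOD() with ARCH_CONVERT become explicitly big-endian __be32/__be16 fields accessed through the standard kernel byteswap helpers. A minimal standalone sketch of the before/after access pattern (the example_* names are illustrative, not part of XFS):

```c
#include <linux/types.h>        /* __be32, __u32 */
#include <asm/byteorder.h>      /* be32_to_cpu(), cpu_to_be32() */

/* On-disk structure: fields are stored big-endian regardless of the CPU. */
struct example_ondisk_hdr {
        __be32  magicnum;
        __be32  freeblks;
};

/*
 * Old style (removed by this patch):
 *      freeblks = INT_GET(hdr->freeblks, ARCH_CONVERT);
 *      INT_MOD(hdr->freeblks, ARCH_CONVERT, -(len));
 *
 * New style: convert explicitly at every access.
 */
static inline __u32 example_get_freeblks(struct example_ondisk_hdr *hdr)
{
        return be32_to_cpu(hdr->freeblks);
}

static inline void example_sub_freeblks(struct example_ondisk_hdr *hdr, __u32 len)
{
        /* read in CPU order, modify, write back in big-endian order */
        hdr->freeblks = cpu_to_be32(be32_to_cpu(hdr->freeblks) - len);
}
```

On big-endian hosts the helpers compile away to nothing; on little-endian hosts they perform the swap that INT_GET() previously hid behind ARCH_CONVERT.
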
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 8d055593fee4..a96e2ffce0cc 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -48,27 +48,26 @@ struct xfs_trans; | |||
48 | * are > 64k, our value cannot be confused for an EFS superblock's. | 48 | * are > 64k, our value cannot be confused for an EFS superblock's. |
49 | */ | 49 | */ |
50 | 50 | ||
51 | typedef struct xfs_agf | 51 | typedef struct xfs_agf { |
52 | { | ||
53 | /* | 52 | /* |
54 | * Common allocation group header information | 53 | * Common allocation group header information |
55 | */ | 54 | */ |
56 | __uint32_t agf_magicnum; /* magic number == XFS_AGF_MAGIC */ | 55 | __be32 agf_magicnum; /* magic number == XFS_AGF_MAGIC */ |
57 | __uint32_t agf_versionnum; /* header version == XFS_AGF_VERSION */ | 56 | __be32 agf_versionnum; /* header version == XFS_AGF_VERSION */ |
58 | xfs_agnumber_t agf_seqno; /* sequence # starting from 0 */ | 57 | __be32 agf_seqno; /* sequence # starting from 0 */ |
59 | xfs_agblock_t agf_length; /* size in blocks of a.g. */ | 58 | __be32 agf_length; /* size in blocks of a.g. */ |
60 | /* | 59 | /* |
61 | * Freespace information | 60 | * Freespace information |
62 | */ | 61 | */ |
63 | xfs_agblock_t agf_roots[XFS_BTNUM_AGF]; /* root blocks */ | 62 | __be32 agf_roots[XFS_BTNUM_AGF]; /* root blocks */ |
64 | __uint32_t agf_spare0; /* spare field */ | 63 | __be32 agf_spare0; /* spare field */ |
65 | __uint32_t agf_levels[XFS_BTNUM_AGF]; /* btree levels */ | 64 | __be32 agf_levels[XFS_BTNUM_AGF]; /* btree levels */ |
66 | __uint32_t agf_spare1; /* spare field */ | 65 | __be32 agf_spare1; /* spare field */ |
67 | __uint32_t agf_flfirst; /* first freelist block's index */ | 66 | __be32 agf_flfirst; /* first freelist block's index */ |
68 | __uint32_t agf_fllast; /* last freelist block's index */ | 67 | __be32 agf_fllast; /* last freelist block's index */ |
69 | __uint32_t agf_flcount; /* count of blocks in freelist */ | 68 | __be32 agf_flcount; /* count of blocks in freelist */ |
70 | xfs_extlen_t agf_freeblks; /* total free blocks */ | 69 | __be32 agf_freeblks; /* total free blocks */ |
71 | xfs_extlen_t agf_longest; /* longest free space */ | 70 | __be32 agf_longest; /* longest free space */ |
72 | } xfs_agf_t; | 71 | } xfs_agf_t; |
73 | 72 | ||
74 | #define XFS_AGF_MAGICNUM 0x00000001 | 73 | #define XFS_AGF_MAGICNUM 0x00000001 |
@@ -96,31 +95,30 @@ typedef struct xfs_agf | |||
96 | */ | 95 | */ |
97 | #define XFS_AGI_UNLINKED_BUCKETS 64 | 96 | #define XFS_AGI_UNLINKED_BUCKETS 64 |
98 | 97 | ||
99 | typedef struct xfs_agi | 98 | typedef struct xfs_agi { |
100 | { | ||
101 | /* | 99 | /* |
102 | * Common allocation group header information | 100 | * Common allocation group header information |
103 | */ | 101 | */ |
104 | __uint32_t agi_magicnum; /* magic number == XFS_AGI_MAGIC */ | 102 | __be32 agi_magicnum; /* magic number == XFS_AGI_MAGIC */ |
105 | __uint32_t agi_versionnum; /* header version == XFS_AGI_VERSION */ | 103 | __be32 agi_versionnum; /* header version == XFS_AGI_VERSION */ |
106 | xfs_agnumber_t agi_seqno; /* sequence # starting from 0 */ | 104 | __be32 agi_seqno; /* sequence # starting from 0 */ |
107 | xfs_agblock_t agi_length; /* size in blocks of a.g. */ | 105 | __be32 agi_length; /* size in blocks of a.g. */ |
108 | /* | 106 | /* |
109 | * Inode information | 107 | * Inode information |
110 | * Inodes are mapped by interpreting the inode number, so no | 108 | * Inodes are mapped by interpreting the inode number, so no |
111 | * mapping data is needed here. | 109 | * mapping data is needed here. |
112 | */ | 110 | */ |
113 | xfs_agino_t agi_count; /* count of allocated inodes */ | 111 | __be32 agi_count; /* count of allocated inodes */ |
114 | xfs_agblock_t agi_root; /* root of inode btree */ | 112 | __be32 agi_root; /* root of inode btree */ |
115 | __uint32_t agi_level; /* levels in inode btree */ | 113 | __be32 agi_level; /* levels in inode btree */ |
116 | xfs_agino_t agi_freecount; /* number of free inodes */ | 114 | __be32 agi_freecount; /* number of free inodes */ |
117 | xfs_agino_t agi_newino; /* new inode just allocated */ | 115 | __be32 agi_newino; /* new inode just allocated */ |
118 | xfs_agino_t agi_dirino; /* last directory inode chunk */ | 116 | __be32 agi_dirino; /* last directory inode chunk */ |
119 | /* | 117 | /* |
120 | * Hash table of inodes which have been unlinked but are | 118 | * Hash table of inodes which have been unlinked but are |
121 | * still being referenced. | 119 | * still being referenced. |
122 | */ | 120 | */ |
123 | xfs_agino_t agi_unlinked[XFS_AGI_UNLINKED_BUCKETS]; | 121 | __be32 agi_unlinked[XFS_AGI_UNLINKED_BUCKETS]; |
124 | } xfs_agi_t; | 122 | } xfs_agi_t; |
125 | 123 | ||
126 | #define XFS_AGI_MAGICNUM 0x00000001 | 124 | #define XFS_AGI_MAGICNUM 0x00000001 |
@@ -201,8 +199,8 @@ typedef struct xfs_perag | |||
201 | (MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + MIN(cl + 1, XFS_AG_MAXLEVELS(mp))) | 199 | (MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + MIN(cl + 1, XFS_AG_MAXLEVELS(mp))) |
202 | #define XFS_MIN_FREELIST(a,mp) \ | 200 | #define XFS_MIN_FREELIST(a,mp) \ |
203 | (XFS_MIN_FREELIST_RAW( \ | 201 | (XFS_MIN_FREELIST_RAW( \ |
204 | INT_GET((a)->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT), \ | 202 | be32_to_cpu((a)->agf_levels[XFS_BTNUM_BNOi]), \ |
205 | INT_GET((a)->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT), mp)) | 203 | be32_to_cpu((a)->agf_levels[XFS_BTNUM_CNTi]), mp)) |
206 | #define XFS_MIN_FREELIST_PAG(pag,mp) \ | 204 | #define XFS_MIN_FREELIST_PAG(pag,mp) \ |
207 | (XFS_MIN_FREELIST_RAW( \ | 205 | (XFS_MIN_FREELIST_RAW( \ |
208 | (uint_t)(pag)->pagf_levels[XFS_BTNUM_BNOi], \ | 206 | (uint_t)(pag)->pagf_levels[XFS_BTNUM_BNOi], \ |
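
What the __be32 annotations in xfs_agf/xfs_agi buy is that byte order becomes part of the field's type, so a forgotten conversion is a type mismatch that tools such as sparse can flag, rather than a silent corruption on little-endian hosts. A sketch of the difference (illustrative helpers, not XFS code):

```c
/*
 * Mixes a big-endian on-disk value with a CPU-order value: the kind of
 * bug the annotation is meant to surface.
 */
static int example_has_space_buggy(xfs_agf_t *agf, __u32 want)
{
        return agf->agf_freeblks >= want;
}

/* The pattern this patch converts every access to. */
static int example_has_space(xfs_agf_t *agf, __u32 want)
{
        return be32_to_cpu(agf->agf_freeblks) >= want;
}
```
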
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index a439e07253a5..f4328e1e2a74 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -231,8 +231,8 @@ xfs_alloc_fix_minleft( | |||
231 | if (args->minleft == 0) | 231 | if (args->minleft == 0) |
232 | return 1; | 232 | return 1; |
233 | agf = XFS_BUF_TO_AGF(args->agbp); | 233 | agf = XFS_BUF_TO_AGF(args->agbp); |
234 | diff = INT_GET(agf->agf_freeblks, ARCH_CONVERT) | 234 | diff = be32_to_cpu(agf->agf_freeblks) |
235 | + INT_GET(agf->agf_flcount, ARCH_CONVERT) | 235 | + be32_to_cpu(agf->agf_flcount) |
236 | - args->len - args->minleft; | 236 | - args->len - args->minleft; |
237 | if (diff >= 0) | 237 | if (diff >= 0) |
238 | return 1; | 238 | return 1; |
@@ -307,7 +307,8 @@ xfs_alloc_fixup_trees( | |||
307 | bnoblock = XFS_BUF_TO_ALLOC_BLOCK(bno_cur->bc_bufs[0]); | 307 | bnoblock = XFS_BUF_TO_ALLOC_BLOCK(bno_cur->bc_bufs[0]); |
308 | cntblock = XFS_BUF_TO_ALLOC_BLOCK(cnt_cur->bc_bufs[0]); | 308 | cntblock = XFS_BUF_TO_ALLOC_BLOCK(cnt_cur->bc_bufs[0]); |
309 | XFS_WANT_CORRUPTED_RETURN( | 309 | XFS_WANT_CORRUPTED_RETURN( |
310 | INT_GET(bnoblock->bb_numrecs, ARCH_CONVERT) == INT_GET(cntblock->bb_numrecs, ARCH_CONVERT)); | 310 | be16_to_cpu(bnoblock->bb_numrecs) == |
311 | be16_to_cpu(cntblock->bb_numrecs)); | ||
311 | } | 312 | } |
312 | } | 313 | } |
313 | #endif | 314 | #endif |
@@ -493,21 +494,17 @@ xfs_alloc_trace_modagf( | |||
493 | (void *)str, | 494 | (void *)str, |
494 | (void *)mp, | 495 | (void *)mp, |
495 | (void *)(__psint_t)flags, | 496 | (void *)(__psint_t)flags, |
496 | (void *)(__psunsigned_t)INT_GET(agf->agf_seqno, ARCH_CONVERT), | 497 | (void *)(__psunsigned_t)be32_to_cpu(agf->agf_seqno), |
497 | (void *)(__psunsigned_t)INT_GET(agf->agf_length, ARCH_CONVERT), | 498 | (void *)(__psunsigned_t)be32_to_cpu(agf->agf_length), |
498 | (void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_BNO], | 499 | (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]), |
499 | ARCH_CONVERT), | 500 | (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]), |
500 | (void *)(__psunsigned_t)INT_GET(agf->agf_roots[XFS_BTNUM_CNT], | 501 | (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]), |
501 | ARCH_CONVERT), | 502 | (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]), |
502 | (void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_BNO], | 503 | (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flfirst), |
503 | ARCH_CONVERT), | 504 | (void *)(__psunsigned_t)be32_to_cpu(agf->agf_fllast), |
504 | (void *)(__psunsigned_t)INT_GET(agf->agf_levels[XFS_BTNUM_CNT], | 505 | (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flcount), |
505 | ARCH_CONVERT), | 506 | (void *)(__psunsigned_t)be32_to_cpu(agf->agf_freeblks), |
506 | (void *)(__psunsigned_t)INT_GET(agf->agf_flfirst, ARCH_CONVERT), | 507 | (void *)(__psunsigned_t)be32_to_cpu(agf->agf_longest)); |
507 | (void *)(__psunsigned_t)INT_GET(agf->agf_fllast, ARCH_CONVERT), | ||
508 | (void *)(__psunsigned_t)INT_GET(agf->agf_flcount, ARCH_CONVERT), | ||
509 | (void *)(__psunsigned_t)INT_GET(agf->agf_freeblks, ARCH_CONVERT), | ||
510 | (void *)(__psunsigned_t)INT_GET(agf->agf_longest, ARCH_CONVERT)); | ||
511 | } | 508 | } |
512 | 509 | ||
513 | STATIC void | 510 | STATIC void |
@@ -600,12 +597,12 @@ xfs_alloc_ag_vextent( | |||
600 | if (!(args->wasfromfl)) { | 597 | if (!(args->wasfromfl)) { |
601 | 598 | ||
602 | agf = XFS_BUF_TO_AGF(args->agbp); | 599 | agf = XFS_BUF_TO_AGF(args->agbp); |
603 | INT_MOD(agf->agf_freeblks, ARCH_CONVERT, -(args->len)); | 600 | be32_add(&agf->agf_freeblks, -(args->len)); |
604 | xfs_trans_agblocks_delta(args->tp, | 601 | xfs_trans_agblocks_delta(args->tp, |
605 | -((long)(args->len))); | 602 | -((long)(args->len))); |
606 | args->pag->pagf_freeblks -= args->len; | 603 | args->pag->pagf_freeblks -= args->len; |
607 | ASSERT(INT_GET(agf->agf_freeblks, ARCH_CONVERT) | 604 | ASSERT(be32_to_cpu(agf->agf_freeblks) <= |
608 | <= INT_GET(agf->agf_length, ARCH_CONVERT)); | 605 | be32_to_cpu(agf->agf_length)); |
609 | TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS); | 606 | TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS); |
610 | xfs_alloc_log_agf(args->tp, args->agbp, | 607 | xfs_alloc_log_agf(args->tp, args->agbp, |
611 | XFS_AGF_FREEBLKS); | 608 | XFS_AGF_FREEBLKS); |
@@ -711,8 +708,7 @@ xfs_alloc_ag_vextent_exact( | |||
711 | cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, | 708 | cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, |
712 | args->agno, XFS_BTNUM_CNT, NULL, 0); | 709 | args->agno, XFS_BTNUM_CNT, NULL, 0); |
713 | ASSERT(args->agbno + args->len <= | 710 | ASSERT(args->agbno + args->len <= |
714 | INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, | 711 | be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); |
715 | ARCH_CONVERT)); | ||
716 | if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, | 712 | if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, |
717 | args->agbno, args->len, XFSA_FIXUP_BNO_OK))) { | 713 | args->agbno, args->len, XFSA_FIXUP_BNO_OK))) { |
718 | xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); | 714 | xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); |
@@ -885,8 +881,7 @@ xfs_alloc_ag_vextent_near( | |||
885 | goto error0; | 881 | goto error0; |
886 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | 882 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); |
887 | ltend = ltbno + ltlen; | 883 | ltend = ltbno + ltlen; |
888 | ASSERT(ltend <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, | 884 | ASSERT(ltend <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); |
889 | ARCH_CONVERT)); | ||
890 | args->len = blen; | 885 | args->len = blen; |
891 | if (!xfs_alloc_fix_minleft(args)) { | 886 | if (!xfs_alloc_fix_minleft(args)) { |
892 | xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); | 887 | xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); |
@@ -1241,8 +1236,7 @@ xfs_alloc_ag_vextent_near( | |||
1241 | ltlen, <new); | 1236 | ltlen, <new); |
1242 | ASSERT(ltnew >= ltbno); | 1237 | ASSERT(ltnew >= ltbno); |
1243 | ASSERT(ltnew + rlen <= ltend); | 1238 | ASSERT(ltnew + rlen <= ltend); |
1244 | ASSERT(ltnew + rlen <= INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, | 1239 | ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); |
1245 | ARCH_CONVERT)); | ||
1246 | args->agbno = ltnew; | 1240 | args->agbno = ltnew; |
1247 | if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen, | 1241 | if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen, |
1248 | ltnew, rlen, XFSA_FIXUP_BNO_OK))) | 1242 | ltnew, rlen, XFSA_FIXUP_BNO_OK))) |
@@ -1405,8 +1399,7 @@ xfs_alloc_ag_vextent_size( | |||
1405 | args->agbno = rbno; | 1399 | args->agbno = rbno; |
1406 | XFS_WANT_CORRUPTED_GOTO( | 1400 | XFS_WANT_CORRUPTED_GOTO( |
1407 | args->agbno + args->len <= | 1401 | args->agbno + args->len <= |
1408 | INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, | 1402 | be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length), |
1409 | ARCH_CONVERT), | ||
1410 | error0); | 1403 | error0); |
1411 | TRACE_ALLOC("normal", args); | 1404 | TRACE_ALLOC("normal", args); |
1412 | return 0; | 1405 | return 0; |
@@ -1454,8 +1447,8 @@ xfs_alloc_ag_vextent_small( | |||
1454 | * freelist. | 1447 | * freelist. |
1455 | */ | 1448 | */ |
1456 | else if (args->minlen == 1 && args->alignment == 1 && !args->isfl && | 1449 | else if (args->minlen == 1 && args->alignment == 1 && !args->isfl && |
1457 | (INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_flcount, | 1450 | (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount) |
1458 | ARCH_CONVERT) > args->minleft)) { | 1451 | > args->minleft)) { |
1459 | if ((error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno))) | 1452 | if ((error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno))) |
1460 | goto error0; | 1453 | goto error0; |
1461 | if (fbno != NULLAGBLOCK) { | 1454 | if (fbno != NULLAGBLOCK) { |
@@ -1470,8 +1463,7 @@ xfs_alloc_ag_vextent_small( | |||
1470 | args->agbno = fbno; | 1463 | args->agbno = fbno; |
1471 | XFS_WANT_CORRUPTED_GOTO( | 1464 | XFS_WANT_CORRUPTED_GOTO( |
1472 | args->agbno + args->len <= | 1465 | args->agbno + args->len <= |
1473 | INT_GET(XFS_BUF_TO_AGF(args->agbp)->agf_length, | 1466 | be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length), |
1474 | ARCH_CONVERT), | ||
1475 | error0); | 1467 | error0); |
1476 | args->wasfromfl = 1; | 1468 | args->wasfromfl = 1; |
1477 | TRACE_ALLOC("freelist", args); | 1469 | TRACE_ALLOC("freelist", args); |
@@ -1745,12 +1737,12 @@ xfs_free_ag_extent( | |||
1745 | 1737 | ||
1746 | agf = XFS_BUF_TO_AGF(agbp); | 1738 | agf = XFS_BUF_TO_AGF(agbp); |
1747 | pag = &mp->m_perag[agno]; | 1739 | pag = &mp->m_perag[agno]; |
1748 | INT_MOD(agf->agf_freeblks, ARCH_CONVERT, len); | 1740 | be32_add(&agf->agf_freeblks, len); |
1749 | xfs_trans_agblocks_delta(tp, len); | 1741 | xfs_trans_agblocks_delta(tp, len); |
1750 | pag->pagf_freeblks += len; | 1742 | pag->pagf_freeblks += len; |
1751 | XFS_WANT_CORRUPTED_GOTO( | 1743 | XFS_WANT_CORRUPTED_GOTO( |
1752 | INT_GET(agf->agf_freeblks, ARCH_CONVERT) | 1744 | be32_to_cpu(agf->agf_freeblks) <= |
1753 | <= INT_GET(agf->agf_length, ARCH_CONVERT), | 1745 | be32_to_cpu(agf->agf_length), |
1754 | error0); | 1746 | error0); |
1755 | TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS); | 1747 | TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS); |
1756 | xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); | 1748 | xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); |
@@ -1897,18 +1889,18 @@ xfs_alloc_fix_freelist( | |||
1897 | */ | 1889 | */ |
1898 | agf = XFS_BUF_TO_AGF(agbp); | 1890 | agf = XFS_BUF_TO_AGF(agbp); |
1899 | need = XFS_MIN_FREELIST(agf, mp); | 1891 | need = XFS_MIN_FREELIST(agf, mp); |
1900 | delta = need > INT_GET(agf->agf_flcount, ARCH_CONVERT) ? | 1892 | delta = need > be32_to_cpu(agf->agf_flcount) ? |
1901 | (need - INT_GET(agf->agf_flcount, ARCH_CONVERT)) : 0; | 1893 | (need - be32_to_cpu(agf->agf_flcount)) : 0; |
1902 | /* | 1894 | /* |
1903 | * If there isn't enough total or single-extent, reject it. | 1895 | * If there isn't enough total or single-extent, reject it. |
1904 | */ | 1896 | */ |
1905 | longest = INT_GET(agf->agf_longest, ARCH_CONVERT); | 1897 | longest = be32_to_cpu(agf->agf_longest); |
1906 | longest = (longest > delta) ? (longest - delta) : | 1898 | longest = (longest > delta) ? (longest - delta) : |
1907 | (INT_GET(agf->agf_flcount, ARCH_CONVERT) > 0 || longest > 0); | 1899 | (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0); |
1908 | if (args->minlen + args->alignment + args->minalignslop - 1 > longest || | 1900 | if (args->minlen + args->alignment + args->minalignslop - 1 > longest || |
1909 | (args->minleft && | 1901 | (args->minleft && |
1910 | (int)(INT_GET(agf->agf_freeblks, ARCH_CONVERT) + | 1902 | (int)(be32_to_cpu(agf->agf_freeblks) + |
1911 | INT_GET(agf->agf_flcount, ARCH_CONVERT) - need - args->total) < | 1903 | be32_to_cpu(agf->agf_flcount) - need - args->total) < |
1912 | (int)args->minleft)) { | 1904 | (int)args->minleft)) { |
1913 | xfs_trans_brelse(tp, agbp); | 1905 | xfs_trans_brelse(tp, agbp); |
1914 | args->agbp = NULL; | 1906 | args->agbp = NULL; |
@@ -1917,7 +1909,7 @@ xfs_alloc_fix_freelist( | |||
1917 | /* | 1909 | /* |
1918 | * Make the freelist shorter if it's too long. | 1910 | * Make the freelist shorter if it's too long. |
1919 | */ | 1911 | */ |
1920 | while (INT_GET(agf->agf_flcount, ARCH_CONVERT) > need) { | 1912 | while (be32_to_cpu(agf->agf_flcount) > need) { |
1921 | xfs_buf_t *bp; | 1913 | xfs_buf_t *bp; |
1922 | 1914 | ||
1923 | if ((error = xfs_alloc_get_freelist(tp, agbp, &bno))) | 1915 | if ((error = xfs_alloc_get_freelist(tp, agbp, &bno))) |
@@ -1944,9 +1936,9 @@ xfs_alloc_fix_freelist( | |||
1944 | /* | 1936 | /* |
1945 | * Make the freelist longer if it's too short. | 1937 | * Make the freelist longer if it's too short. |
1946 | */ | 1938 | */ |
1947 | while (INT_GET(agf->agf_flcount, ARCH_CONVERT) < need) { | 1939 | while (be32_to_cpu(agf->agf_flcount) < need) { |
1948 | targs.agbno = 0; | 1940 | targs.agbno = 0; |
1949 | targs.maxlen = need - INT_GET(agf->agf_flcount, ARCH_CONVERT); | 1941 | targs.maxlen = need - be32_to_cpu(agf->agf_flcount); |
1950 | /* | 1942 | /* |
1951 | * Allocate as many blocks as possible at once. | 1943 | * Allocate as many blocks as possible at once. |
1952 | */ | 1944 | */ |
@@ -2006,19 +1998,19 @@ xfs_alloc_get_freelist( | |||
2006 | */ | 1998 | */ |
2007 | mp = tp->t_mountp; | 1999 | mp = tp->t_mountp; |
2008 | if ((error = xfs_alloc_read_agfl(mp, tp, | 2000 | if ((error = xfs_alloc_read_agfl(mp, tp, |
2009 | INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp))) | 2001 | be32_to_cpu(agf->agf_seqno), &agflbp))) |
2010 | return error; | 2002 | return error; |
2011 | agfl = XFS_BUF_TO_AGFL(agflbp); | 2003 | agfl = XFS_BUF_TO_AGFL(agflbp); |
2012 | /* | 2004 | /* |
2013 | * Get the block number and update the data structures. | 2005 | * Get the block number and update the data structures. |
2014 | */ | 2006 | */ |
2015 | bno = INT_GET(agfl->agfl_bno[INT_GET(agf->agf_flfirst, ARCH_CONVERT)], ARCH_CONVERT); | 2007 | bno = INT_GET(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)], ARCH_CONVERT); |
2016 | INT_MOD(agf->agf_flfirst, ARCH_CONVERT, 1); | 2008 | be32_add(&agf->agf_flfirst, 1); |
2017 | xfs_trans_brelse(tp, agflbp); | 2009 | xfs_trans_brelse(tp, agflbp); |
2018 | if (INT_GET(agf->agf_flfirst, ARCH_CONVERT) == XFS_AGFL_SIZE(mp)) | 2010 | if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp)) |
2019 | agf->agf_flfirst = 0; | 2011 | agf->agf_flfirst = 0; |
2020 | pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)]; | 2012 | pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)]; |
2021 | INT_MOD(agf->agf_flcount, ARCH_CONVERT, -1); | 2013 | be32_add(&agf->agf_flcount, -1); |
2022 | xfs_trans_agflist_delta(tp, -1); | 2014 | xfs_trans_agflist_delta(tp, -1); |
2023 | pag->pagf_flcount--; | 2015 | pag->pagf_flcount--; |
2024 | TRACE_MODAGF(NULL, agf, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT); | 2016 | TRACE_MODAGF(NULL, agf, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT); |
@@ -2033,7 +2025,7 @@ xfs_alloc_get_freelist( | |||
2033 | * the freeing transaction must be pushed to disk NOW by forcing | 2025 | * the freeing transaction must be pushed to disk NOW by forcing |
2034 | * to disk all iclogs up that transaction's LSN. | 2026 | * to disk all iclogs up that transaction's LSN. |
2035 | */ | 2027 | */ |
2036 | xfs_alloc_search_busy(tp, INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); | 2028 | xfs_alloc_search_busy(tp, be32_to_cpu(agf->agf_seqno), bno, 1); |
2037 | return 0; | 2029 | return 0; |
2038 | } | 2030 | } |
2039 | 2031 | ||
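
The xfs_alloc_get_freelist() hunk above is a good example of the mixed idioms after conversion: the index is bumped in place with be32_add(), read back with be32_to_cpu() for the wrap test, and reset with a plain assignment because zero is the same in either byte order. A simplified sketch of just that wrap logic (example_* is illustrative; error handling and logging are omitted):

```c
static void example_advance_flfirst(xfs_agf_t *agf, unsigned int agfl_size)
{
        be32_add(&agf->agf_flfirst, 1);                 /* in-place big-endian add */
        if (be32_to_cpu(agf->agf_flfirst) == agfl_size) /* compare in CPU order */
                agf->agf_flfirst = 0;                   /* 0 needs no conversion */
}
```
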
@@ -2111,18 +2103,18 @@ xfs_alloc_put_freelist( | |||
2111 | mp = tp->t_mountp; | 2103 | mp = tp->t_mountp; |
2112 | 2104 | ||
2113 | if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp, | 2105 | if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp, |
2114 | INT_GET(agf->agf_seqno, ARCH_CONVERT), &agflbp))) | 2106 | be32_to_cpu(agf->agf_seqno), &agflbp))) |
2115 | return error; | 2107 | return error; |
2116 | agfl = XFS_BUF_TO_AGFL(agflbp); | 2108 | agfl = XFS_BUF_TO_AGFL(agflbp); |
2117 | INT_MOD(agf->agf_fllast, ARCH_CONVERT, 1); | 2109 | be32_add(&agf->agf_fllast, 1); |
2118 | if (INT_GET(agf->agf_fllast, ARCH_CONVERT) == XFS_AGFL_SIZE(mp)) | 2110 | if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp)) |
2119 | agf->agf_fllast = 0; | 2111 | agf->agf_fllast = 0; |
2120 | pag = &mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)]; | 2112 | pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)]; |
2121 | INT_MOD(agf->agf_flcount, ARCH_CONVERT, 1); | 2113 | be32_add(&agf->agf_flcount, 1); |
2122 | xfs_trans_agflist_delta(tp, 1); | 2114 | xfs_trans_agflist_delta(tp, 1); |
2123 | pag->pagf_flcount++; | 2115 | pag->pagf_flcount++; |
2124 | ASSERT(INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp)); | 2116 | ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)); |
2125 | blockp = &agfl->agfl_bno[INT_GET(agf->agf_fllast, ARCH_CONVERT)]; | 2117 | blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)]; |
2126 | INT_SET(*blockp, ARCH_CONVERT, bno); | 2118 | INT_SET(*blockp, ARCH_CONVERT, bno); |
2127 | TRACE_MODAGF(NULL, agf, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); | 2119 | TRACE_MODAGF(NULL, agf, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); |
2128 | xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); | 2120 | xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); |
@@ -2169,14 +2161,12 @@ xfs_alloc_read_agf( | |||
2169 | */ | 2161 | */ |
2170 | agf = XFS_BUF_TO_AGF(bp); | 2162 | agf = XFS_BUF_TO_AGF(bp); |
2171 | agf_ok = | 2163 | agf_ok = |
2172 | INT_GET(agf->agf_magicnum, ARCH_CONVERT) == XFS_AGF_MAGIC && | 2164 | be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC && |
2173 | XFS_AGF_GOOD_VERSION( | 2165 | XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) && |
2174 | INT_GET(agf->agf_versionnum, ARCH_CONVERT)) && | 2166 | be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) && |
2175 | INT_GET(agf->agf_freeblks, ARCH_CONVERT) <= | 2167 | be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) && |
2176 | INT_GET(agf->agf_length, ARCH_CONVERT) && | 2168 | be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) && |
2177 | INT_GET(agf->agf_flfirst, ARCH_CONVERT) < XFS_AGFL_SIZE(mp) && | 2169 | be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp); |
2178 | INT_GET(agf->agf_fllast, ARCH_CONVERT) < XFS_AGFL_SIZE(mp) && | ||
2179 | INT_GET(agf->agf_flcount, ARCH_CONVERT) <= XFS_AGFL_SIZE(mp); | ||
2180 | if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF, | 2170 | if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF, |
2181 | XFS_RANDOM_ALLOC_READ_AGF))) { | 2171 | XFS_RANDOM_ALLOC_READ_AGF))) { |
2182 | XFS_CORRUPTION_ERROR("xfs_alloc_read_agf", | 2172 | XFS_CORRUPTION_ERROR("xfs_alloc_read_agf", |
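
For readability, the AGF validation in the hunk above can be read as a single predicate over the now-annotated fields; pulled out on its own it would look roughly like this (a sketch, not code from the patch):

```c
static int example_agf_ok(xfs_mount_t *mp, xfs_agf_t *agf)
{
        return  be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC &&
                XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
                be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
                be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
                be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
                be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp);
}
```
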
@@ -2186,13 +2176,13 @@ xfs_alloc_read_agf( | |||
2186 | } | 2176 | } |
2187 | pag = &mp->m_perag[agno]; | 2177 | pag = &mp->m_perag[agno]; |
2188 | if (!pag->pagf_init) { | 2178 | if (!pag->pagf_init) { |
2189 | pag->pagf_freeblks = INT_GET(agf->agf_freeblks, ARCH_CONVERT); | 2179 | pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks); |
2190 | pag->pagf_flcount = INT_GET(agf->agf_flcount, ARCH_CONVERT); | 2180 | pag->pagf_flcount = be32_to_cpu(agf->agf_flcount); |
2191 | pag->pagf_longest = INT_GET(agf->agf_longest, ARCH_CONVERT); | 2181 | pag->pagf_longest = be32_to_cpu(agf->agf_longest); |
2192 | pag->pagf_levels[XFS_BTNUM_BNOi] = | 2182 | pag->pagf_levels[XFS_BTNUM_BNOi] = |
2193 | INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT); | 2183 | be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]); |
2194 | pag->pagf_levels[XFS_BTNUM_CNTi] = | 2184 | pag->pagf_levels[XFS_BTNUM_CNTi] = |
2195 | INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT); | 2185 | be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]); |
2196 | spinlock_init(&pag->pagb_lock, "xfspagb"); | 2186 | spinlock_init(&pag->pagb_lock, "xfspagb"); |
2197 | pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS * | 2187 | pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS * |
2198 | sizeof(xfs_perag_busy_t), KM_SLEEP); | 2188 | sizeof(xfs_perag_busy_t), KM_SLEEP); |
@@ -2200,13 +2190,13 @@ xfs_alloc_read_agf( | |||
2200 | } | 2190 | } |
2201 | #ifdef DEBUG | 2191 | #ifdef DEBUG |
2202 | else if (!XFS_FORCED_SHUTDOWN(mp)) { | 2192 | else if (!XFS_FORCED_SHUTDOWN(mp)) { |
2203 | ASSERT(pag->pagf_freeblks == INT_GET(agf->agf_freeblks, ARCH_CONVERT)); | 2193 | ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks)); |
2204 | ASSERT(pag->pagf_flcount == INT_GET(agf->agf_flcount, ARCH_CONVERT)); | 2194 | ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount)); |
2205 | ASSERT(pag->pagf_longest == INT_GET(agf->agf_longest, ARCH_CONVERT)); | 2195 | ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest)); |
2206 | ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] == | 2196 | ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] == |
2207 | INT_GET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT)); | 2197 | be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi])); |
2208 | ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] == | 2198 | ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] == |
2209 | INT_GET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT)); | 2199 | be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi])); |
2210 | } | 2200 | } |
2211 | #endif | 2201 | #endif |
2212 | XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGF, XFS_AGF_REF); | 2202 | XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGF, XFS_AGF_REF); |
@@ -2455,7 +2445,7 @@ xfs_free_extent( | |||
2455 | #ifdef DEBUG | 2445 | #ifdef DEBUG |
2456 | ASSERT(args.agbp != NULL); | 2446 | ASSERT(args.agbp != NULL); |
2457 | agf = XFS_BUF_TO_AGF(args.agbp); | 2447 | agf = XFS_BUF_TO_AGF(args.agbp); |
2458 | ASSERT(args.agbno + len <= INT_GET(agf->agf_length, ARCH_CONVERT)); | 2448 | ASSERT(args.agbno + len <= be32_to_cpu(agf->agf_length)); |
2459 | #endif | 2449 | #endif |
2460 | error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, | 2450 | error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, |
2461 | len, 0); | 2451 | len, 0); |
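
The be16_add()/be32_add() calls used throughout fs/xfs/xfs_alloc.c above (and in the btree code below) are helpers whose effect is to add a CPU-order delta to a field kept big-endian in memory. A sketch of that operation (names prefixed example_ to avoid implying these are the real definitions):

```c
static inline void example_be32_add(__be32 *a, __s32 b)
{
        *a = cpu_to_be32(be32_to_cpu(*a) + b);
}

static inline void example_be16_add(__be16 *a, __s16 b)
{
        *a = cpu_to_be16(be16_to_cpu(*a) + b);
}
```
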
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 7ecc8c0611d1..a1d92da86ccd 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -117,7 +117,7 @@ xfs_alloc_delrec( | |||
117 | /* | 117 | /* |
118 | * Fail if we're off the end of the block. | 118 | * Fail if we're off the end of the block. |
119 | */ | 119 | */ |
120 | if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 120 | if (ptr > be16_to_cpu(block->bb_numrecs)) { |
121 | *stat = 0; | 121 | *stat = 0; |
122 | return 0; | 122 | return 0; |
123 | } | 123 | } |
@@ -131,18 +131,18 @@ xfs_alloc_delrec( | |||
131 | lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur); | 131 | lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur); |
132 | lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur); | 132 | lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur); |
133 | #ifdef DEBUG | 133 | #ifdef DEBUG |
134 | for (i = ptr; i < INT_GET(block->bb_numrecs, ARCH_CONVERT); i++) { | 134 | for (i = ptr; i < be16_to_cpu(block->bb_numrecs); i++) { |
135 | if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) | 135 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level))) |
136 | return error; | 136 | return error; |
137 | } | 137 | } |
138 | #endif | 138 | #endif |
139 | if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 139 | if (ptr < be16_to_cpu(block->bb_numrecs)) { |
140 | memmove(&lkp[ptr - 1], &lkp[ptr], | 140 | memmove(&lkp[ptr - 1], &lkp[ptr], |
141 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lkp)); /* INT_: mem copy */ | 141 | (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lkp)); |
142 | memmove(&lpp[ptr - 1], &lpp[ptr], | 142 | memmove(&lpp[ptr - 1], &lpp[ptr], |
143 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lpp)); /* INT_: mem copy */ | 143 | (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lpp)); |
144 | xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); | 144 | xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); |
145 | xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); | 145 | xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); |
146 | } | 146 | } |
147 | } | 147 | } |
148 | /* | 148 | /* |
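
A detail worth noting in the xfs_alloc_delrec() hunk above: only the record count goes through be16_to_cpu(); the keys and pointers being shifted are moved as raw big-endian bytes, so the memmove() calls need no per-element conversion. A condensed sketch of that step (example_* is illustrative; xfs_alloc_key_t/xfs_alloc_ptr_t are the types declared in xfs_alloc_btree.h):

```c
static void example_squeeze_out(xfs_alloc_block_t *block,
                                xfs_alloc_key_t *lkp,
                                xfs_alloc_ptr_t *lpp,
                                int ptr)
{
        int numrecs = be16_to_cpu(block->bb_numrecs);

        if (ptr < numrecs) {
                /* payload stays big-endian; plain byte moves are enough */
                memmove(&lkp[ptr - 1], &lkp[ptr], (numrecs - ptr) * sizeof(*lkp));
                memmove(&lpp[ptr - 1], &lpp[ptr], (numrecs - ptr) * sizeof(*lpp));
        }
        /* equivalent to the patch's be16_add(&block->bb_numrecs, -1) */
        block->bb_numrecs = cpu_to_be16(numrecs - 1);
}
```
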
@@ -151,25 +151,25 @@ xfs_alloc_delrec( | |||
151 | */ | 151 | */ |
152 | else { | 152 | else { |
153 | lrp = XFS_ALLOC_REC_ADDR(block, 1, cur); | 153 | lrp = XFS_ALLOC_REC_ADDR(block, 1, cur); |
154 | if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 154 | if (ptr < be16_to_cpu(block->bb_numrecs)) { |
155 | memmove(&lrp[ptr - 1], &lrp[ptr], | 155 | memmove(&lrp[ptr - 1], &lrp[ptr], |
156 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr) * sizeof(*lrp)); | 156 | (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lrp)); |
157 | xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1); | 157 | xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); |
158 | } | 158 | } |
159 | /* | 159 | /* |
160 | * If it's the first record in the block, we'll need a key | 160 | * If it's the first record in the block, we'll need a key |
161 | * structure to pass up to the next level (updkey). | 161 | * structure to pass up to the next level (updkey). |
162 | */ | 162 | */ |
163 | if (ptr == 1) { | 163 | if (ptr == 1) { |
164 | key.ar_startblock = lrp->ar_startblock; /* INT_: direct copy */ | 164 | key.ar_startblock = lrp->ar_startblock; |
165 | key.ar_blockcount = lrp->ar_blockcount; /* INT_: direct copy */ | 165 | key.ar_blockcount = lrp->ar_blockcount; |
166 | lkp = &key; | 166 | lkp = &key; |
167 | } | 167 | } |
168 | } | 168 | } |
169 | /* | 169 | /* |
170 | * Decrement and log the number of entries in the block. | 170 | * Decrement and log the number of entries in the block. |
171 | */ | 171 | */ |
172 | INT_MOD(block->bb_numrecs, ARCH_CONVERT, -1); | 172 | be16_add(&block->bb_numrecs, -1); |
173 | xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); | 173 | xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); |
174 | /* | 174 | /* |
175 | * See if the longest free extent in the allocation group was | 175 | * See if the longest free extent in the allocation group was |
@@ -182,24 +182,24 @@ xfs_alloc_delrec( | |||
182 | 182 | ||
183 | if (level == 0 && | 183 | if (level == 0 && |
184 | cur->bc_btnum == XFS_BTNUM_CNT && | 184 | cur->bc_btnum == XFS_BTNUM_CNT && |
185 | INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && | 185 | be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && |
186 | ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 186 | ptr > be16_to_cpu(block->bb_numrecs)) { |
187 | ASSERT(ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT) + 1); | 187 | ASSERT(ptr == be16_to_cpu(block->bb_numrecs) + 1); |
188 | /* | 188 | /* |
189 | * There are still records in the block. Grab the size | 189 | * There are still records in the block. Grab the size |
190 | * from the last one. | 190 | * from the last one. |
191 | */ | 191 | */ |
192 | if (INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 192 | if (be16_to_cpu(block->bb_numrecs)) { |
193 | rrp = XFS_ALLOC_REC_ADDR(block, INT_GET(block->bb_numrecs, ARCH_CONVERT), cur); | 193 | rrp = XFS_ALLOC_REC_ADDR(block, be16_to_cpu(block->bb_numrecs), cur); |
194 | INT_COPY(agf->agf_longest, rrp->ar_blockcount, ARCH_CONVERT); | 194 | agf->agf_longest = rrp->ar_blockcount; |
195 | } | 195 | } |
196 | /* | 196 | /* |
197 | * No free extents left. | 197 | * No free extents left. |
198 | */ | 198 | */ |
199 | else | 199 | else |
200 | agf->agf_longest = 0; | 200 | agf->agf_longest = 0; |
201 | mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest = | 201 | mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest = |
202 | INT_GET(agf->agf_longest, ARCH_CONVERT); | 202 | be32_to_cpu(agf->agf_longest); |
203 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 203 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
204 | XFS_AGF_LONGEST); | 204 | XFS_AGF_LONGEST); |
205 | } | 205 | } |
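
The agf_longest update above also shows the split between on-disk and in-core state: agf_longest (on disk, __be32) is assigned directly from another __be32 field with no conversion, which is why the old INT_COPY() disappears, while the cached pagf_longest in the per-AG structure stays in CPU byte order. A sketch of that pairing (illustrative function, not XFS code):

```c
static void example_update_longest(xfs_agf_t *agf, xfs_perag_t *pag,
                                   xfs_alloc_rec_t *last_rec)
{
        agf->agf_longest = last_rec->ar_blockcount;        /* be32 -> be32, direct copy */
        pag->pagf_longest = be32_to_cpu(agf->agf_longest); /* be32 -> in-core CPU order */
}
```
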
@@ -213,15 +213,15 @@ xfs_alloc_delrec( | |||
213 | * and it's NOT the leaf level, | 213 | * and it's NOT the leaf level, |
214 | * then we can get rid of this level. | 214 | * then we can get rid of this level. |
215 | */ | 215 | */ |
216 | if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == 1 && level > 0) { | 216 | if (be16_to_cpu(block->bb_numrecs) == 1 && level > 0) { |
217 | /* | 217 | /* |
218 | * lpp is still set to the first pointer in the block. | 218 | * lpp is still set to the first pointer in the block. |
219 | * Make it the new root of the btree. | 219 | * Make it the new root of the btree. |
220 | */ | 220 | */ |
221 | bno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT); | 221 | bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]); |
222 | INT_COPY(agf->agf_roots[cur->bc_btnum], *lpp, ARCH_CONVERT); | 222 | agf->agf_roots[cur->bc_btnum] = *lpp; |
223 | INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, -1); | 223 | be32_add(&agf->agf_levels[cur->bc_btnum], -1); |
224 | mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_levels[cur->bc_btnum]--; | 224 | mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--; |
225 | /* | 225 | /* |
226 | * Put this buffer/block on the ag's freelist. | 226 | * Put this buffer/block on the ag's freelist. |
227 | */ | 227 | */ |
@@ -243,7 +243,7 @@ xfs_alloc_delrec( | |||
243 | * that freed the block. | 243 | * that freed the block. |
244 | */ | 244 | */ |
245 | xfs_alloc_mark_busy(cur->bc_tp, | 245 | xfs_alloc_mark_busy(cur->bc_tp, |
246 | INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); | 246 | be32_to_cpu(agf->agf_seqno), bno, 1); |
247 | 247 | ||
248 | xfs_trans_agbtree_delta(cur->bc_tp, -1); | 248 | xfs_trans_agbtree_delta(cur->bc_tp, -1); |
249 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 249 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
@@ -269,7 +269,7 @@ xfs_alloc_delrec( | |||
269 | * If the number of records remaining in the block is at least | 269 | * If the number of records remaining in the block is at least |
270 | * the minimum, we're done. | 270 | * the minimum, we're done. |
271 | */ | 271 | */ |
272 | if (INT_GET(block->bb_numrecs, ARCH_CONVERT) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { | 272 | if (be16_to_cpu(block->bb_numrecs) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { |
273 | if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i))) | 273 | if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i))) |
274 | return error; | 274 | return error; |
275 | *stat = 1; | 275 | *stat = 1; |
@@ -280,8 +280,8 @@ xfs_alloc_delrec( | |||
280 | * tree balanced. Look at the left and right sibling blocks to | 280 | * tree balanced. Look at the left and right sibling blocks to |
281 | * see if we can re-balance by moving only one record. | 281 | * see if we can re-balance by moving only one record. |
282 | */ | 282 | */ |
283 | rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); | 283 | rbno = be32_to_cpu(block->bb_rightsib); |
284 | lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT); | 284 | lbno = be32_to_cpu(block->bb_leftsib); |
285 | bno = NULLAGBLOCK; | 285 | bno = NULLAGBLOCK; |
286 | ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); | 286 | ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); |
287 | /* | 287 | /* |
@@ -318,18 +318,18 @@ xfs_alloc_delrec( | |||
318 | /* | 318 | /* |
319 | * Grab the current block number, for future use. | 319 | * Grab the current block number, for future use. |
320 | */ | 320 | */ |
321 | bno = INT_GET(right->bb_leftsib, ARCH_CONVERT); | 321 | bno = be32_to_cpu(right->bb_leftsib); |
322 | /* | 322 | /* |
323 | * If right block is full enough so that removing one entry | 323 | * If right block is full enough so that removing one entry |
324 | * won't make it too empty, and left-shifting an entry out | 324 | * won't make it too empty, and left-shifting an entry out |
325 | * of right to us works, we're done. | 325 | * of right to us works, we're done. |
326 | */ | 326 | */ |
327 | if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >= | 327 | if (be16_to_cpu(right->bb_numrecs) - 1 >= |
328 | XFS_ALLOC_BLOCK_MINRECS(level, cur)) { | 328 | XFS_ALLOC_BLOCK_MINRECS(level, cur)) { |
329 | if ((error = xfs_alloc_lshift(tcur, level, &i))) | 329 | if ((error = xfs_alloc_lshift(tcur, level, &i))) |
330 | goto error0; | 330 | goto error0; |
331 | if (i) { | 331 | if (i) { |
332 | ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= | 332 | ASSERT(be16_to_cpu(block->bb_numrecs) >= |
333 | XFS_ALLOC_BLOCK_MINRECS(level, cur)); | 333 | XFS_ALLOC_BLOCK_MINRECS(level, cur)); |
334 | xfs_btree_del_cursor(tcur, | 334 | xfs_btree_del_cursor(tcur, |
335 | XFS_BTREE_NOERROR); | 335 | XFS_BTREE_NOERROR); |
@@ -346,7 +346,7 @@ xfs_alloc_delrec( | |||
346 | * future reference, and fix up the temp cursor to point | 346 | * future reference, and fix up the temp cursor to point |
347 | * to our block again (last record). | 347 | * to our block again (last record). |
348 | */ | 348 | */ |
349 | rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); | 349 | rrecs = be16_to_cpu(right->bb_numrecs); |
350 | if (lbno != NULLAGBLOCK) { | 350 | if (lbno != NULLAGBLOCK) { |
351 | i = xfs_btree_firstrec(tcur, level); | 351 | i = xfs_btree_firstrec(tcur, level); |
352 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | 352 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); |
@@ -382,18 +382,18 @@ xfs_alloc_delrec( | |||
382 | /* | 382 | /* |
383 | * Grab the current block number, for future use. | 383 | * Grab the current block number, for future use. |
384 | */ | 384 | */ |
385 | bno = INT_GET(left->bb_rightsib, ARCH_CONVERT); | 385 | bno = be32_to_cpu(left->bb_rightsib); |
386 | /* | 386 | /* |
387 | * If left block is full enough so that removing one entry | 387 | * If left block is full enough so that removing one entry |
388 | * won't make it too empty, and right-shifting an entry out | 388 | * won't make it too empty, and right-shifting an entry out |
389 | * of left to us works, we're done. | 389 | * of left to us works, we're done. |
390 | */ | 390 | */ |
391 | if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >= | 391 | if (be16_to_cpu(left->bb_numrecs) - 1 >= |
392 | XFS_ALLOC_BLOCK_MINRECS(level, cur)) { | 392 | XFS_ALLOC_BLOCK_MINRECS(level, cur)) { |
393 | if ((error = xfs_alloc_rshift(tcur, level, &i))) | 393 | if ((error = xfs_alloc_rshift(tcur, level, &i))) |
394 | goto error0; | 394 | goto error0; |
395 | if (i) { | 395 | if (i) { |
396 | ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= | 396 | ASSERT(be16_to_cpu(block->bb_numrecs) >= |
397 | XFS_ALLOC_BLOCK_MINRECS(level, cur)); | 397 | XFS_ALLOC_BLOCK_MINRECS(level, cur)); |
398 | xfs_btree_del_cursor(tcur, | 398 | xfs_btree_del_cursor(tcur, |
399 | XFS_BTREE_NOERROR); | 399 | XFS_BTREE_NOERROR); |
@@ -407,7 +407,7 @@ xfs_alloc_delrec( | |||
407 | * Otherwise, grab the number of records in right for | 407 | * Otherwise, grab the number of records in right for |
408 | * future reference. | 408 | * future reference. |
409 | */ | 409 | */ |
410 | lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); | 410 | lrecs = be16_to_cpu(left->bb_numrecs); |
411 | } | 411 | } |
412 | /* | 412 | /* |
413 | * Delete the temp cursor, we're done with it. | 413 | * Delete the temp cursor, we're done with it. |
@@ -421,7 +421,7 @@ xfs_alloc_delrec( | |||
421 | * See if we can join with the left neighbor block. | 421 | * See if we can join with the left neighbor block. |
422 | */ | 422 | */ |
423 | if (lbno != NULLAGBLOCK && | 423 | if (lbno != NULLAGBLOCK && |
424 | lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 424 | lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
425 | /* | 425 | /* |
426 | * Set "right" to be the starting block, | 426 | * Set "right" to be the starting block, |
427 | * "left" to be the left neighbor. | 427 | * "left" to be the left neighbor. |
@@ -441,7 +441,7 @@ xfs_alloc_delrec( | |||
441 | * If that won't work, see if we can join with the right neighbor block. | 441 | * If that won't work, see if we can join with the right neighbor block. |
442 | */ | 442 | */ |
443 | else if (rbno != NULLAGBLOCK && | 443 | else if (rbno != NULLAGBLOCK && |
444 | rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= | 444 | rrecs + be16_to_cpu(block->bb_numrecs) <= |
445 | XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 445 | XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
446 | /* | 446 | /* |
447 | * Set "left" to be the starting block, | 447 | * Set "left" to be the starting block, |
@@ -476,31 +476,34 @@ xfs_alloc_delrec( | |||
476 | /* | 476 | /* |
477 | * It's a non-leaf. Move keys and pointers. | 477 | * It's a non-leaf. Move keys and pointers. |
478 | */ | 478 | */ |
479 | lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); | 479 | lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); |
480 | lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); | 480 | lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); |
481 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); | 481 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); |
482 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); | 482 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); |
483 | #ifdef DEBUG | 483 | #ifdef DEBUG |
484 | for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { | 484 | for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { |
485 | if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) | 485 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) |
486 | return error; | 486 | return error; |
487 | } | 487 | } |
488 | #endif | 488 | #endif |
489 | memcpy(lkp, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lkp)); /* INT_: structure copy */ | 489 | memcpy(lkp, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*lkp)); |
490 | memcpy(lpp, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lpp)); /* INT_: structure copy */ | 490 | memcpy(lpp, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*lpp)); |
491 | xfs_alloc_log_keys(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, | 491 | xfs_alloc_log_keys(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1, |
492 | INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 492 | be16_to_cpu(left->bb_numrecs) + |
493 | xfs_alloc_log_ptrs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, | 493 | be16_to_cpu(right->bb_numrecs)); |
494 | INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 494 | xfs_alloc_log_ptrs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1, |
495 | be16_to_cpu(left->bb_numrecs) + | ||
496 | be16_to_cpu(right->bb_numrecs)); | ||
495 | } else { | 497 | } else { |
496 | /* | 498 | /* |
497 | * It's a leaf. Move records. | 499 | * It's a leaf. Move records. |
498 | */ | 500 | */ |
499 | lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, cur); | 501 | lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); |
500 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); | 502 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); |
501 | memcpy(lrp, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*lrp)); | 503 | memcpy(lrp, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*lrp)); |
502 | xfs_alloc_log_recs(cur, lbp, INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1, | 504 | xfs_alloc_log_recs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1, |
503 | INT_GET(left->bb_numrecs, ARCH_CONVERT) + INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 505 | be16_to_cpu(left->bb_numrecs) + |
506 | be16_to_cpu(right->bb_numrecs)); | ||
504 | } | 507 | } |
505 | /* | 508 | /* |
506 | * If we joined with the left neighbor, set the buffer in the | 509 | * If we joined with the left neighbor, set the buffer in the |
@@ -508,7 +511,7 @@ xfs_alloc_delrec( | |||
508 | */ | 511 | */ |
509 | if (bp != lbp) { | 512 | if (bp != lbp) { |
510 | xfs_btree_setbuf(cur, level, lbp); | 513 | xfs_btree_setbuf(cur, level, lbp); |
511 | cur->bc_ptrs[level] += INT_GET(left->bb_numrecs, ARCH_CONVERT); | 514 | cur->bc_ptrs[level] += be16_to_cpu(left->bb_numrecs); |
512 | } | 515 | } |
513 | /* | 516 | /* |
514 | * If we joined with the right neighbor and there's a level above | 517 | * If we joined with the right neighbor and there's a level above |
@@ -520,28 +523,28 @@ xfs_alloc_delrec( | |||
520 | /* | 523 | /* |
521 | * Fix up the number of records in the surviving block. | 524 | * Fix up the number of records in the surviving block. |
522 | */ | 525 | */ |
523 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 526 | be16_add(&left->bb_numrecs, be16_to_cpu(right->bb_numrecs)); |
524 | /* | 527 | /* |
525 | * Fix up the right block pointer in the surviving block, and log it. | 528 | * Fix up the right block pointer in the surviving block, and log it. |
526 | */ | 529 | */ |
527 | left->bb_rightsib = right->bb_rightsib; /* INT_: direct copy */ | 530 | left->bb_rightsib = right->bb_rightsib; |
528 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); | 531 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); |
529 | /* | 532 | /* |
530 | * If there is a right sibling now, make it point to the | 533 | * If there is a right sibling now, make it point to the |
531 | * remaining block. | 534 | * remaining block. |
532 | */ | 535 | */ |
533 | if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 536 | if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) { |
534 | xfs_alloc_block_t *rrblock; | 537 | xfs_alloc_block_t *rrblock; |
535 | xfs_buf_t *rrbp; | 538 | xfs_buf_t *rrbp; |
536 | 539 | ||
537 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 540 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
538 | cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, | 541 | cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0, |
539 | &rrbp, XFS_ALLOC_BTREE_REF))) | 542 | &rrbp, XFS_ALLOC_BTREE_REF))) |
540 | return error; | 543 | return error; |
541 | rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); | 544 | rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); |
542 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) | 545 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) |
543 | return error; | 546 | return error; |
544 | INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno); | 547 | rrblock->bb_leftsib = cpu_to_be32(lbno); |
545 | xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); | 548 | xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); |
546 | } | 549 | } |
547 | /* | 550 | /* |
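
The block-merge hunk above follows the same rules for sibling pointers: copying one on-disk pointer to another needs no swap, testing against the CPU-order constant NULLAGBLOCK converts first, and storing a CPU-order block number converts on the way in. Condensed (illustrative function; locking, logging and error handling omitted):

```c
static void example_relink_after_merge(xfs_alloc_block_t *left,
                                       xfs_alloc_block_t *right,
                                       xfs_alloc_block_t *rrblock,
                                       xfs_agblock_t lbno)
{
        left->bb_rightsib = right->bb_rightsib;           /* be32 -> be32 */
        if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK)
                rrblock->bb_leftsib = cpu_to_be32(lbno);  /* CPU order -> be32 */
}
```
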
@@ -562,10 +565,9 @@ xfs_alloc_delrec( | |||
562 | * busy block is allocated, the iclog is pushed up to the | 565 | * busy block is allocated, the iclog is pushed up to the |
563 | * LSN that freed the block. | 566 | * LSN that freed the block. |
564 | */ | 567 | */ |
565 | xfs_alloc_mark_busy(cur->bc_tp, | 568 | xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1); |
566 | INT_GET(agf->agf_seqno, ARCH_CONVERT), bno, 1); | ||
567 | |||
568 | xfs_trans_agbtree_delta(cur->bc_tp, -1); | 569 | xfs_trans_agbtree_delta(cur->bc_tp, -1); |
570 | |||
569 | /* | 571 | /* |
570 | * Adjust the current level's cursor so that we're left referring | 572 | * Adjust the current level's cursor so that we're left referring |
571 | * to the right node, after we're done. | 573 | * to the right node, after we're done. |
@@ -613,7 +615,7 @@ xfs_alloc_insrec( | |||
613 | int ptr; /* index in btree block for this rec */ | 615 | int ptr; /* index in btree block for this rec */ |
614 | xfs_alloc_rec_t *rp; /* pointer to btree records */ | 616 | xfs_alloc_rec_t *rp; /* pointer to btree records */ |
615 | 617 | ||
616 | ASSERT(INT_GET(recp->ar_blockcount, ARCH_CONVERT) > 0); | 618 | ASSERT(be32_to_cpu(recp->ar_blockcount) > 0); |
617 | 619 | ||
618 | /* | 620 | /* |
619 | * GCC doesn't understand the (arguably complex) control flow in | 621 | * GCC doesn't understand the (arguably complex) control flow in |
@@ -637,8 +639,8 @@ xfs_alloc_insrec( | |||
637 | /* | 639 | /* |
638 | * Make a key out of the record data to be inserted, and save it. | 640 | * Make a key out of the record data to be inserted, and save it. |
639 | */ | 641 | */ |
640 | key.ar_startblock = recp->ar_startblock; /* INT_: direct copy */ | 642 | key.ar_startblock = recp->ar_startblock; |
641 | key.ar_blockcount = recp->ar_blockcount; /* INT_: direct copy */ | 643 | key.ar_blockcount = recp->ar_blockcount; |
642 | optr = ptr = cur->bc_ptrs[level]; | 644 | optr = ptr = cur->bc_ptrs[level]; |
643 | /* | 645 | /* |
644 | * If we're off the left edge, return failure. | 646 | * If we're off the left edge, return failure. |
@@ -659,7 +661,7 @@ xfs_alloc_insrec( | |||
659 | /* | 661 | /* |
660 | * Check that the new entry is being inserted in the right place. | 662 | * Check that the new entry is being inserted in the right place. |
661 | */ | 663 | */ |
662 | if (ptr <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 664 | if (ptr <= be16_to_cpu(block->bb_numrecs)) { |
663 | if (level == 0) { | 665 | if (level == 0) { |
664 | rp = XFS_ALLOC_REC_ADDR(block, ptr, cur); | 666 | rp = XFS_ALLOC_REC_ADDR(block, ptr, cur); |
665 | xfs_btree_check_rec(cur->bc_btnum, recp, rp); | 667 | xfs_btree_check_rec(cur->bc_btnum, recp, rp); |
@@ -675,7 +677,7 @@ xfs_alloc_insrec( | |||
675 | * If the block is full, we can't insert the new entry until we | 677 | * If the block is full, we can't insert the new entry until we |
676 | * make the block un-full. | 678 | * make the block un-full. |
677 | */ | 679 | */ |
678 | if (INT_GET(block->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 680 | if (be16_to_cpu(block->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
679 | /* | 681 | /* |
680 | * First, try shifting an entry to the right neighbor. | 682 | * First, try shifting an entry to the right neighbor. |
681 | */ | 683 | */ |
@@ -712,8 +714,8 @@ xfs_alloc_insrec( | |||
712 | return error; | 714 | return error; |
713 | #endif | 715 | #endif |
714 | ptr = cur->bc_ptrs[level]; | 716 | ptr = cur->bc_ptrs[level]; |
715 | nrec.ar_startblock = nkey.ar_startblock; /* INT_: direct copy */ | 717 | nrec.ar_startblock = nkey.ar_startblock; |
716 | nrec.ar_blockcount = nkey.ar_blockcount; /* INT_: direct copy */ | 718 | nrec.ar_blockcount = nkey.ar_blockcount; |
717 | } | 719 | } |
718 | /* | 720 | /* |
719 | * Otherwise the insert fails. | 721 | * Otherwise the insert fails. |
@@ -737,15 +739,15 @@ xfs_alloc_insrec( | |||
737 | kp = XFS_ALLOC_KEY_ADDR(block, 1, cur); | 739 | kp = XFS_ALLOC_KEY_ADDR(block, 1, cur); |
738 | pp = XFS_ALLOC_PTR_ADDR(block, 1, cur); | 740 | pp = XFS_ALLOC_PTR_ADDR(block, 1, cur); |
739 | #ifdef DEBUG | 741 | #ifdef DEBUG |
740 | for (i = INT_GET(block->bb_numrecs, ARCH_CONVERT); i >= ptr; i--) { | 742 | for (i = be16_to_cpu(block->bb_numrecs); i >= ptr; i--) { |
741 | if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), level))) | 743 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level))) |
742 | return error; | 744 | return error; |
743 | } | 745 | } |
744 | #endif | 746 | #endif |
745 | memmove(&kp[ptr], &kp[ptr - 1], | 747 | memmove(&kp[ptr], &kp[ptr - 1], |
746 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*kp)); /* INT_: copy */ | 748 | (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*kp)); |
747 | memmove(&pp[ptr], &pp[ptr - 1], | 749 | memmove(&pp[ptr], &pp[ptr - 1], |
748 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*pp)); /* INT_: copy */ | 750 | (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*pp)); |
749 | #ifdef DEBUG | 751 | #ifdef DEBUG |
750 | if ((error = xfs_btree_check_sptr(cur, *bnop, level))) | 752 | if ((error = xfs_btree_check_sptr(cur, *bnop, level))) |
751 | return error; | 753 | return error; |
@@ -754,12 +756,12 @@ xfs_alloc_insrec( | |||
754 | * Now stuff the new data in, bump numrecs and log the new data. | 756 | * Now stuff the new data in, bump numrecs and log the new data. |
755 | */ | 757 | */ |
756 | kp[ptr - 1] = key; | 758 | kp[ptr - 1] = key; |
757 | INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); | 759 | pp[ptr - 1] = cpu_to_be32(*bnop); |
758 | INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); | 760 | be16_add(&block->bb_numrecs, 1); |
759 | xfs_alloc_log_keys(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); | 761 | xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); |
760 | xfs_alloc_log_ptrs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); | 762 | xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); |
761 | #ifdef DEBUG | 763 | #ifdef DEBUG |
762 | if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 764 | if (ptr < be16_to_cpu(block->bb_numrecs)) |
763 | xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1, | 765 | xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1, |
764 | kp + ptr); | 766 | kp + ptr); |
765 | #endif | 767 | #endif |
@@ -769,16 +771,16 @@ xfs_alloc_insrec( | |||
769 | */ | 771 | */ |
770 | rp = XFS_ALLOC_REC_ADDR(block, 1, cur); | 772 | rp = XFS_ALLOC_REC_ADDR(block, 1, cur); |
771 | memmove(&rp[ptr], &rp[ptr - 1], | 773 | memmove(&rp[ptr], &rp[ptr - 1], |
772 | (INT_GET(block->bb_numrecs, ARCH_CONVERT) - ptr + 1) * sizeof(*rp)); | 774 | (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*rp)); |
773 | /* | 775 | /* |
774 | * Now stuff the new record in, bump numrecs | 776 | * Now stuff the new record in, bump numrecs |
775 | * and log the new data. | 777 | * and log the new data. |
776 | */ | 778 | */ |
777 | rp[ptr - 1] = *recp; /* INT_: struct copy */ | 779 | rp[ptr - 1] = *recp; /* INT_: struct copy */ |
778 | INT_MOD(block->bb_numrecs, ARCH_CONVERT, +1); | 780 | be16_add(&block->bb_numrecs, 1); |
779 | xfs_alloc_log_recs(cur, bp, ptr, INT_GET(block->bb_numrecs, ARCH_CONVERT)); | 781 | xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); |
780 | #ifdef DEBUG | 782 | #ifdef DEBUG |
781 | if (ptr < INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 783 | if (ptr < be16_to_cpu(block->bb_numrecs)) |
782 | xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1, | 784 | xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1, |
783 | rp + ptr); | 785 | rp + ptr); |
784 | #endif | 786 | #endif |
@@ -800,16 +802,16 @@ xfs_alloc_insrec( | |||
800 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 802 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
801 | if (level == 0 && | 803 | if (level == 0 && |
802 | cur->bc_btnum == XFS_BTNUM_CNT && | 804 | cur->bc_btnum == XFS_BTNUM_CNT && |
803 | INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && | 805 | be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && |
804 | INT_GET(recp->ar_blockcount, ARCH_CONVERT) > INT_GET(agf->agf_longest, ARCH_CONVERT)) { | 806 | be32_to_cpu(recp->ar_blockcount) > be32_to_cpu(agf->agf_longest)) { |
805 | /* | 807 | /* |
806 | * If this is a leaf in the by-size btree and there | 808 | * If this is a leaf in the by-size btree and there |
807 | * is no right sibling block and this block is bigger | 809 | * is no right sibling block and this block is bigger |
808 | * than the previous longest block, update it. | 810 | * than the previous longest block, update it. |
809 | */ | 811 | */ |
810 | INT_COPY(agf->agf_longest, recp->ar_blockcount, ARCH_CONVERT); | 812 | agf->agf_longest = recp->ar_blockcount; |
811 | cur->bc_mp->m_perag[INT_GET(agf->agf_seqno, ARCH_CONVERT)].pagf_longest | 813 | cur->bc_mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest |
812 | = INT_GET(recp->ar_blockcount, ARCH_CONVERT); | 814 | = be32_to_cpu(recp->ar_blockcount); |
813 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 815 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
814 | XFS_AGF_LONGEST); | 816 | XFS_AGF_LONGEST); |
815 | } | 817 | } |
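The hunk above keeps the AGF's cached longest-free-extent value in sync when a larger extent lands at the right edge of a by-size (cnt) btree leaf; note that the on-disk copy is now a direct assignment, since both fields already hold big-endian values, while the per-AG cache keeps CPU order. A hedged sketch of that conditional update, with deliberately simplified struct definitions and ntohl() in place of be32_to_cpu():

#include <stdint.h>
#include <arpa/inet.h>

#define NULLAGBLOCK ((uint32_t)-1)

struct agf       { uint32_t agf_longest; };	/* on-disk field, big-endian */
struct perag     { uint32_t pagf_longest; };	/* in-core cache, CPU order */
struct alloc_rec { uint32_t ar_blockcount; };	/* on-disk record, big-endian */

/* mirror the new rightmost record into the AGF and the in-core cache */
static void update_longest(struct agf *agf, struct perag *pag,
			   const struct alloc_rec *rec, uint32_t rightsib)
{
	if (rightsib == NULLAGBLOCK &&
	    ntohl(rec->ar_blockcount) > ntohl(agf->agf_longest)) {
		agf->agf_longest = rec->ar_blockcount;	/* both big-endian: direct copy */
		pag->pagf_longest = ntohl(rec->ar_blockcount);
	}
}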
@@ -919,8 +921,9 @@ xfs_alloc_log_recs( | |||
919 | 921 | ||
920 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 922 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
921 | for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++) | 923 | for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++) |
922 | ASSERT(INT_GET(p->ar_startblock, ARCH_CONVERT) + INT_GET(p->ar_blockcount, ARCH_CONVERT) <= | 924 | ASSERT(be32_to_cpu(p->ar_startblock) + |
923 | INT_GET(agf->agf_length, ARCH_CONVERT)); | 925 | be32_to_cpu(p->ar_blockcount) <= |
926 | be32_to_cpu(agf->agf_length)); | ||
924 | } | 927 | } |
925 | #endif | 928 | #endif |
926 | first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); | 929 | first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); |
@@ -957,8 +960,8 @@ xfs_alloc_lookup( | |||
957 | xfs_agf_t *agf; /* a.g. freespace header */ | 960 | xfs_agf_t *agf; /* a.g. freespace header */ |
958 | 961 | ||
959 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 962 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
960 | agno = INT_GET(agf->agf_seqno, ARCH_CONVERT); | 963 | agno = be32_to_cpu(agf->agf_seqno); |
961 | agbno = INT_GET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT); | 964 | agbno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]); |
962 | } | 965 | } |
963 | /* | 966 | /* |
964 | * Iterate over each level in the btree, starting at the root. | 967 | * Iterate over each level in the btree, starting at the root. |
@@ -1025,7 +1028,7 @@ xfs_alloc_lookup( | |||
1025 | * Set low and high entry numbers, 1-based. | 1028 | * Set low and high entry numbers, 1-based. |
1026 | */ | 1029 | */ |
1027 | low = 1; | 1030 | low = 1; |
1028 | if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) { | 1031 | if (!(high = be16_to_cpu(block->bb_numrecs))) { |
1029 | /* | 1032 | /* |
1030 | * If the block is empty, the tree must | 1033 | * If the block is empty, the tree must |
1031 | * be an empty leaf. | 1034 | * be an empty leaf. |
@@ -1054,14 +1057,14 @@ xfs_alloc_lookup( | |||
1054 | xfs_alloc_key_t *kkp; | 1057 | xfs_alloc_key_t *kkp; |
1055 | 1058 | ||
1056 | kkp = kkbase + keyno - 1; | 1059 | kkp = kkbase + keyno - 1; |
1057 | startblock = INT_GET(kkp->ar_startblock, ARCH_CONVERT); | 1060 | startblock = be32_to_cpu(kkp->ar_startblock); |
1058 | blockcount = INT_GET(kkp->ar_blockcount, ARCH_CONVERT); | 1061 | blockcount = be32_to_cpu(kkp->ar_blockcount); |
1059 | } else { | 1062 | } else { |
1060 | xfs_alloc_rec_t *krp; | 1063 | xfs_alloc_rec_t *krp; |
1061 | 1064 | ||
1062 | krp = krbase + keyno - 1; | 1065 | krp = krbase + keyno - 1; |
1063 | startblock = INT_GET(krp->ar_startblock, ARCH_CONVERT); | 1066 | startblock = be32_to_cpu(krp->ar_startblock); |
1064 | blockcount = INT_GET(krp->ar_blockcount, ARCH_CONVERT); | 1067 | blockcount = be32_to_cpu(krp->ar_blockcount); |
1065 | } | 1068 | } |
1066 | /* | 1069 | /* |
1067 | * Compute difference to get next direction. | 1070 | * Compute difference to get next direction. |
@@ -1101,7 +1104,7 @@ xfs_alloc_lookup( | |||
1101 | */ | 1104 | */ |
1102 | if (diff > 0 && --keyno < 1) | 1105 | if (diff > 0 && --keyno < 1) |
1103 | keyno = 1; | 1106 | keyno = 1; |
1104 | agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, keyno, cur), ARCH_CONVERT); | 1107 | agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, keyno, cur)); |
1105 | #ifdef DEBUG | 1108 | #ifdef DEBUG |
1106 | if ((error = xfs_btree_check_sptr(cur, agbno, level))) | 1109 | if ((error = xfs_btree_check_sptr(cur, agbno, level))) |
1107 | return error; | 1110 | return error; |
@@ -1120,8 +1123,8 @@ xfs_alloc_lookup( | |||
1120 | * not the last block, we're in the wrong block. | 1123 | * not the last block, we're in the wrong block. |
1121 | */ | 1124 | */ |
1122 | if (dir == XFS_LOOKUP_GE && | 1125 | if (dir == XFS_LOOKUP_GE && |
1123 | keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) && | 1126 | keyno > be16_to_cpu(block->bb_numrecs) && |
1124 | INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 1127 | be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) { |
1125 | int i; | 1128 | int i; |
1126 | 1129 | ||
1127 | cur->bc_ptrs[0] = keyno; | 1130 | cur->bc_ptrs[0] = keyno; |
@@ -1138,7 +1141,7 @@ xfs_alloc_lookup( | |||
1138 | /* | 1141 | /* |
1139 | * Return if we succeeded or not. | 1142 | * Return if we succeeded or not. |
1140 | */ | 1143 | */ |
1141 | if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 1144 | if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) |
1142 | *stat = 0; | 1145 | *stat = 0; |
1143 | else | 1146 | else |
1144 | *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); | 1147 | *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); |
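In the lookup hunks above the binary-search comparison now decodes the on-disk key or record with be32_to_cpu() before computing the difference against the search value. A hedged sketch of that comparison step for the by-block (bno) tree, with simplified types; the by-size (cnt) tree compares blockcount first, and the exact diff logic is not reproduced here:

#include <stdint.h>
#include <arpa/inet.h>

struct alloc_key { uint32_t ar_startblock, ar_blockcount; };	/* big-endian on disk */

/* return <0, 0 or >0 relative to the wanted start block (sketch only) */
static int64_t key_diff_bno(const struct alloc_key *kp, uint32_t want_bno)
{
	uint32_t startblock = ntohl(kp->ar_startblock);

	return (int64_t)startblock - (int64_t)want_bno;
}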
@@ -1181,7 +1184,7 @@ xfs_alloc_lshift( | |||
1181 | /* | 1184 | /* |
1182 | * If we've got no left sibling then we can't shift an entry left. | 1185 | * If we've got no left sibling then we can't shift an entry left. |
1183 | */ | 1186 | */ |
1184 | if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1187 | if (be32_to_cpu(right->bb_leftsib) == NULLAGBLOCK) { |
1185 | *stat = 0; | 1188 | *stat = 0; |
1186 | return 0; | 1189 | return 0; |
1187 | } | 1190 | } |
@@ -1197,8 +1200,8 @@ xfs_alloc_lshift( | |||
1197 | * Set up the left neighbor as "left". | 1200 | * Set up the left neighbor as "left". |
1198 | */ | 1201 | */ |
1199 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1202 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1200 | cur->bc_private.a.agno, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, &lbp, | 1203 | cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib), |
1201 | XFS_ALLOC_BTREE_REF))) | 1204 | 0, &lbp, XFS_ALLOC_BTREE_REF))) |
1202 | return error; | 1205 | return error; |
1203 | left = XFS_BUF_TO_ALLOC_BLOCK(lbp); | 1206 | left = XFS_BUF_TO_ALLOC_BLOCK(lbp); |
1204 | if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) | 1207 | if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) |
@@ -1206,11 +1209,11 @@ xfs_alloc_lshift( | |||
1206 | /* | 1209 | /* |
1207 | * If it's full, it can't take another entry. | 1210 | * If it's full, it can't take another entry. |
1208 | */ | 1211 | */ |
1209 | if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 1212 | if (be16_to_cpu(left->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
1210 | *stat = 0; | 1213 | *stat = 0; |
1211 | return 0; | 1214 | return 0; |
1212 | } | 1215 | } |
1213 | nrec = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1; | 1216 | nrec = be16_to_cpu(left->bb_numrecs) + 1; |
1214 | /* | 1217 | /* |
1215 | * If non-leaf, copy a key and a ptr to the left block. | 1218 | * If non-leaf, copy a key and a ptr to the left block. |
1216 | */ | 1219 | */ |
@@ -1225,7 +1228,7 @@ xfs_alloc_lshift( | |||
1225 | lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur); | 1228 | lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur); |
1226 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); | 1229 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); |
1227 | #ifdef DEBUG | 1230 | #ifdef DEBUG |
1228 | if ((error = xfs_btree_check_sptr(cur, INT_GET(*rpp, ARCH_CONVERT), level))) | 1231 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level))) |
1229 | return error; | 1232 | return error; |
1230 | #endif | 1233 | #endif |
1231 | *lpp = *rpp; /* INT_: copy */ | 1234 | *lpp = *rpp; /* INT_: copy */

@@ -1247,30 +1250,30 @@ xfs_alloc_lshift( | |||
1247 | /* | 1250 | /* |
1248 | * Bump and log left's numrecs, decrement and log right's numrecs. | 1251 | * Bump and log left's numrecs, decrement and log right's numrecs. |
1249 | */ | 1252 | */ |
1250 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, +1); | 1253 | be16_add(&left->bb_numrecs, 1); |
1251 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); | 1254 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); |
1252 | INT_MOD(right->bb_numrecs, ARCH_CONVERT, -1); | 1255 | be16_add(&right->bb_numrecs, -1); |
1253 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); | 1256 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); |
1254 | /* | 1257 | /* |
1255 | * Slide the contents of right down one entry. | 1258 | * Slide the contents of right down one entry. |
1256 | */ | 1259 | */ |
1257 | if (level > 0) { | 1260 | if (level > 0) { |
1258 | #ifdef DEBUG | 1261 | #ifdef DEBUG |
1259 | for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { | 1262 | for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { |
1260 | if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT), | 1263 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i + 1]), |
1261 | level))) | 1264 | level))) |
1262 | return error; | 1265 | return error; |
1263 | } | 1266 | } |
1264 | #endif | 1267 | #endif |
1265 | memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); | 1268 | memmove(rkp, rkp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); |
1266 | memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); | 1269 | memmove(rpp, rpp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); |
1267 | xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1270 | xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1268 | xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1271 | xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1269 | } else { | 1272 | } else { |
1270 | memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); | 1273 | memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); |
1271 | xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1274 | xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1272 | key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ | 1275 | key.ar_startblock = rrp->ar_startblock; |
1273 | key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ | 1276 | key.ar_blockcount = rrp->ar_blockcount; |
1274 | rkp = &key; | 1277 | rkp = &key; |
1275 | } | 1278 | } |
1276 | /* | 1279 | /* |
@@ -1335,9 +1338,9 @@ xfs_alloc_newroot( | |||
1335 | xfs_agnumber_t seqno; | 1338 | xfs_agnumber_t seqno; |
1336 | 1339 | ||
1337 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 1340 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
1338 | INT_SET(agf->agf_roots[cur->bc_btnum], ARCH_CONVERT, nbno); | 1341 | agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno); |
1339 | INT_MOD(agf->agf_levels[cur->bc_btnum], ARCH_CONVERT, 1); | 1342 | be32_add(&agf->agf_levels[cur->bc_btnum], 1); |
1340 | seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT); | 1343 | seqno = be32_to_cpu(agf->agf_seqno); |
1341 | mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++; | 1344 | mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++; |
1342 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 1345 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
1343 | XFS_AGF_ROOTS | XFS_AGF_LEVELS); | 1346 | XFS_AGF_ROOTS | XFS_AGF_LEVELS); |
@@ -1354,12 +1357,12 @@ xfs_alloc_newroot( | |||
1354 | if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp))) | 1357 | if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp))) |
1355 | return error; | 1358 | return error; |
1356 | #endif | 1359 | #endif |
1357 | if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 1360 | if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) { |
1358 | /* | 1361 | /* |
1359 | * Our block is left, pick up the right block. | 1362 | * Our block is left, pick up the right block. |
1360 | */ | 1363 | */ |
1361 | lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp)); | 1364 | lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp)); |
1362 | rbno = INT_GET(left->bb_rightsib, ARCH_CONVERT); | 1365 | rbno = be32_to_cpu(left->bb_rightsib); |
1363 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 1366 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
1364 | cur->bc_private.a.agno, rbno, 0, &rbp, | 1367 | cur->bc_private.a.agno, rbno, 0, &rbp, |
1365 | XFS_ALLOC_BTREE_REF))) | 1368 | XFS_ALLOC_BTREE_REF))) |
@@ -1376,7 +1379,7 @@ xfs_alloc_newroot( | |||
1376 | rbp = lbp; | 1379 | rbp = lbp; |
1377 | right = left; | 1380 | right = left; |
1378 | rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp)); | 1381 | rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp)); |
1379 | lbno = INT_GET(right->bb_leftsib, ARCH_CONVERT); | 1382 | lbno = be32_to_cpu(right->bb_leftsib); |
1380 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 1383 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
1381 | cur->bc_private.a.agno, lbno, 0, &lbp, | 1384 | cur->bc_private.a.agno, lbno, 0, &lbp, |
1382 | XFS_ALLOC_BTREE_REF))) | 1385 | XFS_ALLOC_BTREE_REF))) |
@@ -1390,11 +1393,11 @@ xfs_alloc_newroot( | |||
1390 | /* | 1393 | /* |
1391 | * Fill in the new block's btree header and log it. | 1394 | * Fill in the new block's btree header and log it. |
1392 | */ | 1395 | */ |
1393 | INT_SET(new->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); | 1396 | new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); |
1394 | INT_SET(new->bb_level, ARCH_CONVERT, (__uint16_t)cur->bc_nlevels); | 1397 | new->bb_level = cpu_to_be16(cur->bc_nlevels); |
1395 | INT_SET(new->bb_numrecs, ARCH_CONVERT, 2); | 1398 | new->bb_numrecs = cpu_to_be16(2); |
1396 | INT_SET(new->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); | 1399 | new->bb_leftsib = cpu_to_be32(NULLAGBLOCK); |
1397 | INT_SET(new->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); | 1400 | new->bb_rightsib = cpu_to_be32(NULLAGBLOCK); |
1398 | xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS); | 1401 | xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS); |
1399 | ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); | 1402 | ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); |
1400 | /* | 1403 | /* |
@@ -1404,18 +1407,18 @@ xfs_alloc_newroot( | |||
1404 | xfs_alloc_key_t *kp; /* btree key pointer */ | 1407 | xfs_alloc_key_t *kp; /* btree key pointer */ |
1405 | 1408 | ||
1406 | kp = XFS_ALLOC_KEY_ADDR(new, 1, cur); | 1409 | kp = XFS_ALLOC_KEY_ADDR(new, 1, cur); |
1407 | if (INT_GET(left->bb_level, ARCH_CONVERT) > 0) { | 1410 | if (be16_to_cpu(left->bb_level) > 0) { |
1408 | kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur); /* INT_: structure copy */ | 1411 | kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur); /* INT_: structure copy */ |
1409 | kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);/* INT_: structure copy */ | 1412 | kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);/* INT_: structure copy */ |
1410 | } else { | 1413 | } else { |
1411 | xfs_alloc_rec_t *rp; /* btree record pointer */ | 1414 | xfs_alloc_rec_t *rp; /* btree record pointer */ |
1412 | 1415 | ||
1413 | rp = XFS_ALLOC_REC_ADDR(left, 1, cur); | 1416 | rp = XFS_ALLOC_REC_ADDR(left, 1, cur); |
1414 | kp[0].ar_startblock = rp->ar_startblock; /* INT_: direct copy */ | 1417 | kp[0].ar_startblock = rp->ar_startblock; |
1415 | kp[0].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */ | 1418 | kp[0].ar_blockcount = rp->ar_blockcount; |
1416 | rp = XFS_ALLOC_REC_ADDR(right, 1, cur); | 1419 | rp = XFS_ALLOC_REC_ADDR(right, 1, cur); |
1417 | kp[1].ar_startblock = rp->ar_startblock; /* INT_: direct copy */ | 1420 | kp[1].ar_startblock = rp->ar_startblock; |
1418 | kp[1].ar_blockcount = rp->ar_blockcount; /* INT_: direct copy */ | 1421 | kp[1].ar_blockcount = rp->ar_blockcount; |
1419 | } | 1422 | } |
1420 | } | 1423 | } |
1421 | xfs_alloc_log_keys(cur, nbp, 1, 2); | 1424 | xfs_alloc_log_keys(cur, nbp, 1, 2); |
@@ -1426,8 +1429,8 @@ xfs_alloc_newroot( | |||
1426 | xfs_alloc_ptr_t *pp; /* btree address pointer */ | 1429 | xfs_alloc_ptr_t *pp; /* btree address pointer */ |
1427 | 1430 | ||
1428 | pp = XFS_ALLOC_PTR_ADDR(new, 1, cur); | 1431 | pp = XFS_ALLOC_PTR_ADDR(new, 1, cur); |
1429 | INT_SET(pp[0], ARCH_CONVERT, lbno); | 1432 | pp[0] = cpu_to_be32(lbno); |
1430 | INT_SET(pp[1], ARCH_CONVERT, rbno); | 1433 | pp[1] = cpu_to_be32(rbno); |
1431 | } | 1434 | } |
1432 | xfs_alloc_log_ptrs(cur, nbp, 1, 2); | 1435 | xfs_alloc_log_ptrs(cur, nbp, 1, 2); |
1433 | /* | 1436 | /* |
@@ -1472,7 +1475,7 @@ xfs_alloc_rshift( | |||
1472 | /* | 1475 | /* |
1473 | * If we've got no right sibling then we can't shift an entry right. | 1476 | * If we've got no right sibling then we can't shift an entry right. |
1474 | */ | 1477 | */ |
1475 | if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1478 | if (be32_to_cpu(left->bb_rightsib) == NULLAGBLOCK) { |
1476 | *stat = 0; | 1479 | *stat = 0; |
1477 | return 0; | 1480 | return 0; |
1478 | } | 1481 | } |
@@ -1480,7 +1483,7 @@ xfs_alloc_rshift( | |||
1480 | * If the cursor entry is the one that would be moved, don't | 1483 | * If the cursor entry is the one that would be moved, don't |
1481 | * do it... it's too complicated. | 1484 | * do it... it's too complicated. |
1482 | */ | 1485 | */ |
1483 | if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) { | 1486 | if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) { |
1484 | *stat = 0; | 1487 | *stat = 0; |
1485 | return 0; | 1488 | return 0; |
1486 | } | 1489 | } |
@@ -1488,8 +1491,8 @@ xfs_alloc_rshift( | |||
1488 | * Set up the right neighbor as "right". | 1491 | * Set up the right neighbor as "right". |
1489 | */ | 1492 | */ |
1490 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1493 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1491 | cur->bc_private.a.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rbp, | 1494 | cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), |
1492 | XFS_ALLOC_BTREE_REF))) | 1495 | 0, &rbp, XFS_ALLOC_BTREE_REF))) |
1493 | return error; | 1496 | return error; |
1494 | right = XFS_BUF_TO_ALLOC_BLOCK(rbp); | 1497 | right = XFS_BUF_TO_ALLOC_BLOCK(rbp); |
1495 | if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) | 1498 | if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) |
@@ -1497,7 +1500,7 @@ xfs_alloc_rshift( | |||
1497 | /* | 1500 | /* |
1498 | * If it's full, it can't take another entry. | 1501 | * If it's full, it can't take another entry. |
1499 | */ | 1502 | */ |
1500 | if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { | 1503 | if (be16_to_cpu(right->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { |
1501 | *stat = 0; | 1504 | *stat = 0; |
1502 | return 0; | 1505 | return 0; |
1503 | } | 1506 | } |
@@ -1510,47 +1513,47 @@ xfs_alloc_rshift( | |||
1510 | xfs_alloc_ptr_t *lpp; /* address pointer for left block */ | 1513 | xfs_alloc_ptr_t *lpp; /* address pointer for left block */ |
1511 | xfs_alloc_ptr_t *rpp; /* address pointer for right block */ | 1514 | xfs_alloc_ptr_t *rpp; /* address pointer for right block */ |
1512 | 1515 | ||
1513 | lkp = XFS_ALLOC_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1516 | lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1514 | lpp = XFS_ALLOC_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1517 | lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1515 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); | 1518 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); |
1516 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); | 1519 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); |
1517 | #ifdef DEBUG | 1520 | #ifdef DEBUG |
1518 | for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) { | 1521 | for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) { |
1519 | if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) | 1522 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) |
1520 | return error; | 1523 | return error; |
1521 | } | 1524 | } |
1522 | #endif | 1525 | #endif |
1523 | memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); | 1526 | memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); |
1524 | memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); | 1527 | memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); |
1525 | #ifdef DEBUG | 1528 | #ifdef DEBUG |
1526 | if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) | 1529 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level))) |
1527 | return error; | 1530 | return error; |
1528 | #endif | 1531 | #endif |
1529 | *rkp = *lkp; /* INT_: copy */ | 1532 | *rkp = *lkp; /* INT_: copy */ |
1530 | *rpp = *lpp; /* INT_: copy */ | 1533 | *rpp = *lpp; /* INT_: copy */ |
1531 | xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1534 | xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1532 | xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1535 | xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1533 | xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); | 1536 | xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); |
1534 | } else { | 1537 | } else { |
1535 | xfs_alloc_rec_t *lrp; /* record pointer for left block */ | 1538 | xfs_alloc_rec_t *lrp; /* record pointer for left block */ |
1536 | xfs_alloc_rec_t *rrp; /* record pointer for right block */ | 1539 | xfs_alloc_rec_t *rrp; /* record pointer for right block */ |
1537 | 1540 | ||
1538 | lrp = XFS_ALLOC_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1541 | lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1539 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); | 1542 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); |
1540 | memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); | 1543 | memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); |
1541 | *rrp = *lrp; | 1544 | *rrp = *lrp; |
1542 | xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1545 | xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1543 | key.ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ | 1546 | key.ar_startblock = rrp->ar_startblock; |
1544 | key.ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ | 1547 | key.ar_blockcount = rrp->ar_blockcount; |
1545 | rkp = &key; | 1548 | rkp = &key; |
1546 | xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1); | 1549 | xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1); |
1547 | } | 1550 | } |
1548 | /* | 1551 | /* |
1549 | * Decrement and log left's numrecs, bump and log right's numrecs. | 1552 | * Decrement and log left's numrecs, bump and log right's numrecs. |
1550 | */ | 1553 | */ |
1551 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1); | 1554 | be16_add(&left->bb_numrecs, -1); |
1552 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); | 1555 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); |
1553 | INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); | 1556 | be16_add(&right->bb_numrecs, 1); |
1554 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); | 1557 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); |
1555 | /* | 1558 | /* |
1556 | * Using a temporary cursor, update the parent key values of the | 1559 | * Using a temporary cursor, update the parent key values of the |
@@ -1623,17 +1626,17 @@ xfs_alloc_split( | |||
1623 | /* | 1626 | /* |
1624 | * Fill in the btree header for the new block. | 1627 | * Fill in the btree header for the new block. |
1625 | */ | 1628 | */ |
1626 | INT_SET(right->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); | 1629 | right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); |
1627 | right->bb_level = left->bb_level; /* INT_: direct copy */ | 1630 | right->bb_level = left->bb_level; |
1628 | INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2)); | 1631 | right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2); |
1629 | /* | 1632 | /* |
1630 | * Make sure that if there's an odd number of entries now, that | 1633 | * Make sure that if there's an odd number of entries now, that |
1631 | * each new block will have the same number of entries. | 1634 | * each new block will have the same number of entries. |
1632 | */ | 1635 | */ |
1633 | if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) && | 1636 | if ((be16_to_cpu(left->bb_numrecs) & 1) && |
1634 | cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1) | 1637 | cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) |
1635 | INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); | 1638 | be16_add(&right->bb_numrecs, 1); |
1636 | i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1; | 1639 | i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; |
1637 | /* | 1640 | /* |
1638 | * For non-leaf blocks, copy keys and addresses over to the new block. | 1641 | * For non-leaf blocks, copy keys and addresses over to the new block. |
1639 | */ | 1642 | */ |
@@ -1648,15 +1651,15 @@ xfs_alloc_split( | |||
1648 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); | 1651 | rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); |
1649 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); | 1652 | rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); |
1650 | #ifdef DEBUG | 1653 | #ifdef DEBUG |
1651 | for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { | 1654 | for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { |
1652 | if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) | 1655 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level))) |
1653 | return error; | 1656 | return error; |
1654 | } | 1657 | } |
1655 | #endif | 1658 | #endif |
1656 | memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); /* INT_: copy */ | 1659 | memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); |
1657 | memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); /* INT_: copy */ | 1660 | memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); |
1658 | xfs_alloc_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1661 | xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1659 | xfs_alloc_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1662 | xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1660 | *keyp = *rkp; | 1663 | *keyp = *rkp; |
1661 | } | 1664 | } |
1662 | /* | 1665 | /* |
@@ -1668,38 +1671,38 @@ xfs_alloc_split( | |||
1668 | 1671 | ||
1669 | lrp = XFS_ALLOC_REC_ADDR(left, i, cur); | 1672 | lrp = XFS_ALLOC_REC_ADDR(left, i, cur); |
1670 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); | 1673 | rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); |
1671 | memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); | 1674 | memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); |
1672 | xfs_alloc_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1675 | xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1673 | keyp->ar_startblock = rrp->ar_startblock; /* INT_: direct copy */ | 1676 | keyp->ar_startblock = rrp->ar_startblock; |
1674 | keyp->ar_blockcount = rrp->ar_blockcount; /* INT_: direct copy */ | 1677 | keyp->ar_blockcount = rrp->ar_blockcount; |
1675 | } | 1678 | } |
1676 | /* | 1679 | /* |
1677 | * Find the left block number by looking in the buffer. | 1680 | * Find the left block number by looking in the buffer. |
1678 | * Adjust numrecs, sibling pointers. | 1681 | * Adjust numrecs, sibling pointers. |
1679 | */ | 1682 | */ |
1680 | lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp)); | 1683 | lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp)); |
1681 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT))); | 1684 | be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); |
1682 | right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */ | 1685 | right->bb_rightsib = left->bb_rightsib; |
1683 | INT_SET(left->bb_rightsib, ARCH_CONVERT, rbno); | 1686 | left->bb_rightsib = cpu_to_be32(rbno); |
1684 | INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno); | 1687 | right->bb_leftsib = cpu_to_be32(lbno); |
1685 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS); | 1688 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS); |
1686 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); | 1689 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); |
1687 | /* | 1690 | /* |
1688 | * If there's a block to the new block's right, make that block | 1691 | * If there's a block to the new block's right, make that block |
1689 | * point back to right instead of to left. | 1692 | * point back to right instead of to left. |
1690 | */ | 1693 | */ |
1691 | if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 1694 | if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) { |
1692 | xfs_alloc_block_t *rrblock; /* rr btree block */ | 1695 | xfs_alloc_block_t *rrblock; /* rr btree block */ |
1693 | xfs_buf_t *rrbp; /* buffer for rrblock */ | 1696 | xfs_buf_t *rrbp; /* buffer for rrblock */ |
1694 | 1697 | ||
1695 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1698 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1696 | cur->bc_private.a.agno, INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, | 1699 | cur->bc_private.a.agno, be32_to_cpu(right->bb_rightsib), 0, |
1697 | &rrbp, XFS_ALLOC_BTREE_REF))) | 1700 | &rrbp, XFS_ALLOC_BTREE_REF))) |
1698 | return error; | 1701 | return error; |
1699 | rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); | 1702 | rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); |
1700 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) | 1703 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) |
1701 | return error; | 1704 | return error; |
1702 | INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, rbno); | 1705 | rrblock->bb_leftsib = cpu_to_be32(rbno); |
1703 | xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); | 1706 | xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); |
1704 | } | 1707 | } |
1705 | /* | 1708 | /* |
@@ -1707,9 +1710,9 @@ xfs_alloc_split( | |||
1707 | * If it's just pointing past the last entry in left, then we'll | 1710 | * If it's just pointing past the last entry in left, then we'll |
1708 | * insert there, so don't change anything in that case. | 1711 | * insert there, so don't change anything in that case. |
1709 | */ | 1712 | */ |
1710 | if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) { | 1713 | if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) { |
1711 | xfs_btree_setbuf(cur, level, rbp); | 1714 | xfs_btree_setbuf(cur, level, rbp); |
1712 | cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT); | 1715 | cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs); |
1713 | } | 1716 | } |
1714 | /* | 1717 | /* |
1715 | * If there are more levels, we'll need another cursor which refers to | 1718 | * If there are more levels, we'll need another cursor which refers to |
@@ -1807,7 +1810,7 @@ xfs_alloc_decrement( | |||
1807 | /* | 1810 | /* |
1808 | * If we just went off the left edge of the tree, return failure. | 1811 | * If we just went off the left edge of the tree, return failure. |
1809 | */ | 1812 | */ |
1810 | if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1813 | if (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK) { |
1811 | *stat = 0; | 1814 | *stat = 0; |
1812 | return 0; | 1815 | return 0; |
1813 | } | 1816 | } |
@@ -1836,7 +1839,7 @@ xfs_alloc_decrement( | |||
1836 | xfs_agblock_t agbno; /* block number of btree block */ | 1839 | xfs_agblock_t agbno; /* block number of btree block */ |
1837 | xfs_buf_t *bp; /* buffer pointer for block */ | 1840 | xfs_buf_t *bp; /* buffer pointer for block */ |
1838 | 1841 | ||
1839 | agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); | 1842 | agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); |
1840 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1843 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1841 | cur->bc_private.a.agno, agbno, 0, &bp, | 1844 | cur->bc_private.a.agno, agbno, 0, &bp, |
1842 | XFS_ALLOC_BTREE_REF))) | 1845 | XFS_ALLOC_BTREE_REF))) |
@@ -1846,7 +1849,7 @@ xfs_alloc_decrement( | |||
1846 | block = XFS_BUF_TO_ALLOC_BLOCK(bp); | 1849 | block = XFS_BUF_TO_ALLOC_BLOCK(bp); |
1847 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) | 1850 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) |
1848 | return error; | 1851 | return error; |
1849 | cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT); | 1852 | cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs); |
1850 | } | 1853 | } |
1851 | *stat = 1; | 1854 | *stat = 1; |
1852 | return 0; | 1855 | return 0; |
@@ -1913,7 +1916,7 @@ xfs_alloc_get_rec( | |||
1913 | /* | 1916 | /* |
1914 | * Off the right end or left end, return failure. | 1917 | * Off the right end or left end, return failure. |
1915 | */ | 1918 | */ |
1916 | if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) { | 1919 | if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) { |
1917 | *stat = 0; | 1920 | *stat = 0; |
1918 | return 0; | 1921 | return 0; |
1919 | } | 1922 | } |
@@ -1924,8 +1927,8 @@ xfs_alloc_get_rec( | |||
1924 | xfs_alloc_rec_t *rec; /* record data */ | 1927 | xfs_alloc_rec_t *rec; /* record data */ |
1925 | 1928 | ||
1926 | rec = XFS_ALLOC_REC_ADDR(block, ptr, cur); | 1929 | rec = XFS_ALLOC_REC_ADDR(block, ptr, cur); |
1927 | *bno = INT_GET(rec->ar_startblock, ARCH_CONVERT); | 1930 | *bno = be32_to_cpu(rec->ar_startblock); |
1928 | *len = INT_GET(rec->ar_blockcount, ARCH_CONVERT); | 1931 | *len = be32_to_cpu(rec->ar_blockcount); |
1929 | } | 1932 | } |
1930 | *stat = 1; | 1933 | *stat = 1; |
1931 | return 0; | 1934 | return 0; |
@@ -1964,14 +1967,14 @@ xfs_alloc_increment( | |||
1964 | * Increment the ptr at this level. If we're still in the block | 1967 | * Increment the ptr at this level. If we're still in the block |
1965 | * then we're done. | 1968 | * then we're done. |
1966 | */ | 1969 | */ |
1967 | if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 1970 | if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) { |
1968 | *stat = 1; | 1971 | *stat = 1; |
1969 | return 0; | 1972 | return 0; |
1970 | } | 1973 | } |
1971 | /* | 1974 | /* |
1972 | * If we just went off the right edge of the tree, return failure. | 1975 | * If we just went off the right edge of the tree, return failure. |
1973 | */ | 1976 | */ |
1974 | if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1977 | if (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK) { |
1975 | *stat = 0; | 1978 | *stat = 0; |
1976 | return 0; | 1979 | return 0; |
1977 | } | 1980 | } |
@@ -1986,7 +1989,7 @@ xfs_alloc_increment( | |||
1986 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) | 1989 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) |
1987 | return error; | 1990 | return error; |
1988 | #endif | 1991 | #endif |
1989 | if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 1992 | if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs)) |
1990 | break; | 1993 | break; |
1991 | /* | 1994 | /* |
1992 | * Read-ahead the right block, we're going to read it | 1995 | * Read-ahead the right block, we're going to read it |
@@ -2006,7 +2009,7 @@ xfs_alloc_increment( | |||
2006 | lev > level; ) { | 2009 | lev > level; ) { |
2007 | xfs_agblock_t agbno; /* block number of btree block */ | 2010 | xfs_agblock_t agbno; /* block number of btree block */ |
2008 | 2011 | ||
2009 | agbno = INT_GET(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); | 2012 | agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); |
2010 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 2013 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
2011 | cur->bc_private.a.agno, agbno, 0, &bp, | 2014 | cur->bc_private.a.agno, agbno, 0, &bp, |
2012 | XFS_ALLOC_BTREE_REF))) | 2015 | XFS_ALLOC_BTREE_REF))) |
@@ -2041,8 +2044,8 @@ xfs_alloc_insert( | |||
2041 | 2044 | ||
2042 | level = 0; | 2045 | level = 0; |
2043 | nbno = NULLAGBLOCK; | 2046 | nbno = NULLAGBLOCK; |
2044 | INT_SET(nrec.ar_startblock, ARCH_CONVERT, cur->bc_rec.a.ar_startblock); | 2047 | nrec.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock); |
2045 | INT_SET(nrec.ar_blockcount, ARCH_CONVERT, cur->bc_rec.a.ar_blockcount); | 2048 | nrec.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount); |
2046 | ncur = (xfs_btree_cur_t *)0; | 2049 | ncur = (xfs_btree_cur_t *)0; |
2047 | pcur = cur; | 2050 | pcur = cur; |
2048 | /* | 2051 | /* |
@@ -2163,8 +2166,8 @@ xfs_alloc_update( | |||
2163 | /* | 2166 | /* |
2164 | * Fill in the new contents and log them. | 2167 | * Fill in the new contents and log them. |
2165 | */ | 2168 | */ |
2166 | INT_SET(rp->ar_startblock, ARCH_CONVERT, bno); | 2169 | rp->ar_startblock = cpu_to_be32(bno); |
2167 | INT_SET(rp->ar_blockcount, ARCH_CONVERT, len); | 2170 | rp->ar_blockcount = cpu_to_be32(len); |
2168 | xfs_alloc_log_recs(cur, cur->bc_bufs[0], ptr, ptr); | 2171 | xfs_alloc_log_recs(cur, cur->bc_bufs[0], ptr, ptr); |
2169 | } | 2172 | } |
2170 | /* | 2173 | /* |
@@ -2173,15 +2176,15 @@ xfs_alloc_update( | |||
2173 | * extent in the a.g., which we cache in the a.g. freelist header. | 2176 | * extent in the a.g., which we cache in the a.g. freelist header. |
2174 | */ | 2177 | */ |
2175 | if (cur->bc_btnum == XFS_BTNUM_CNT && | 2178 | if (cur->bc_btnum == XFS_BTNUM_CNT && |
2176 | INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK && | 2179 | be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && |
2177 | ptr == INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 2180 | ptr == be16_to_cpu(block->bb_numrecs)) { |
2178 | xfs_agf_t *agf; /* a.g. freespace header */ | 2181 | xfs_agf_t *agf; /* a.g. freespace header */ |
2179 | xfs_agnumber_t seqno; | 2182 | xfs_agnumber_t seqno; |
2180 | 2183 | ||
2181 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 2184 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
2182 | seqno = INT_GET(agf->agf_seqno, ARCH_CONVERT); | 2185 | seqno = be32_to_cpu(agf->agf_seqno); |
2183 | cur->bc_mp->m_perag[seqno].pagf_longest = len; | 2186 | cur->bc_mp->m_perag[seqno].pagf_longest = len; |
2184 | INT_SET(agf->agf_longest, ARCH_CONVERT, len); | 2187 | agf->agf_longest = cpu_to_be32(len); |
2185 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 2188 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
2186 | XFS_AGF_LONGEST); | 2189 | XFS_AGF_LONGEST); |
2187 | } | 2190 | } |
@@ -2191,8 +2194,8 @@ xfs_alloc_update( | |||
2191 | if (ptr == 1) { | 2194 | if (ptr == 1) { |
2192 | xfs_alloc_key_t key; /* key containing [bno, len] */ | 2195 | xfs_alloc_key_t key; /* key containing [bno, len] */ |
2193 | 2196 | ||
2194 | INT_SET(key.ar_startblock, ARCH_CONVERT, bno); | 2197 | key.ar_startblock = cpu_to_be32(bno); |
2195 | INT_SET(key.ar_blockcount, ARCH_CONVERT, len); | 2198 | key.ar_blockcount = cpu_to_be32(len); |
2196 | if ((error = xfs_alloc_updkey(cur, &key, 1))) | 2199 | if ((error = xfs_alloc_updkey(cur, &key, 1))) |
2197 | return error; | 2200 | return error; |
2198 | } | 2201 | } |
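Across fs/xfs/xfs_alloc.c the conversion is mechanical: INT_GET(x, ARCH_CONVERT) becomes be16_to_cpu()/be32_to_cpu(), INT_SET() becomes cpu_to_be16()/cpu_to_be32(), INT_MOD() becomes be16_add()/be32_add(), and INT_COPY() between two on-disk fields becomes a plain assignment, since both sides already hold big-endian values. A small sketch of why that plain assignment needs no byte swap (userspace stand-ins, hypothetical field names):

#include <stdint.h>
#include <arpa/inet.h>
#include <assert.h>

int main(void)
{
	uint32_t disk_src = htonl(1234);	/* big-endian source field */
	uint32_t disk_dst;

	disk_dst = disk_src;			/* INT_COPY() turns into a direct copy */
	assert(ntohl(disk_dst) == 1234);	/* still decodes correctly */
	return 0;
}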
diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h index 5615ebba6a3a..bce81c7a4fdc 100644 --- a/fs/xfs/xfs_alloc_btree.h +++ b/fs/xfs/xfs_alloc_btree.h | |||
@@ -38,14 +38,19 @@ struct xfs_mount; | |||
38 | /* | 38 | /* |
39 | * Data record/key structure | 39 | * Data record/key structure |
40 | */ | 40 | */ |
41 | typedef struct xfs_alloc_rec | 41 | typedef struct xfs_alloc_rec { |
42 | { | 42 | __be32 ar_startblock; /* starting block number */ |
43 | __be32 ar_blockcount; /* count of free blocks */ | ||
44 | } xfs_alloc_rec_t, xfs_alloc_key_t; | ||
45 | |||
46 | typedef struct xfs_alloc_rec_incore { | ||
43 | xfs_agblock_t ar_startblock; /* starting block number */ | 47 | xfs_agblock_t ar_startblock; /* starting block number */ |
44 | xfs_extlen_t ar_blockcount; /* count of free blocks */ | 48 | xfs_extlen_t ar_blockcount; /* count of free blocks */ |
45 | } xfs_alloc_rec_t, xfs_alloc_key_t; | 49 | } xfs_alloc_rec_incore_t; |
46 | 50 | ||
47 | typedef xfs_agblock_t xfs_alloc_ptr_t; /* btree pointer type */ | 51 | /* btree pointer type */ |
48 | /* btree block header type */ | 52 | typedef __be32 xfs_alloc_ptr_t; |
53 | /* btree block header type */ | ||
49 | typedef struct xfs_btree_sblock xfs_alloc_block_t; | 54 | typedef struct xfs_btree_sblock xfs_alloc_block_t; |
50 | 55 | ||
51 | #define XFS_BUF_TO_ALLOC_BLOCK(bp) ((xfs_alloc_block_t *)XFS_BUF_PTR(bp)) | 56 | #define XFS_BUF_TO_ALLOC_BLOCK(bp) ((xfs_alloc_block_t *)XFS_BUF_PTR(bp)) |
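The header change above splits the allocation record into an on-disk form (xfs_alloc_rec_t with __be32 fields) and a native in-core form (xfs_alloc_rec_incore_t). A hedged sketch of converting between the two with simplified userspace types; the helper names are illustrative, not kernel functions:

#include <stdint.h>
#include <arpa/inet.h>

struct alloc_rec        { uint32_t ar_startblock, ar_blockcount; };	/* big-endian */
struct alloc_rec_incore { uint32_t ar_startblock, ar_blockcount; };	/* CPU order */

static void alloc_rec_from_disk(struct alloc_rec_incore *in,
				const struct alloc_rec *disk)
{
	in->ar_startblock = ntohl(disk->ar_startblock);
	in->ar_blockcount = ntohl(disk->ar_blockcount);
}

static void alloc_rec_to_disk(struct alloc_rec *disk,
			      const struct alloc_rec_incore *in)
{
	disk->ar_startblock = htonl(in->ar_startblock);
	disk->ar_blockcount = htonl(in->ar_blockcount);
}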
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 9f635f0ccf08..e415a4698e9c 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -2763,8 +2763,8 @@ xfs_bmap_btree_to_extents( | |||
2763 | ASSERT(ifp->if_flags & XFS_IFEXTENTS); | 2763 | ASSERT(ifp->if_flags & XFS_IFEXTENTS); |
2764 | ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); | 2764 | ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); |
2765 | rblock = ifp->if_broot; | 2765 | rblock = ifp->if_broot; |
2766 | ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) == 1); | 2766 | ASSERT(be16_to_cpu(rblock->bb_level) == 1); |
2767 | ASSERT(INT_GET(rblock->bb_numrecs, ARCH_CONVERT) == 1); | 2767 | ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1); |
2768 | ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1); | 2768 | ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1); |
2769 | mp = ip->i_mount; | 2769 | mp = ip->i_mount; |
2770 | pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes); | 2770 | pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes); |
@@ -3207,11 +3207,11 @@ xfs_bmap_extents_to_btree( | |||
3207 | * Fill in the root. | 3207 | * Fill in the root. |
3208 | */ | 3208 | */ |
3209 | block = ifp->if_broot; | 3209 | block = ifp->if_broot; |
3210 | INT_SET(block->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); | 3210 | block->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC); |
3211 | INT_SET(block->bb_level, ARCH_CONVERT, 1); | 3211 | block->bb_level = cpu_to_be16(1); |
3212 | INT_SET(block->bb_numrecs, ARCH_CONVERT, 1); | 3212 | block->bb_numrecs = cpu_to_be16(1); |
3213 | INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLDFSBNO); | 3213 | block->bb_leftsib = cpu_to_be64(NULLDFSBNO); |
3214 | INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLDFSBNO); | 3214 | block->bb_rightsib = cpu_to_be64(NULLDFSBNO); |
3215 | /* | 3215 | /* |
3216 | * Need a cursor. Can't allocate until bb_level is filled in. | 3216 | * Need a cursor. Can't allocate until bb_level is filled in. |
3217 | */ | 3217 | */ |
@@ -3264,10 +3264,10 @@ xfs_bmap_extents_to_btree( | |||
3264 | * Fill in the child block. | 3264 | * Fill in the child block. |
3265 | */ | 3265 | */ |
3266 | ablock = XFS_BUF_TO_BMBT_BLOCK(abp); | 3266 | ablock = XFS_BUF_TO_BMBT_BLOCK(abp); |
3267 | INT_SET(ablock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); | 3267 | ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC); |
3268 | ablock->bb_level = 0; | 3268 | ablock->bb_level = 0; |
3269 | INT_SET(ablock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO); | 3269 | ablock->bb_leftsib = cpu_to_be64(NULLDFSBNO); |
3270 | INT_SET(ablock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO); | 3270 | ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO); |
3271 | arp = XFS_BMAP_REC_IADDR(ablock, 1, cur); | 3271 | arp = XFS_BMAP_REC_IADDR(ablock, 1, cur); |
3272 | nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); | 3272 | nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
3273 | for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) { | 3273 | for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) { |
@@ -3277,8 +3277,8 @@ xfs_bmap_extents_to_btree( | |||
3277 | arp++; cnt++; | 3277 | arp++; cnt++; |
3278 | } | 3278 | } |
3279 | } | 3279 | } |
3280 | INT_SET(ablock->bb_numrecs, ARCH_CONVERT, cnt); | 3280 | ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork)); |
3281 | ASSERT(INT_GET(ablock->bb_numrecs, ARCH_CONVERT) == XFS_IFORK_NEXTENTS(ip, whichfork)); | 3281 | ablock->bb_numrecs = cpu_to_be16(cnt); |
3282 | /* | 3282 | /* |
3283 | * Fill in the root key and pointer. | 3283 | * Fill in the root key and pointer. |
3284 | */ | 3284 | */ |
@@ -3292,7 +3292,7 @@ xfs_bmap_extents_to_btree( | |||
3292 | * the root is at the right level. | 3292 | * the root is at the right level. |
3293 | */ | 3293 | */ |
3294 | xfs_bmbt_log_block(cur, abp, XFS_BB_ALL_BITS); | 3294 | xfs_bmbt_log_block(cur, abp, XFS_BB_ALL_BITS); |
3295 | xfs_bmbt_log_recs(cur, abp, 1, INT_GET(ablock->bb_numrecs, ARCH_CONVERT)); | 3295 | xfs_bmbt_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs)); |
3296 | ASSERT(*curp == NULL); | 3296 | ASSERT(*curp == NULL); |
3297 | *curp = cur; | 3297 | *curp = cur; |
3298 | *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork); | 3298 | *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork); |
@@ -4371,8 +4371,8 @@ xfs_bmap_read_extents( | |||
4371 | /* | 4371 | /* |
4372 | * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. | 4372 | * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. |
4373 | */ | 4373 | */ |
4374 | ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0); | 4374 | level = be16_to_cpu(block->bb_level); |
4375 | level = INT_GET(block->bb_level, ARCH_CONVERT); | 4375 | ASSERT(level > 0); |
4376 | pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); | 4376 | pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); |
4377 | ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); | 4377 | ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); |
4378 | ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount); | 4378 | ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount); |
@@ -4415,7 +4415,7 @@ xfs_bmap_read_extents( | |||
4415 | xfs_extnum_t num_recs; | 4415 | xfs_extnum_t num_recs; |
4416 | 4416 | ||
4417 | 4417 | ||
4418 | num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT); | 4418 | num_recs = be16_to_cpu(block->bb_numrecs); |
4419 | if (unlikely(i + num_recs > room)) { | 4419 | if (unlikely(i + num_recs > room)) { |
4420 | ASSERT(i + num_recs <= room); | 4420 | ASSERT(i + num_recs <= room); |
4421 | xfs_fs_cmn_err(CE_WARN, ip->i_mount, | 4421 | xfs_fs_cmn_err(CE_WARN, ip->i_mount, |
@@ -4432,7 +4432,7 @@ xfs_bmap_read_extents( | |||
4432 | /* | 4432 | /* |
4433 | * Read-ahead the next leaf block, if any. | 4433 | * Read-ahead the next leaf block, if any. |
4434 | */ | 4434 | */ |
4435 | nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); | 4435 | nextbno = be64_to_cpu(block->bb_rightsib); |
4436 | if (nextbno != NULLFSBLOCK) | 4436 | if (nextbno != NULLFSBLOCK) |
4437 | xfs_btree_reada_bufl(mp, nextbno, 1); | 4437 | xfs_btree_reada_bufl(mp, nextbno, 1); |
4438 | /* | 4438 | /* |
@@ -4689,7 +4689,7 @@ xfs_bmapi( | |||
4689 | } | 4689 | } |
4690 | if (wr && *firstblock == NULLFSBLOCK) { | 4690 | if (wr && *firstblock == NULLFSBLOCK) { |
4691 | if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE) | 4691 | if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE) |
4692 | minleft = INT_GET(ifp->if_broot->bb_level, ARCH_CONVERT) + 1; | 4692 | minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1; |
4693 | else | 4693 | else |
4694 | minleft = 1; | 4694 | minleft = 1; |
4695 | } else | 4695 | } else |
@@ -5967,10 +5967,10 @@ xfs_check_block( | |||
5967 | xfs_bmbt_ptr_t *pp, *thispa; /* pointer to block address */ | 5967 | xfs_bmbt_ptr_t *pp, *thispa; /* pointer to block address */ |
5968 | xfs_bmbt_key_t *prevp, *keyp; | 5968 | xfs_bmbt_key_t *prevp, *keyp; |
5969 | 5969 | ||
5970 | ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0); | 5970 | ASSERT(be16_to_cpu(block->bb_level) > 0); |
5971 | 5971 | ||
5972 | prevp = NULL; | 5972 | prevp = NULL; |
5973 | for( i = 1; i <= INT_GET(block->bb_numrecs, ARCH_CONVERT);i++) { | 5973 | for( i = 1; i <= be16_to_cpu(block->bb_numrecs); i++) { |
5974 | dmxr = mp->m_bmap_dmxr[0]; | 5974 | dmxr = mp->m_bmap_dmxr[0]; |
5975 | 5975 | ||
5976 | if (root) { | 5976 | if (root) { |
@@ -5995,7 +5995,7 @@ xfs_check_block( | |||
5995 | pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, | 5995 | pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, |
5996 | xfs_bmbt, block, i, dmxr); | 5996 | xfs_bmbt, block, i, dmxr); |
5997 | } | 5997 | } |
5998 | for (j = i+1; j <= INT_GET(block->bb_numrecs, ARCH_CONVERT); j++) { | 5998 | for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) { |
5999 | if (root) { | 5999 | if (root) { |
6000 | thispa = XFS_BMAP_BROOT_PTR_ADDR(block, j, sz); | 6000 | thispa = XFS_BMAP_BROOT_PTR_ADDR(block, j, sz); |
6001 | } else { | 6001 | } else { |
@@ -6048,8 +6048,8 @@ xfs_bmap_check_leaf_extents( | |||
6048 | /* | 6048 | /* |
6049 | * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. | 6049 | * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. |
6050 | */ | 6050 | */ |
6051 | ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0); | 6051 | level = be16_to_cpu(block->bb_level); |
6052 | level = INT_GET(block->bb_level, ARCH_CONVERT); | 6052 | ASSERT(level > 0); |
6053 | xfs_check_block(block, mp, 1, ifp->if_broot_bytes); | 6053 | xfs_check_block(block, mp, 1, ifp->if_broot_bytes); |
6054 | pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); | 6054 | pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); |
6055 | ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); | 6055 | ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); |
@@ -6109,13 +6109,13 @@ xfs_bmap_check_leaf_extents( | |||
6109 | xfs_extnum_t num_recs; | 6109 | xfs_extnum_t num_recs; |
6110 | 6110 | ||
6111 | 6111 | ||
6112 | num_recs = INT_GET(block->bb_numrecs, ARCH_CONVERT); | 6112 | num_recs = be16_to_cpu(block->bb_numrecs); |
6113 | 6113 | ||
6114 | /* | 6114 | /* |
6115 | * Read-ahead the next leaf block, if any. | 6115 | * Read-ahead the next leaf block, if any. |
6116 | */ | 6116 | */ |
6117 | 6117 | ||
6118 | nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); | 6118 | nextbno = be64_to_cpu(block->bb_rightsib); |
6119 | 6119 | ||
6120 | /* | 6120 | /* |
6121 | * Check all the extents to make sure they are OK. | 6121 | * Check all the extents to make sure they are OK. |
@@ -6212,8 +6212,8 @@ xfs_bmap_count_blocks( | |||
6212 | * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. | 6212 | * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. |
6213 | */ | 6213 | */ |
6214 | block = ifp->if_broot; | 6214 | block = ifp->if_broot; |
6215 | ASSERT(INT_GET(block->bb_level, ARCH_CONVERT) > 0); | 6215 | level = be16_to_cpu(block->bb_level); |
6216 | level = INT_GET(block->bb_level, ARCH_CONVERT); | 6216 | ASSERT(level > 0); |
6217 | pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); | 6217 | pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); |
6218 | ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); | 6218 | ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); |
6219 | ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount); | 6219 | ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount); |
@@ -6258,14 +6258,14 @@ xfs_bmap_count_tree( | |||
6258 | 6258 | ||
6259 | if (--level) { | 6259 | if (--level) { |
6260 | /* Not at node above leafs, count this level of nodes */ | 6260 | /* Not at node above leafs, count this level of nodes */ |
6261 | nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); | 6261 | nextbno = be64_to_cpu(block->bb_rightsib); |
6262 | while (nextbno != NULLFSBLOCK) { | 6262 | while (nextbno != NULLFSBLOCK) { |
6263 | if ((error = xfs_btree_read_bufl(mp, tp, nextbno, | 6263 | if ((error = xfs_btree_read_bufl(mp, tp, nextbno, |
6264 | 0, &nbp, XFS_BMAP_BTREE_REF))) | 6264 | 0, &nbp, XFS_BMAP_BTREE_REF))) |
6265 | return error; | 6265 | return error; |
6266 | *count += 1; | 6266 | *count += 1; |
6267 | nextblock = XFS_BUF_TO_BMBT_BLOCK(nbp); | 6267 | nextblock = XFS_BUF_TO_BMBT_BLOCK(nbp); |
6268 | nextbno = INT_GET(nextblock->bb_rightsib, ARCH_CONVERT); | 6268 | nextbno = be64_to_cpu(nextblock->bb_rightsib); |
6269 | xfs_trans_brelse(tp, nbp); | 6269 | xfs_trans_brelse(tp, nbp); |
6270 | } | 6270 | } |
6271 | 6271 | ||
@@ -6284,8 +6284,8 @@ xfs_bmap_count_tree( | |||
6284 | } else { | 6284 | } else { |
6285 | /* count all level 1 nodes and their leaves */ | 6285 | /* count all level 1 nodes and their leaves */ |
6286 | for (;;) { | 6286 | for (;;) { |
6287 | nextbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); | 6287 | nextbno = be64_to_cpu(block->bb_rightsib); |
6288 | numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); | 6288 | numrecs = be16_to_cpu(block->bb_numrecs); |
6289 | frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, | 6289 | frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, |
6290 | xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]); | 6290 | xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]); |
6291 | if (unlikely(xfs_bmap_disk_count_leaves(frp, numrecs, count) < 0)) { | 6291 | if (unlikely(xfs_bmap_disk_count_leaves(frp, numrecs, count) < 0)) { |
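Unlike the short-form allocation btrees, the bmap btree blocks above carry 64-bit sibling pointers, so the right-sibling walks use be64_to_cpu() against the NULLFSBLOCK/NULLDFSBNO sentinel. A hedged sketch of that walk pattern with glibc's be64toh() in place of be64_to_cpu(); get_block() is a hypothetical callback standing in for the xfs_btree_read_bufl() plus XFS_BUF_TO_BMBT_BLOCK() pair used in the code:

#define _DEFAULT_SOURCE
#include <stdint.h>
#include <endian.h>

#define NULLFSBLOCK ((uint64_t)-1)

struct bmbt_block { uint64_t bb_rightsib; };	/* big-endian on disk, simplified */

/* walk one level by following bb_rightsib until the sentinel */
static void count_level(const struct bmbt_block *block,
			const struct bmbt_block *(*get_block)(uint64_t fsbno),
			uint64_t *count)
{
	uint64_t nextbno = be64toh(block->bb_rightsib);

	while (nextbno != NULLFSBLOCK) {
		block = get_block(nextbno);
		(*count)++;
		nextbno = be64toh(block->bb_rightsib);
	}
}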
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 3bf70eca1638..3f1383d160e8 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c | |||
@@ -366,7 +366,7 @@ xfs_bmbt_delrec( | |||
366 | return 0; | 366 | return 0; |
367 | } | 367 | } |
368 | block = xfs_bmbt_get_block(cur, level, &bp); | 368 | block = xfs_bmbt_get_block(cur, level, &bp); |
369 | numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); | 369 | numrecs = be16_to_cpu(block->bb_numrecs); |
370 | #ifdef DEBUG | 370 | #ifdef DEBUG |
371 | if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { | 371 | if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { |
372 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 372 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
@@ -411,7 +411,7 @@ xfs_bmbt_delrec( | |||
411 | } | 411 | } |
412 | } | 412 | } |
413 | numrecs--; | 413 | numrecs--; |
414 | INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); | 414 | block->bb_numrecs = cpu_to_be16(numrecs); |
415 | xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS); | 415 | xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS); |
416 | /* | 416 | /* |
417 | * We're at the root level. | 417 | * We're at the root level. |
@@ -447,8 +447,8 @@ xfs_bmbt_delrec( | |||
447 | *stat = 1; | 447 | *stat = 1; |
448 | return 0; | 448 | return 0; |
449 | } | 449 | } |
450 | rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); | 450 | rbno = be64_to_cpu(block->bb_rightsib); |
451 | lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT); | 451 | lbno = be64_to_cpu(block->bb_leftsib); |
452 | /* | 452 | /* |
453 | * One child of root, need to get a chance to copy its contents | 453 | * One child of root, need to get a chance to copy its contents |
454 | * into the root and delete it. Can't go up to next level, | 454 | * into the root and delete it. Can't go up to next level, |
@@ -492,15 +492,15 @@ xfs_bmbt_delrec( | |||
492 | goto error0; | 492 | goto error0; |
493 | } | 493 | } |
494 | #endif | 494 | #endif |
495 | bno = INT_GET(right->bb_leftsib, ARCH_CONVERT); | 495 | bno = be64_to_cpu(right->bb_leftsib); |
496 | if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >= | 496 | if (be16_to_cpu(right->bb_numrecs) - 1 >= |
497 | XFS_BMAP_BLOCK_IMINRECS(level, cur)) { | 497 | XFS_BMAP_BLOCK_IMINRECS(level, cur)) { |
498 | if ((error = xfs_bmbt_lshift(tcur, level, &i))) { | 498 | if ((error = xfs_bmbt_lshift(tcur, level, &i))) { |
499 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 499 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
500 | goto error0; | 500 | goto error0; |
501 | } | 501 | } |
502 | if (i) { | 502 | if (i) { |
503 | ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= | 503 | ASSERT(be16_to_cpu(block->bb_numrecs) >= |
504 | XFS_BMAP_BLOCK_IMINRECS(level, tcur)); | 504 | XFS_BMAP_BLOCK_IMINRECS(level, tcur)); |
505 | xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); | 505 | xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); |
506 | tcur = NULL; | 506 | tcur = NULL; |
@@ -517,7 +517,7 @@ xfs_bmbt_delrec( | |||
517 | return 0; | 517 | return 0; |
518 | } | 518 | } |
519 | } | 519 | } |
520 | rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); | 520 | rrecs = be16_to_cpu(right->bb_numrecs); |
521 | if (lbno != NULLFSBLOCK) { | 521 | if (lbno != NULLFSBLOCK) { |
522 | i = xfs_btree_firstrec(tcur, level); | 522 | i = xfs_btree_firstrec(tcur, level); |
523 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | 523 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); |
@@ -548,15 +548,15 @@ xfs_bmbt_delrec( | |||
548 | goto error0; | 548 | goto error0; |
549 | } | 549 | } |
550 | #endif | 550 | #endif |
551 | bno = INT_GET(left->bb_rightsib, ARCH_CONVERT); | 551 | bno = be64_to_cpu(left->bb_rightsib); |
552 | if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >= | 552 | if (be16_to_cpu(left->bb_numrecs) - 1 >= |
553 | XFS_BMAP_BLOCK_IMINRECS(level, cur)) { | 553 | XFS_BMAP_BLOCK_IMINRECS(level, cur)) { |
554 | if ((error = xfs_bmbt_rshift(tcur, level, &i))) { | 554 | if ((error = xfs_bmbt_rshift(tcur, level, &i))) { |
555 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 555 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
556 | goto error0; | 556 | goto error0; |
557 | } | 557 | } |
558 | if (i) { | 558 | if (i) { |
559 | ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= | 559 | ASSERT(be16_to_cpu(block->bb_numrecs) >= |
560 | XFS_BMAP_BLOCK_IMINRECS(level, tcur)); | 560 | XFS_BMAP_BLOCK_IMINRECS(level, tcur)); |
561 | xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); | 561 | xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); |
562 | tcur = NULL; | 562 | tcur = NULL; |
@@ -567,14 +567,14 @@ xfs_bmbt_delrec( | |||
567 | return 0; | 567 | return 0; |
568 | } | 568 | } |
569 | } | 569 | } |
570 | lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); | 570 | lrecs = be16_to_cpu(left->bb_numrecs); |
571 | } | 571 | } |
572 | xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); | 572 | xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); |
573 | tcur = NULL; | 573 | tcur = NULL; |
574 | mp = cur->bc_mp; | 574 | mp = cur->bc_mp; |
575 | ASSERT(bno != NULLFSBLOCK); | 575 | ASSERT(bno != NULLFSBLOCK); |
576 | if (lbno != NULLFSBLOCK && | 576 | if (lbno != NULLFSBLOCK && |
577 | lrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { | 577 | lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { |
578 | rbno = bno; | 578 | rbno = bno; |
579 | right = block; | 579 | right = block; |
580 | rbp = bp; | 580 | rbp = bp; |
@@ -589,7 +589,7 @@ xfs_bmbt_delrec( | |||
589 | goto error0; | 589 | goto error0; |
590 | } | 590 | } |
591 | } else if (rbno != NULLFSBLOCK && | 591 | } else if (rbno != NULLFSBLOCK && |
592 | rrecs + INT_GET(block->bb_numrecs, ARCH_CONVERT) <= | 592 | rrecs + be16_to_cpu(block->bb_numrecs) <= |
593 | XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { | 593 | XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { |
594 | lbno = bno; | 594 | lbno = bno; |
595 | left = block; | 595 | left = block; |
@@ -604,7 +604,7 @@ xfs_bmbt_delrec( | |||
604 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 604 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
605 | goto error0; | 605 | goto error0; |
606 | } | 606 | } |
607 | lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); | 607 | lrecs = be16_to_cpu(left->bb_numrecs); |
608 | } else { | 608 | } else { |
609 | if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) { | 609 | if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) { |
610 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 610 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
@@ -614,8 +614,8 @@ xfs_bmbt_delrec( | |||
614 | *stat = 1; | 614 | *stat = 1; |
615 | return 0; | 615 | return 0; |
616 | } | 616 | } |
617 | numlrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); | 617 | numlrecs = be16_to_cpu(left->bb_numrecs); |
618 | numrrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); | 618 | numrrecs = be16_to_cpu(right->bb_numrecs); |
619 | if (level > 0) { | 619 | if (level > 0) { |
620 | lkp = XFS_BMAP_KEY_IADDR(left, numlrecs + 1, cur); | 620 | lkp = XFS_BMAP_KEY_IADDR(left, numlrecs + 1, cur); |
621 | lpp = XFS_BMAP_PTR_IADDR(left, numlrecs + 1, cur); | 621 | lpp = XFS_BMAP_PTR_IADDR(left, numlrecs + 1, cur); |
@@ -639,12 +639,12 @@ xfs_bmbt_delrec( | |||
639 | memcpy(lrp, rrp, numrrecs * sizeof(*lrp)); | 639 | memcpy(lrp, rrp, numrrecs * sizeof(*lrp)); |
640 | xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs); | 640 | xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs); |
641 | } | 641 | } |
642 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, numrrecs); | 642 | be16_add(&left->bb_numrecs, numrrecs); |
643 | left->bb_rightsib = right->bb_rightsib; /* INT_: direct copy */ | 643 | left->bb_rightsib = right->bb_rightsib; |
644 | xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS); | 644 | xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS); |
645 | if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { | 645 | if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) { |
646 | if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, | 646 | if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, |
647 | INT_GET(left->bb_rightsib, ARCH_CONVERT), | 647 | be64_to_cpu(left->bb_rightsib), |
648 | 0, &rrbp, XFS_BMAP_BTREE_REF))) { | 648 | 0, &rrbp, XFS_BMAP_BTREE_REF))) { |
649 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 649 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
650 | goto error0; | 650 | goto error0; |
@@ -654,7 +654,7 @@ xfs_bmbt_delrec( | |||
654 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 654 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
655 | goto error0; | 655 | goto error0; |
656 | } | 656 | } |
657 | INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno); | 657 | rrblock->bb_leftsib = cpu_to_be64(lbno); |
658 | xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB); | 658 | xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB); |
659 | } | 659 | } |
660 | xfs_bmap_add_free(XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(rbp)), 1, | 660 | xfs_bmap_add_free(XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(rbp)), 1, |
@@ -711,7 +711,7 @@ xfs_bmbt_get_rec( | |||
711 | if ((error = xfs_btree_check_lblock(cur, block, 0, bp))) | 711 | if ((error = xfs_btree_check_lblock(cur, block, 0, bp))) |
712 | return error; | 712 | return error; |
713 | #endif | 713 | #endif |
714 | if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) { | 714 | if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) { |
715 | *stat = 0; | 715 | *stat = 0; |
716 | return 0; | 716 | return 0; |
717 | } | 717 | } |
@@ -772,7 +772,7 @@ xfs_bmbt_insrec( | |||
772 | } | 772 | } |
773 | XFS_STATS_INC(xs_bmbt_insrec); | 773 | XFS_STATS_INC(xs_bmbt_insrec); |
774 | block = xfs_bmbt_get_block(cur, level, &bp); | 774 | block = xfs_bmbt_get_block(cur, level, &bp); |
775 | numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); | 775 | numrecs = be16_to_cpu(block->bb_numrecs); |
776 | #ifdef DEBUG | 776 | #ifdef DEBUG |
777 | if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { | 777 | if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { |
778 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 778 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
@@ -854,7 +854,7 @@ xfs_bmbt_insrec( | |||
854 | } | 854 | } |
855 | } | 855 | } |
856 | } | 856 | } |
857 | numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); | 857 | numrecs = be16_to_cpu(block->bb_numrecs); |
858 | if (level > 0) { | 858 | if (level > 0) { |
859 | kp = XFS_BMAP_KEY_IADDR(block, 1, cur); | 859 | kp = XFS_BMAP_KEY_IADDR(block, 1, cur); |
860 | pp = XFS_BMAP_PTR_IADDR(block, 1, cur); | 860 | pp = XFS_BMAP_PTR_IADDR(block, 1, cur); |
@@ -881,7 +881,7 @@ xfs_bmbt_insrec( | |||
881 | kp[ptr - 1] = key; | 881 | kp[ptr - 1] = key; |
882 | INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); | 882 | INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); |
883 | numrecs++; | 883 | numrecs++; |
884 | INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); | 884 | block->bb_numrecs = cpu_to_be16(numrecs); |
885 | xfs_bmbt_log_keys(cur, bp, ptr, numrecs); | 885 | xfs_bmbt_log_keys(cur, bp, ptr, numrecs); |
886 | xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs); | 886 | xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs); |
887 | } else { | 887 | } else { |
@@ -890,7 +890,7 @@ xfs_bmbt_insrec( | |||
890 | (numrecs - ptr + 1) * sizeof(*rp)); | 890 | (numrecs - ptr + 1) * sizeof(*rp)); |
891 | rp[ptr - 1] = *recp; | 891 | rp[ptr - 1] = *recp; |
892 | numrecs++; | 892 | numrecs++; |
893 | INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); | 893 | block->bb_numrecs = cpu_to_be16(numrecs); |
894 | xfs_bmbt_log_recs(cur, bp, ptr, numrecs); | 894 | xfs_bmbt_log_recs(cur, bp, ptr, numrecs); |
895 | } | 895 | } |
896 | xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS); | 896 | xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS); |
@@ -955,7 +955,7 @@ xfs_bmbt_killroot( | |||
955 | /* | 955 | /* |
956 | * Give up if the root has multiple children. | 956 | * Give up if the root has multiple children. |
957 | */ | 957 | */ |
958 | if (INT_GET(block->bb_numrecs, ARCH_CONVERT) != 1) { | 958 | if (be16_to_cpu(block->bb_numrecs) != 1) { |
959 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 959 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
960 | return 0; | 960 | return 0; |
961 | } | 961 | } |
@@ -966,37 +966,37 @@ xfs_bmbt_killroot( | |||
966 | */ | 966 | */ |
967 | cbp = cur->bc_bufs[level - 1]; | 967 | cbp = cur->bc_bufs[level - 1]; |
968 | cblock = XFS_BUF_TO_BMBT_BLOCK(cbp); | 968 | cblock = XFS_BUF_TO_BMBT_BLOCK(cbp); |
969 | if (INT_GET(cblock->bb_numrecs, ARCH_CONVERT) > XFS_BMAP_BLOCK_DMAXRECS(level, cur)) { | 969 | if (be16_to_cpu(cblock->bb_numrecs) > XFS_BMAP_BLOCK_DMAXRECS(level, cur)) { |
970 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 970 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
971 | return 0; | 971 | return 0; |
972 | } | 972 | } |
973 | ASSERT(INT_GET(cblock->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO); | 973 | ASSERT(be64_to_cpu(cblock->bb_leftsib) == NULLDFSBNO); |
974 | ASSERT(INT_GET(cblock->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO); | 974 | ASSERT(be64_to_cpu(cblock->bb_rightsib) == NULLDFSBNO); |
975 | ip = cur->bc_private.b.ip; | 975 | ip = cur->bc_private.b.ip; |
976 | ifp = XFS_IFORK_PTR(ip, cur->bc_private.b.whichfork); | 976 | ifp = XFS_IFORK_PTR(ip, cur->bc_private.b.whichfork); |
977 | ASSERT(XFS_BMAP_BLOCK_IMAXRECS(level, cur) == | 977 | ASSERT(XFS_BMAP_BLOCK_IMAXRECS(level, cur) == |
978 | XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes)); | 978 | XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes)); |
979 | i = (int)(INT_GET(cblock->bb_numrecs, ARCH_CONVERT) - XFS_BMAP_BLOCK_IMAXRECS(level, cur)); | 979 | i = (int)(be16_to_cpu(cblock->bb_numrecs) - XFS_BMAP_BLOCK_IMAXRECS(level, cur)); |
980 | if (i) { | 980 | if (i) { |
981 | xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork); | 981 | xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork); |
982 | block = ifp->if_broot; | 982 | block = ifp->if_broot; |
983 | } | 983 | } |
984 | INT_MOD(block->bb_numrecs, ARCH_CONVERT, i); | 984 | be16_add(&block->bb_numrecs, i); |
985 | ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) == INT_GET(cblock->bb_numrecs, ARCH_CONVERT)); | 985 | ASSERT(block->bb_numrecs == cblock->bb_numrecs); |
986 | kp = XFS_BMAP_KEY_IADDR(block, 1, cur); | 986 | kp = XFS_BMAP_KEY_IADDR(block, 1, cur); |
987 | ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur); | 987 | ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur); |
988 | memcpy(kp, ckp, INT_GET(block->bb_numrecs, ARCH_CONVERT) * sizeof(*kp)); | 988 | memcpy(kp, ckp, be16_to_cpu(block->bb_numrecs) * sizeof(*kp)); |
989 | pp = XFS_BMAP_PTR_IADDR(block, 1, cur); | 989 | pp = XFS_BMAP_PTR_IADDR(block, 1, cur); |
990 | cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur); | 990 | cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur); |
991 | #ifdef DEBUG | 991 | #ifdef DEBUG |
992 | for (i = 0; i < INT_GET(cblock->bb_numrecs, ARCH_CONVERT); i++) { | 992 | for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) { |
993 | if ((error = xfs_btree_check_lptr(cur, INT_GET(cpp[i], ARCH_CONVERT), level - 1))) { | 993 | if ((error = xfs_btree_check_lptr(cur, INT_GET(cpp[i], ARCH_CONVERT), level - 1))) { |
994 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 994 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
995 | return error; | 995 | return error; |
996 | } | 996 | } |
997 | } | 997 | } |
998 | #endif | 998 | #endif |
999 | memcpy(pp, cpp, INT_GET(block->bb_numrecs, ARCH_CONVERT) * sizeof(*pp)); | 999 | memcpy(pp, cpp, be16_to_cpu(block->bb_numrecs) * sizeof(*pp)); |
1000 | xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1, | 1000 | xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1, |
1001 | cur->bc_private.b.flist, cur->bc_mp); | 1001 | cur->bc_private.b.flist, cur->bc_mp); |
1002 | ip->i_d.di_nblocks--; | 1002 | ip->i_d.di_nblocks--; |
@@ -1004,7 +1004,7 @@ xfs_bmbt_killroot( | |||
1004 | XFS_TRANS_DQ_BCOUNT, -1L); | 1004 | XFS_TRANS_DQ_BCOUNT, -1L); |
1005 | xfs_trans_binval(cur->bc_tp, cbp); | 1005 | xfs_trans_binval(cur->bc_tp, cbp); |
1006 | cur->bc_bufs[level - 1] = NULL; | 1006 | cur->bc_bufs[level - 1] = NULL; |
1007 | INT_MOD(block->bb_level, ARCH_CONVERT, -1); | 1007 | be16_add(&block->bb_level, -1); |
1008 | xfs_trans_log_inode(cur->bc_tp, ip, | 1008 | xfs_trans_log_inode(cur->bc_tp, ip, |
1009 | XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); | 1009 | XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); |
1010 | cur->bc_nlevels--; | 1010 | cur->bc_nlevels--; |
@@ -1160,7 +1160,7 @@ xfs_bmbt_lookup( | |||
1160 | else | 1160 | else |
1161 | krbase = XFS_BMAP_REC_IADDR(block, 1, cur); | 1161 | krbase = XFS_BMAP_REC_IADDR(block, 1, cur); |
1162 | low = 1; | 1162 | low = 1; |
1163 | if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) { | 1163 | if (!(high = be16_to_cpu(block->bb_numrecs))) { |
1164 | ASSERT(level == 0); | 1164 | ASSERT(level == 0); |
1165 | cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE; | 1165 | cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE; |
1166 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 1166 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
@@ -1207,8 +1207,8 @@ xfs_bmbt_lookup( | |||
1207 | * If ge search and we went off the end of the block, but it's | 1207 | * If ge search and we went off the end of the block, but it's |
1208 | * not the last block, we're in the wrong block. | 1208 | * not the last block, we're in the wrong block. |
1209 | */ | 1209 | */ |
1210 | if (dir == XFS_LOOKUP_GE && keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) && | 1210 | if (dir == XFS_LOOKUP_GE && keyno > be16_to_cpu(block->bb_numrecs) && |
1211 | INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { | 1211 | be64_to_cpu(block->bb_rightsib) != NULLDFSBNO) { |
1212 | cur->bc_ptrs[0] = keyno; | 1212 | cur->bc_ptrs[0] = keyno; |
1213 | if ((error = xfs_bmbt_increment(cur, 0, &i))) { | 1213 | if ((error = xfs_bmbt_increment(cur, 0, &i))) { |
1214 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 1214 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
@@ -1223,7 +1223,7 @@ xfs_bmbt_lookup( | |||
1223 | else if (dir == XFS_LOOKUP_LE && diff > 0) | 1223 | else if (dir == XFS_LOOKUP_LE && diff > 0) |
1224 | keyno--; | 1224 | keyno--; |
1225 | cur->bc_ptrs[0] = keyno; | 1225 | cur->bc_ptrs[0] = keyno; |
1226 | if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 1226 | if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) { |
1227 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 1227 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
1228 | *stat = 0; | 1228 | *stat = 0; |
1229 | } else { | 1229 | } else { |
@@ -1280,7 +1280,7 @@ xfs_bmbt_lshift( | |||
1280 | return error; | 1280 | return error; |
1281 | } | 1281 | } |
1282 | #endif | 1282 | #endif |
1283 | if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) { | 1283 | if (be64_to_cpu(right->bb_leftsib) == NULLDFSBNO) { |
1284 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 1284 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
1285 | *stat = 0; | 1285 | *stat = 0; |
1286 | return 0; | 1286 | return 0; |
@@ -1291,7 +1291,7 @@ xfs_bmbt_lshift( | |||
1291 | return 0; | 1291 | return 0; |
1292 | } | 1292 | } |
1293 | mp = cur->bc_mp; | 1293 | mp = cur->bc_mp; |
1294 | if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, | 1294 | if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(right->bb_leftsib), 0, |
1295 | &lbp, XFS_BMAP_BTREE_REF))) { | 1295 | &lbp, XFS_BMAP_BTREE_REF))) { |
1296 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 1296 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
1297 | return error; | 1297 | return error; |
@@ -1301,12 +1301,12 @@ xfs_bmbt_lshift( | |||
1301 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 1301 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
1302 | return error; | 1302 | return error; |
1303 | } | 1303 | } |
1304 | if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { | 1304 | if (be16_to_cpu(left->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { |
1305 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 1305 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
1306 | *stat = 0; | 1306 | *stat = 0; |
1307 | return 0; | 1307 | return 0; |
1308 | } | 1308 | } |
1309 | lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1; | 1309 | lrecs = be16_to_cpu(left->bb_numrecs) + 1; |
1310 | if (level > 0) { | 1310 | if (level > 0) { |
1311 | lkp = XFS_BMAP_KEY_IADDR(left, lrecs, cur); | 1311 | lkp = XFS_BMAP_KEY_IADDR(left, lrecs, cur); |
1312 | rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); | 1312 | rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); |
@@ -1328,7 +1328,7 @@ xfs_bmbt_lshift( | |||
1328 | *lrp = *rrp; | 1328 | *lrp = *rrp; |
1329 | xfs_bmbt_log_recs(cur, lbp, lrecs, lrecs); | 1329 | xfs_bmbt_log_recs(cur, lbp, lrecs, lrecs); |
1330 | } | 1330 | } |
1331 | INT_SET(left->bb_numrecs, ARCH_CONVERT, lrecs); | 1331 | left->bb_numrecs = cpu_to_be16(lrecs); |
1332 | xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); | 1332 | xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); |
1333 | #ifdef DEBUG | 1333 | #ifdef DEBUG |
1334 | if (level > 0) | 1334 | if (level > 0) |
@@ -1336,8 +1336,8 @@ xfs_bmbt_lshift( | |||
1336 | else | 1336 | else |
1337 | xfs_btree_check_rec(XFS_BTNUM_BMAP, lrp - 1, lrp); | 1337 | xfs_btree_check_rec(XFS_BTNUM_BMAP, lrp - 1, lrp); |
1338 | #endif | 1338 | #endif |
1339 | rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; | 1339 | rrecs = be16_to_cpu(right->bb_numrecs) - 1; |
1340 | INT_SET(right->bb_numrecs, ARCH_CONVERT, rrecs); | 1340 | right->bb_numrecs = cpu_to_be16(rrecs); |
1341 | xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS); | 1341 | xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS); |
1342 | if (level > 0) { | 1342 | if (level > 0) { |
1343 | #ifdef DEBUG | 1343 | #ifdef DEBUG |
@@ -1414,18 +1414,18 @@ xfs_bmbt_rshift( | |||
1414 | return error; | 1414 | return error; |
1415 | } | 1415 | } |
1416 | #endif | 1416 | #endif |
1417 | if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) { | 1417 | if (be64_to_cpu(left->bb_rightsib) == NULLDFSBNO) { |
1418 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 1418 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
1419 | *stat = 0; | 1419 | *stat = 0; |
1420 | return 0; | 1420 | return 0; |
1421 | } | 1421 | } |
1422 | if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) { | 1422 | if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) { |
1423 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 1423 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
1424 | *stat = 0; | 1424 | *stat = 0; |
1425 | return 0; | 1425 | return 0; |
1426 | } | 1426 | } |
1427 | mp = cur->bc_mp; | 1427 | mp = cur->bc_mp; |
1428 | if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, | 1428 | if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(left->bb_rightsib), 0, |
1429 | &rbp, XFS_BMAP_BTREE_REF))) { | 1429 | &rbp, XFS_BMAP_BTREE_REF))) { |
1430 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 1430 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
1431 | return error; | 1431 | return error; |
@@ -1435,26 +1435,26 @@ xfs_bmbt_rshift( | |||
1435 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 1435 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
1436 | return error; | 1436 | return error; |
1437 | } | 1437 | } |
1438 | if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { | 1438 | if (be16_to_cpu(right->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { |
1439 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 1439 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
1440 | *stat = 0; | 1440 | *stat = 0; |
1441 | return 0; | 1441 | return 0; |
1442 | } | 1442 | } |
1443 | if (level > 0) { | 1443 | if (level > 0) { |
1444 | lkp = XFS_BMAP_KEY_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1444 | lkp = XFS_BMAP_KEY_IADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1445 | lpp = XFS_BMAP_PTR_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1445 | lpp = XFS_BMAP_PTR_IADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1446 | rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); | 1446 | rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); |
1447 | rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); | 1447 | rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); |
1448 | #ifdef DEBUG | 1448 | #ifdef DEBUG |
1449 | for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) { | 1449 | for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) { |
1450 | if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) { | 1450 | if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) { |
1451 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 1451 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
1452 | return error; | 1452 | return error; |
1453 | } | 1453 | } |
1454 | } | 1454 | } |
1455 | #endif | 1455 | #endif |
1456 | memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); | 1456 | memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); |
1457 | memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); | 1457 | memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); |
1458 | #ifdef DEBUG | 1458 | #ifdef DEBUG |
1459 | if ((error = xfs_btree_check_lptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) { | 1459 | if ((error = xfs_btree_check_lptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) { |
1460 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 1460 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
@@ -1463,21 +1463,21 @@ xfs_bmbt_rshift( | |||
1463 | #endif | 1463 | #endif |
1464 | *rkp = *lkp; | 1464 | *rkp = *lkp; |
1465 | *rpp = *lpp; /* INT_: direct copy */ | 1465 | *rpp = *lpp; /* INT_: direct copy */ |
1466 | xfs_bmbt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1466 | xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1467 | xfs_bmbt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1467 | xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1468 | } else { | 1468 | } else { |
1469 | lrp = XFS_BMAP_REC_IADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1469 | lrp = XFS_BMAP_REC_IADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1470 | rrp = XFS_BMAP_REC_IADDR(right, 1, cur); | 1470 | rrp = XFS_BMAP_REC_IADDR(right, 1, cur); |
1471 | memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); | 1471 | memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); |
1472 | *rrp = *lrp; | 1472 | *rrp = *lrp; |
1473 | xfs_bmbt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1473 | xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1474 | INT_SET(key.br_startoff, ARCH_CONVERT, | 1474 | INT_SET(key.br_startoff, ARCH_CONVERT, |
1475 | xfs_bmbt_disk_get_startoff(rrp)); | 1475 | xfs_bmbt_disk_get_startoff(rrp)); |
1476 | rkp = &key; | 1476 | rkp = &key; |
1477 | } | 1477 | } |
1478 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1); | 1478 | be16_add(&left->bb_numrecs, -1); |
1479 | xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); | 1479 | xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); |
1480 | INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); | 1480 | be16_add(&right->bb_numrecs, 1); |
1481 | #ifdef DEBUG | 1481 | #ifdef DEBUG |
1482 | if (level > 0) | 1482 | if (level > 0) |
1483 | xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1); | 1483 | xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1); |
@@ -1608,47 +1608,47 @@ xfs_bmbt_split( | |||
1608 | return error; | 1608 | return error; |
1609 | } | 1609 | } |
1610 | #endif | 1610 | #endif |
1611 | INT_SET(right->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); | 1611 | right->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC); |
1612 | right->bb_level = left->bb_level; /* INT_: direct copy */ | 1612 | right->bb_level = left->bb_level; |
1613 | INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2)); | 1613 | right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2); |
1614 | if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) && | 1614 | if ((be16_to_cpu(left->bb_numrecs) & 1) && |
1615 | cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1) | 1615 | cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) |
1616 | INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); | 1616 | be16_add(&right->bb_numrecs, 1); |
1617 | i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1; | 1617 | i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; |
1618 | if (level > 0) { | 1618 | if (level > 0) { |
1619 | lkp = XFS_BMAP_KEY_IADDR(left, i, cur); | 1619 | lkp = XFS_BMAP_KEY_IADDR(left, i, cur); |
1620 | lpp = XFS_BMAP_PTR_IADDR(left, i, cur); | 1620 | lpp = XFS_BMAP_PTR_IADDR(left, i, cur); |
1621 | rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); | 1621 | rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); |
1622 | rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); | 1622 | rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); |
1623 | #ifdef DEBUG | 1623 | #ifdef DEBUG |
1624 | for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { | 1624 | for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { |
1625 | if ((error = xfs_btree_check_lptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) { | 1625 | if ((error = xfs_btree_check_lptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) { |
1626 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 1626 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
1627 | return error; | 1627 | return error; |
1628 | } | 1628 | } |
1629 | } | 1629 | } |
1630 | #endif | 1630 | #endif |
1631 | memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); | 1631 | memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); |
1632 | memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); | 1632 | memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); |
1633 | xfs_bmbt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1633 | xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1634 | xfs_bmbt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1634 | xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1635 | keyp->br_startoff = INT_GET(rkp->br_startoff, ARCH_CONVERT); | 1635 | keyp->br_startoff = INT_GET(rkp->br_startoff, ARCH_CONVERT); |
1636 | } else { | 1636 | } else { |
1637 | lrp = XFS_BMAP_REC_IADDR(left, i, cur); | 1637 | lrp = XFS_BMAP_REC_IADDR(left, i, cur); |
1638 | rrp = XFS_BMAP_REC_IADDR(right, 1, cur); | 1638 | rrp = XFS_BMAP_REC_IADDR(right, 1, cur); |
1639 | memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); | 1639 | memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); |
1640 | xfs_bmbt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1640 | xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1641 | keyp->br_startoff = xfs_bmbt_disk_get_startoff(rrp); | 1641 | keyp->br_startoff = xfs_bmbt_disk_get_startoff(rrp); |
1642 | } | 1642 | } |
1643 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT))); | 1643 | be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); |
1644 | right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */ | 1644 | right->bb_rightsib = left->bb_rightsib; |
1645 | INT_SET(left->bb_rightsib, ARCH_CONVERT, args.fsbno); | 1645 | left->bb_rightsib = cpu_to_be64(args.fsbno); |
1646 | INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno); | 1646 | right->bb_leftsib = cpu_to_be64(lbno); |
1647 | xfs_bmbt_log_block(cur, rbp, XFS_BB_ALL_BITS); | 1647 | xfs_bmbt_log_block(cur, rbp, XFS_BB_ALL_BITS); |
1648 | xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); | 1648 | xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); |
1649 | if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { | 1649 | if (be64_to_cpu(right->bb_rightsib) != NULLDFSBNO) { |
1650 | if ((error = xfs_btree_read_bufl(args.mp, args.tp, | 1650 | if ((error = xfs_btree_read_bufl(args.mp, args.tp, |
1651 | INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, &rrbp, | 1651 | be64_to_cpu(right->bb_rightsib), 0, &rrbp, |
1652 | XFS_BMAP_BTREE_REF))) { | 1652 | XFS_BMAP_BTREE_REF))) { |
1653 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 1653 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
1654 | return error; | 1654 | return error; |
@@ -1658,12 +1658,12 @@ xfs_bmbt_split( | |||
1658 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 1658 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
1659 | return error; | 1659 | return error; |
1660 | } | 1660 | } |
1661 | INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, args.fsbno); | 1661 | rrblock->bb_leftsib = cpu_to_be64(args.fsbno); |
1662 | xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB); | 1662 | xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB); |
1663 | } | 1663 | } |
1664 | if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) { | 1664 | if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) { |
1665 | xfs_btree_setbuf(cur, level, rbp); | 1665 | xfs_btree_setbuf(cur, level, rbp); |
1666 | cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT); | 1666 | cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs); |
1667 | } | 1667 | } |
1668 | if (level + 1 < cur->bc_nlevels) { | 1668 | if (level + 1 < cur->bc_nlevels) { |
1669 | if ((error = xfs_btree_dup_cursor(cur, curp))) { | 1669 | if ((error = xfs_btree_dup_cursor(cur, curp))) { |
@@ -1735,18 +1735,18 @@ xfs_bmdr_to_bmbt( | |||
1735 | xfs_bmbt_key_t *tkp; | 1735 | xfs_bmbt_key_t *tkp; |
1736 | xfs_bmbt_ptr_t *tpp; | 1736 | xfs_bmbt_ptr_t *tpp; |
1737 | 1737 | ||
1738 | INT_SET(rblock->bb_magic, ARCH_CONVERT, XFS_BMAP_MAGIC); | 1738 | rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC); |
1739 | rblock->bb_level = dblock->bb_level; /* both in on-disk format */ | 1739 | rblock->bb_level = dblock->bb_level; |
1740 | ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) > 0); | 1740 | ASSERT(be16_to_cpu(rblock->bb_level) > 0); |
1741 | rblock->bb_numrecs = dblock->bb_numrecs;/* both in on-disk format */ | 1741 | rblock->bb_numrecs = dblock->bb_numrecs; |
1742 | INT_SET(rblock->bb_leftsib, ARCH_CONVERT, NULLDFSBNO); | 1742 | rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO); |
1743 | INT_SET(rblock->bb_rightsib, ARCH_CONVERT, NULLDFSBNO); | 1743 | rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO); |
1744 | dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0); | 1744 | dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0); |
1745 | fkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); | 1745 | fkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); |
1746 | tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen); | 1746 | tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen); |
1747 | fpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); | 1747 | fpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); |
1748 | tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen); | 1748 | tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen); |
1749 | dmxr = INT_GET(dblock->bb_numrecs, ARCH_CONVERT); | 1749 | dmxr = be16_to_cpu(dblock->bb_numrecs); |
1750 | memcpy(tkp, fkp, sizeof(*fkp) * dmxr); | 1750 | memcpy(tkp, fkp, sizeof(*fkp) * dmxr); |
1751 | memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */ | 1751 | memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */ |
1752 | } | 1752 | } |
@@ -1789,7 +1789,7 @@ xfs_bmbt_decrement( | |||
1789 | return error; | 1789 | return error; |
1790 | } | 1790 | } |
1791 | #endif | 1791 | #endif |
1792 | if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) { | 1792 | if (be64_to_cpu(block->bb_leftsib) == NULLDFSBNO) { |
1793 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 1793 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
1794 | *stat = 0; | 1794 | *stat = 0; |
1795 | return 0; | 1795 | return 0; |
@@ -1821,7 +1821,7 @@ xfs_bmbt_decrement( | |||
1821 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 1821 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
1822 | return error; | 1822 | return error; |
1823 | } | 1823 | } |
1824 | cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT); | 1824 | cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs); |
1825 | } | 1825 | } |
1826 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 1826 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
1827 | *stat = 1; | 1827 | *stat = 1; |
@@ -2107,12 +2107,12 @@ xfs_bmbt_increment( | |||
2107 | return error; | 2107 | return error; |
2108 | } | 2108 | } |
2109 | #endif | 2109 | #endif |
2110 | if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 2110 | if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) { |
2111 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 2111 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
2112 | *stat = 1; | 2112 | *stat = 1; |
2113 | return 0; | 2113 | return 0; |
2114 | } | 2114 | } |
2115 | if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) { | 2115 | if (be64_to_cpu(block->bb_rightsib) == NULLDFSBNO) { |
2116 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 2116 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
2117 | *stat = 0; | 2117 | *stat = 0; |
2118 | return 0; | 2118 | return 0; |
@@ -2125,7 +2125,7 @@ xfs_bmbt_increment( | |||
2125 | return error; | 2125 | return error; |
2126 | } | 2126 | } |
2127 | #endif | 2127 | #endif |
2128 | if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 2128 | if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs)) |
2129 | break; | 2129 | break; |
2130 | if (lev < cur->bc_nlevels - 1) | 2130 | if (lev < cur->bc_nlevels - 1) |
2131 | xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); | 2131 | xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); |
@@ -2387,23 +2387,23 @@ xfs_bmbt_newroot( | |||
2387 | bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0); | 2387 | bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0); |
2388 | cblock = XFS_BUF_TO_BMBT_BLOCK(bp); | 2388 | cblock = XFS_BUF_TO_BMBT_BLOCK(bp); |
2389 | *cblock = *block; | 2389 | *cblock = *block; |
2390 | INT_MOD(block->bb_level, ARCH_CONVERT, +1); | 2390 | be16_add(&block->bb_level, 1); |
2391 | INT_SET(block->bb_numrecs, ARCH_CONVERT, 1); | 2391 | block->bb_numrecs = cpu_to_be16(1); |
2392 | cur->bc_nlevels++; | 2392 | cur->bc_nlevels++; |
2393 | cur->bc_ptrs[level + 1] = 1; | 2393 | cur->bc_ptrs[level + 1] = 1; |
2394 | kp = XFS_BMAP_KEY_IADDR(block, 1, cur); | 2394 | kp = XFS_BMAP_KEY_IADDR(block, 1, cur); |
2395 | ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur); | 2395 | ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur); |
2396 | memcpy(ckp, kp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*kp)); | 2396 | memcpy(ckp, kp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*kp)); |
2397 | cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur); | 2397 | cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur); |
2398 | #ifdef DEBUG | 2398 | #ifdef DEBUG |
2399 | for (i = 0; i < INT_GET(cblock->bb_numrecs, ARCH_CONVERT); i++) { | 2399 | for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) { |
2400 | if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i], ARCH_CONVERT), level))) { | 2400 | if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i], ARCH_CONVERT), level))) { |
2401 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 2401 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); |
2402 | return error; | 2402 | return error; |
2403 | } | 2403 | } |
2404 | } | 2404 | } |
2405 | #endif | 2405 | #endif |
2406 | memcpy(cpp, pp, INT_GET(cblock->bb_numrecs, ARCH_CONVERT) * sizeof(*pp)); | 2406 | memcpy(cpp, pp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*pp)); |
2407 | #ifdef DEBUG | 2407 | #ifdef DEBUG |
2408 | if ((error = xfs_btree_check_lptr(cur, (xfs_bmbt_ptr_t)args.fsbno, | 2408 | if ((error = xfs_btree_check_lptr(cur, (xfs_bmbt_ptr_t)args.fsbno, |
2409 | level))) { | 2409 | level))) { |
@@ -2412,7 +2412,7 @@ xfs_bmbt_newroot( | |||
2412 | } | 2412 | } |
2413 | #endif | 2413 | #endif |
2414 | INT_SET(*pp, ARCH_CONVERT, args.fsbno); | 2414 | INT_SET(*pp, ARCH_CONVERT, args.fsbno); |
2415 | xfs_iroot_realloc(cur->bc_private.b.ip, 1 - INT_GET(cblock->bb_numrecs, ARCH_CONVERT), | 2415 | xfs_iroot_realloc(cur->bc_private.b.ip, 1 - be16_to_cpu(cblock->bb_numrecs), |
2416 | cur->bc_private.b.whichfork); | 2416 | cur->bc_private.b.whichfork); |
2417 | xfs_btree_setbuf(cur, level, bp); | 2417 | xfs_btree_setbuf(cur, level, bp); |
2418 | /* | 2418 | /* |
@@ -2420,8 +2420,8 @@ xfs_bmbt_newroot( | |||
2420 | * the root is at the right level. | 2420 | * the root is at the right level. |
2421 | */ | 2421 | */ |
2422 | xfs_bmbt_log_block(cur, bp, XFS_BB_ALL_BITS); | 2422 | xfs_bmbt_log_block(cur, bp, XFS_BB_ALL_BITS); |
2423 | xfs_bmbt_log_keys(cur, bp, 1, INT_GET(cblock->bb_numrecs, ARCH_CONVERT)); | 2423 | xfs_bmbt_log_keys(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs)); |
2424 | xfs_bmbt_log_ptrs(cur, bp, 1, INT_GET(cblock->bb_numrecs, ARCH_CONVERT)); | 2424 | xfs_bmbt_log_ptrs(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs)); |
2425 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 2425 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
2426 | *logflags |= | 2426 | *logflags |= |
2427 | XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork); | 2427 | XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork); |
@@ -2689,18 +2689,18 @@ xfs_bmbt_to_bmdr( | |||
2689 | xfs_bmbt_key_t *tkp; | 2689 | xfs_bmbt_key_t *tkp; |
2690 | xfs_bmbt_ptr_t *tpp; | 2690 | xfs_bmbt_ptr_t *tpp; |
2691 | 2691 | ||
2692 | ASSERT(INT_GET(rblock->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC); | 2692 | ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC); |
2693 | ASSERT(INT_GET(rblock->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO); | 2693 | ASSERT(be64_to_cpu(rblock->bb_leftsib) == NULLDFSBNO); |
2694 | ASSERT(INT_GET(rblock->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO); | 2694 | ASSERT(be64_to_cpu(rblock->bb_rightsib) == NULLDFSBNO); |
2695 | ASSERT(INT_GET(rblock->bb_level, ARCH_CONVERT) > 0); | 2695 | ASSERT(be16_to_cpu(rblock->bb_level) > 0); |
2696 | dblock->bb_level = rblock->bb_level; /* both in on-disk format */ | 2696 | dblock->bb_level = rblock->bb_level; |
2697 | dblock->bb_numrecs = rblock->bb_numrecs;/* both in on-disk format */ | 2697 | dblock->bb_numrecs = rblock->bb_numrecs; |
2698 | dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0); | 2698 | dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0); |
2699 | fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen); | 2699 | fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen); |
2700 | tkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); | 2700 | tkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); |
2701 | fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen); | 2701 | fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen); |
2702 | tpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); | 2702 | tpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); |
2703 | dmxr = INT_GET(dblock->bb_numrecs, ARCH_CONVERT); | 2703 | dmxr = be16_to_cpu(dblock->bb_numrecs); |
2704 | memcpy(tkp, fkp, sizeof(*fkp) * dmxr); | 2704 | memcpy(tkp, fkp, sizeof(*fkp) * dmxr); |
2705 | memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */ | 2705 | memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */ |
2706 | } | 2706 | } |
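
Several of the xfs_bmap_btree.c hunks above replace INT_MOD(x, ARCH_CONVERT, d) with be16_add(&x, d). That helper is defined alongside the other XFS endian helpers elsewhere in this patch series rather than in the files shown here; a plausible sketch of the read-modify-write it performs, for reference only:

/*
 * Illustrative only -- the real definition lives in the XFS endian
 * headers; this shows the assumed behaviour of the in-place add.
 */
static inline void be16_add(__be16 *a, __s16 b)
{
	*a = cpu_to_be16(be16_to_cpu(*a) + b);
}
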
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 7478b1b1aa2b..e095a2d344ae 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h | |||
@@ -28,10 +28,9 @@ struct xfs_inode; | |||
28 | /* | 28 | /* |
29 | * Bmap root header, on-disk form only. | 29 | * Bmap root header, on-disk form only. |
30 | */ | 30 | */ |
31 | typedef struct xfs_bmdr_block | 31 | typedef struct xfs_bmdr_block { |
32 | { | 32 | __be16 bb_level; /* 0 is a leaf */ |
33 | __uint16_t bb_level; /* 0 is a leaf */ | 33 | __be16 bb_numrecs; /* current # of data records */ |
34 | __uint16_t bb_numrecs; /* current # of data records */ | ||
35 | } xfs_bmdr_block_t; | 34 | } xfs_bmdr_block_t; |
36 | 35 | ||
37 | /* | 36 | /* |
@@ -212,36 +211,36 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; | |||
212 | 211 | ||
213 | #define XFS_BMAP_REC_DADDR(bb,i,cur) \ | 212 | #define XFS_BMAP_REC_DADDR(bb,i,cur) \ |
214 | (XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_DSIZE( \ | 213 | (XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_DSIZE( \ |
215 | INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ | 214 | be16_to_cpu((bb)->bb_level), cur), \ |
216 | xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ | 215 | xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ |
217 | INT_GET((bb)->bb_level, ARCH_CONVERT), cur))) | 216 | be16_to_cpu((bb)->bb_level), cur))) |
218 | #define XFS_BMAP_REC_IADDR(bb,i,cur) \ | 217 | #define XFS_BMAP_REC_IADDR(bb,i,cur) \ |
219 | (XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_ISIZE( \ | 218 | (XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_ISIZE( \ |
220 | INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ | 219 | be16_to_cpu((bb)->bb_level), cur), \ |
221 | xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ | 220 | xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ |
222 | INT_GET((bb)->bb_level, ARCH_CONVERT), cur))) | 221 | be16_to_cpu((bb)->bb_level), cur))) |
223 | 222 | ||
224 | #define XFS_BMAP_KEY_DADDR(bb,i,cur) \ | 223 | #define XFS_BMAP_KEY_DADDR(bb,i,cur) \ |
225 | (XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_DSIZE( \ | 224 | (XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_DSIZE( \ |
226 | INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ | 225 | be16_to_cpu((bb)->bb_level), cur), \ |
227 | xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ | 226 | xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ |
228 | INT_GET((bb)->bb_level, ARCH_CONVERT), cur))) | 227 | be16_to_cpu((bb)->bb_level), cur))) |
229 | #define XFS_BMAP_KEY_IADDR(bb,i,cur) \ | 228 | #define XFS_BMAP_KEY_IADDR(bb,i,cur) \ |
230 | (XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_ISIZE( \ | 229 | (XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_ISIZE( \ |
231 | INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ | 230 | be16_to_cpu((bb)->bb_level), cur), \ |
232 | xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ | 231 | xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ |
233 | INT_GET((bb)->bb_level, ARCH_CONVERT), cur))) | 232 | be16_to_cpu((bb)->bb_level), cur))) |
234 | 233 | ||
235 | #define XFS_BMAP_PTR_DADDR(bb,i,cur) \ | 234 | #define XFS_BMAP_PTR_DADDR(bb,i,cur) \ |
236 | (XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_DSIZE( \ | 235 | (XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_DSIZE( \ |
237 | INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ | 236 | be16_to_cpu((bb)->bb_level), cur), \ |
238 | xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ | 237 | xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ |
239 | INT_GET((bb)->bb_level, ARCH_CONVERT), cur))) | 238 | be16_to_cpu((bb)->bb_level), cur))) |
240 | #define XFS_BMAP_PTR_IADDR(bb,i,cur) \ | 239 | #define XFS_BMAP_PTR_IADDR(bb,i,cur) \ |
241 | (XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_ISIZE( \ | 240 | (XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_ISIZE( \ |
242 | INT_GET((bb)->bb_level, ARCH_CONVERT), cur), \ | 241 | be16_to_cpu((bb)->bb_level), cur), \ |
243 | xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ | 242 | xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ |
244 | INT_GET((bb)->bb_level, ARCH_CONVERT), cur))) | 243 | be16_to_cpu((bb)->bb_level), cur))) |
245 | 244 | ||
246 | /* | 245 | /* |
247 | * These are to be used when we know the size of the block and | 246 | * These are to be used when we know the size of the block and |
@@ -254,7 +253,7 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; | |||
254 | #define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz) \ | 253 | #define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz) \ |
255 | (XFS_BTREE_PTR_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz))) | 254 | (XFS_BTREE_PTR_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz))) |
256 | 255 | ||
257 | #define XFS_BMAP_BROOT_NUMRECS(bb) INT_GET((bb)->bb_numrecs, ARCH_CONVERT) | 256 | #define XFS_BMAP_BROOT_NUMRECS(bb) be16_to_cpu((bb)->bb_numrecs) |
258 | #define XFS_BMAP_BROOT_MAXRECS(sz) XFS_BTREE_BLOCK_MAXRECS(sz,xfs_bmbt,0) | 257 | #define XFS_BMAP_BROOT_MAXRECS(sz) XFS_BTREE_BLOCK_MAXRECS(sz,xfs_bmbt,0) |
259 | 258 | ||
260 | #define XFS_BMAP_BROOT_SPACE_CALC(nrecs) \ | 259 | #define XFS_BMAP_BROOT_SPACE_CALC(nrecs) \ |
@@ -262,7 +261,7 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; | |||
262 | ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t)))) | 261 | ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t)))) |
263 | 262 | ||
264 | #define XFS_BMAP_BROOT_SPACE(bb) \ | 263 | #define XFS_BMAP_BROOT_SPACE(bb) \ |
265 | (XFS_BMAP_BROOT_SPACE_CALC(INT_GET((bb)->bb_numrecs, ARCH_CONVERT))) | 264 | (XFS_BMAP_BROOT_SPACE_CALC(be16_to_cpu((bb)->bb_numrecs))) |
266 | #define XFS_BMDR_SPACE_CALC(nrecs) \ | 265 | #define XFS_BMDR_SPACE_CALC(nrecs) \ |
267 | (int)(sizeof(xfs_bmdr_block_t) + \ | 266 | (int)(sizeof(xfs_bmdr_block_t) + \ |
268 | ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t)))) | 267 | ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t)))) |
@@ -273,11 +272,10 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; | |||
273 | #define XFS_BM_MAXLEVELS(mp,w) ((mp)->m_bm_maxlevels[(w)]) | 272 | #define XFS_BM_MAXLEVELS(mp,w) ((mp)->m_bm_maxlevels[(w)]) |
274 | 273 | ||
275 | #define XFS_BMAP_SANITY_CHECK(mp,bb,level) \ | 274 | #define XFS_BMAP_SANITY_CHECK(mp,bb,level) \ |
276 | (INT_GET((bb)->bb_magic, ARCH_CONVERT) == XFS_BMAP_MAGIC && \ | 275 | (be32_to_cpu((bb)->bb_magic) == XFS_BMAP_MAGIC && \ |
277 | INT_GET((bb)->bb_level, ARCH_CONVERT) == level && \ | 276 | be16_to_cpu((bb)->bb_level) == level && \ |
278 | INT_GET((bb)->bb_numrecs, ARCH_CONVERT) > 0 && \ | 277 | be16_to_cpu((bb)->bb_numrecs) > 0 && \ |
279 | INT_GET((bb)->bb_numrecs, ARCH_CONVERT) <= \ | 278 | be16_to_cpu((bb)->bb_numrecs) <= (mp)->m_bmap_dmxr[(level) != 0]) |
280 | (mp)->m_bmap_dmxr[(level) != 0]) | ||
281 | 279 | ||
282 | 280 | ||
283 | #ifdef __KERNEL__ | 281 | #ifdef __KERNEL__ |
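
The payoff of retyping the header macros' fields as __be16/__be32 is that sparse can now flag endianness mistakes at build time: an annotated value used as a native integer without a conversion is reported when the checker runs with endian checking enabled. A hedged, hypothetical fragment (exact warning text and checker flags depend on the sparse version in use):

#include <linux/types.h>
#include <asm/byteorder.h>

static int example_level_plus_one(__be16 level)
{
	/* return level + 1;  <-- sparse would flag this unconverted use */
	return be16_to_cpu(level) + 1;	/* explicit conversion, clean */
}
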
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 9de5a1f312c0..52d5d095fc35 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c | |||
@@ -90,11 +90,14 @@ xfs_btree_maxrecs( | |||
90 | switch (cur->bc_btnum) { | 90 | switch (cur->bc_btnum) { |
91 | case XFS_BTNUM_BNO: | 91 | case XFS_BTNUM_BNO: |
92 | case XFS_BTNUM_CNT: | 92 | case XFS_BTNUM_CNT: |
93 | return (int)XFS_ALLOC_BLOCK_MAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur); | 93 | return (int)XFS_ALLOC_BLOCK_MAXRECS( |
94 | be16_to_cpu(block->bb_h.bb_level), cur); | ||
94 | case XFS_BTNUM_BMAP: | 95 | case XFS_BTNUM_BMAP: |
95 | return (int)XFS_BMAP_BLOCK_IMAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur); | 96 | return (int)XFS_BMAP_BLOCK_IMAXRECS( |
97 | be16_to_cpu(block->bb_h.bb_level), cur); | ||
96 | case XFS_BTNUM_INO: | 98 | case XFS_BTNUM_INO: |
97 | return (int)XFS_INOBT_BLOCK_MAXRECS(INT_GET(block->bb_h.bb_level, ARCH_CONVERT), cur); | 99 | return (int)XFS_INOBT_BLOCK_MAXRECS( |
100 | be16_to_cpu(block->bb_h.bb_level), cur); | ||
98 | default: | 101 | default: |
99 | ASSERT(0); | 102 | ASSERT(0); |
100 | return 0; | 103 | return 0; |
@@ -140,7 +143,7 @@ xfs_btree_check_key( | |||
140 | 143 | ||
141 | k1 = ak1; | 144 | k1 = ak1; |
142 | k2 = ak2; | 145 | k2 = ak2; |
143 | ASSERT(INT_GET(k1->ar_startblock, ARCH_CONVERT) < INT_GET(k2->ar_startblock, ARCH_CONVERT)); | 146 | ASSERT(be32_to_cpu(k1->ar_startblock) < be32_to_cpu(k2->ar_startblock)); |
144 | break; | 147 | break; |
145 | } | 148 | } |
146 | case XFS_BTNUM_CNT: { | 149 | case XFS_BTNUM_CNT: { |
@@ -149,9 +152,9 @@ xfs_btree_check_key( | |||
149 | 152 | ||
150 | k1 = ak1; | 153 | k1 = ak1; |
151 | k2 = ak2; | 154 | k2 = ak2; |
152 | ASSERT(INT_GET(k1->ar_blockcount, ARCH_CONVERT) < INT_GET(k2->ar_blockcount, ARCH_CONVERT) || | 155 | ASSERT(be32_to_cpu(k1->ar_blockcount) < be32_to_cpu(k2->ar_blockcount) || |
153 | (INT_GET(k1->ar_blockcount, ARCH_CONVERT) == INT_GET(k2->ar_blockcount, ARCH_CONVERT) && | 156 | (k1->ar_blockcount == k2->ar_blockcount && |
154 | INT_GET(k1->ar_startblock, ARCH_CONVERT) < INT_GET(k2->ar_startblock, ARCH_CONVERT))); | 157 | be32_to_cpu(k1->ar_startblock) < be32_to_cpu(k2->ar_startblock))); |
155 | break; | 158 | break; |
156 | } | 159 | } |
157 | case XFS_BTNUM_BMAP: { | 160 | case XFS_BTNUM_BMAP: { |
@@ -194,16 +197,16 @@ xfs_btree_check_lblock( | |||
194 | 197 | ||
195 | mp = cur->bc_mp; | 198 | mp = cur->bc_mp; |
196 | lblock_ok = | 199 | lblock_ok = |
197 | INT_GET(block->bb_magic, ARCH_CONVERT) == xfs_magics[cur->bc_btnum] && | 200 | be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] && |
198 | INT_GET(block->bb_level, ARCH_CONVERT) == level && | 201 | be16_to_cpu(block->bb_level) == level && |
199 | INT_GET(block->bb_numrecs, ARCH_CONVERT) <= | 202 | be16_to_cpu(block->bb_numrecs) <= |
200 | xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) && | 203 | xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) && |
201 | block->bb_leftsib && | 204 | block->bb_leftsib && |
202 | (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLDFSBNO || | 205 | (be64_to_cpu(block->bb_leftsib) == NULLDFSBNO || |
203 | XFS_FSB_SANITY_CHECK(mp, INT_GET(block->bb_leftsib, ARCH_CONVERT))) && | 206 | XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_leftsib))) && |
204 | block->bb_rightsib && | 207 | block->bb_rightsib && |
205 | (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLDFSBNO || | 208 | (be64_to_cpu(block->bb_rightsib) == NULLDFSBNO || |
206 | XFS_FSB_SANITY_CHECK(mp, INT_GET(block->bb_rightsib, ARCH_CONVERT))); | 209 | XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_rightsib))); |
207 | if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK, | 210 | if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK, |
208 | XFS_RANDOM_BTREE_CHECK_LBLOCK))) { | 211 | XFS_RANDOM_BTREE_CHECK_LBLOCK))) { |
209 | if (bp) | 212 | if (bp) |
@@ -251,8 +254,9 @@ xfs_btree_check_rec( | |||
251 | 254 | ||
252 | r1 = ar1; | 255 | r1 = ar1; |
253 | r2 = ar2; | 256 | r2 = ar2; |
254 | ASSERT(INT_GET(r1->ar_startblock, ARCH_CONVERT) + INT_GET(r1->ar_blockcount, ARCH_CONVERT) <= | 257 | ASSERT(be32_to_cpu(r1->ar_startblock) + |
255 | INT_GET(r2->ar_startblock, ARCH_CONVERT)); | 258 | be32_to_cpu(r1->ar_blockcount) <= |
259 | be32_to_cpu(r2->ar_startblock)); | ||
256 | break; | 260 | break; |
257 | } | 261 | } |
258 | case XFS_BTNUM_CNT: { | 262 | case XFS_BTNUM_CNT: { |
@@ -261,9 +265,9 @@ xfs_btree_check_rec( | |||
261 | 265 | ||
262 | r1 = ar1; | 266 | r1 = ar1; |
263 | r2 = ar2; | 267 | r2 = ar2; |
264 | ASSERT(INT_GET(r1->ar_blockcount, ARCH_CONVERT) < INT_GET(r2->ar_blockcount, ARCH_CONVERT) || | 268 | ASSERT(be32_to_cpu(r1->ar_blockcount) < be32_to_cpu(r2->ar_blockcount) || |
265 | (INT_GET(r1->ar_blockcount, ARCH_CONVERT) == INT_GET(r2->ar_blockcount, ARCH_CONVERT) && | 269 | (r1->ar_blockcount == r2->ar_blockcount && |
266 | INT_GET(r1->ar_startblock, ARCH_CONVERT) < INT_GET(r2->ar_startblock, ARCH_CONVERT))); | 270 | be32_to_cpu(r1->ar_startblock) < be32_to_cpu(r2->ar_startblock))); |
267 | break; | 271 | break; |
268 | } | 272 | } |
269 | case XFS_BTNUM_BMAP: { | 273 | case XFS_BTNUM_BMAP: { |
@@ -311,17 +315,17 @@ xfs_btree_check_sblock( | |||
311 | 315 | ||
312 | agbp = cur->bc_private.a.agbp; | 316 | agbp = cur->bc_private.a.agbp; |
313 | agf = XFS_BUF_TO_AGF(agbp); | 317 | agf = XFS_BUF_TO_AGF(agbp); |
314 | agflen = INT_GET(agf->agf_length, ARCH_CONVERT); | 318 | agflen = be32_to_cpu(agf->agf_length); |
315 | sblock_ok = | 319 | sblock_ok = |
316 | INT_GET(block->bb_magic, ARCH_CONVERT) == xfs_magics[cur->bc_btnum] && | 320 | be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] && |
317 | INT_GET(block->bb_level, ARCH_CONVERT) == level && | 321 | be16_to_cpu(block->bb_level) == level && |
318 | INT_GET(block->bb_numrecs, ARCH_CONVERT) <= | 322 | be16_to_cpu(block->bb_numrecs) <= |
319 | xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) && | 323 | xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) && |
320 | (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK || | 324 | (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK || |
321 | INT_GET(block->bb_leftsib, ARCH_CONVERT) < agflen) && | 325 | be32_to_cpu(block->bb_leftsib) < agflen) && |
322 | block->bb_leftsib && | 326 | block->bb_leftsib && |
323 | (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK || | 327 | (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK || |
324 | INT_GET(block->bb_rightsib, ARCH_CONVERT) < agflen) && | 328 | be32_to_cpu(block->bb_rightsib) < agflen) && |
325 | block->bb_rightsib; | 329 | block->bb_rightsib; |
326 | if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp, | 330 | if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp, |
327 | XFS_ERRTAG_BTREE_CHECK_SBLOCK, | 331 | XFS_ERRTAG_BTREE_CHECK_SBLOCK, |
@@ -352,7 +356,7 @@ xfs_btree_check_sptr( | |||
352 | XFS_WANT_CORRUPTED_RETURN( | 356 | XFS_WANT_CORRUPTED_RETURN( |
353 | level > 0 && | 357 | level > 0 && |
354 | ptr != NULLAGBLOCK && ptr != 0 && | 358 | ptr != NULLAGBLOCK && ptr != 0 && |
355 | ptr < INT_GET(agf->agf_length, ARCH_CONVERT)); | 359 | ptr < be32_to_cpu(agf->agf_length)); |
356 | return 0; | 360 | return 0; |
357 | } | 361 | } |
358 | 362 | ||
@@ -591,15 +595,15 @@ xfs_btree_init_cursor( | |||
591 | case XFS_BTNUM_BNO: | 595 | case XFS_BTNUM_BNO: |
592 | case XFS_BTNUM_CNT: | 596 | case XFS_BTNUM_CNT: |
593 | agf = XFS_BUF_TO_AGF(agbp); | 597 | agf = XFS_BUF_TO_AGF(agbp); |
594 | nlevels = INT_GET(agf->agf_levels[btnum], ARCH_CONVERT); | 598 | nlevels = be32_to_cpu(agf->agf_levels[btnum]); |
595 | break; | 599 | break; |
596 | case XFS_BTNUM_BMAP: | 600 | case XFS_BTNUM_BMAP: |
597 | ifp = XFS_IFORK_PTR(ip, whichfork); | 601 | ifp = XFS_IFORK_PTR(ip, whichfork); |
598 | nlevels = INT_GET(ifp->if_broot->bb_level, ARCH_CONVERT) + 1; | 602 | nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1; |
599 | break; | 603 | break; |
600 | case XFS_BTNUM_INO: | 604 | case XFS_BTNUM_INO: |
601 | agi = XFS_BUF_TO_AGI(agbp); | 605 | agi = XFS_BUF_TO_AGI(agbp); |
602 | nlevels = INT_GET(agi->agi_level, ARCH_CONVERT); | 606 | nlevels = be32_to_cpu(agi->agi_level); |
603 | break; | 607 | break; |
604 | default: | 608 | default: |
605 | ASSERT(0); | 609 | ASSERT(0); |
@@ -663,9 +667,9 @@ xfs_btree_islastblock( | |||
663 | block = xfs_btree_get_block(cur, level, &bp); | 667 | block = xfs_btree_get_block(cur, level, &bp); |
664 | xfs_btree_check_block(cur, block, level, bp); | 668 | xfs_btree_check_block(cur, block, level, bp); |
665 | if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) | 669 | if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) |
666 | return INT_GET(block->bb_u.l.bb_rightsib, ARCH_CONVERT) == NULLDFSBNO; | 670 | return be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO; |
667 | else | 671 | else |
668 | return INT_GET(block->bb_u.s.bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK; | 672 | return be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK; |
669 | } | 673 | } |
670 | 674 | ||
671 | /* | 675 | /* |
@@ -693,7 +697,7 @@ xfs_btree_lastrec( | |||
693 | /* | 697 | /* |
694 | * Set the ptr value to numrecs, that's the last record/key. | 698 | * Set the ptr value to numrecs, that's the last record/key. |
695 | */ | 699 | */ |
696 | cur->bc_ptrs[level] = INT_GET(block->bb_h.bb_numrecs, ARCH_CONVERT); | 700 | cur->bc_ptrs[level] = be16_to_cpu(block->bb_h.bb_numrecs); |
697 | return 1; | 701 | return 1; |
698 | } | 702 | } |
699 | 703 | ||
@@ -863,38 +867,38 @@ xfs_btree_readahead_core( | |||
863 | case XFS_BTNUM_BNO: | 867 | case XFS_BTNUM_BNO: |
864 | case XFS_BTNUM_CNT: | 868 | case XFS_BTNUM_CNT: |
865 | a = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]); | 869 | a = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]); |
866 | if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(a->bb_leftsib, ARCH_CONVERT) != NULLAGBLOCK) { | 870 | if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(a->bb_leftsib) != NULLAGBLOCK) { |
867 | xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, | 871 | xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, |
868 | INT_GET(a->bb_leftsib, ARCH_CONVERT), 1); | 872 | be32_to_cpu(a->bb_leftsib), 1); |
869 | rval++; | 873 | rval++; |
870 | } | 874 | } |
871 | if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(a->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 875 | if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(a->bb_rightsib) != NULLAGBLOCK) { |
872 | xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, | 876 | xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, |
873 | INT_GET(a->bb_rightsib, ARCH_CONVERT), 1); | 877 | be32_to_cpu(a->bb_rightsib), 1); |
874 | rval++; | 878 | rval++; |
875 | } | 879 | } |
876 | break; | 880 | break; |
877 | case XFS_BTNUM_BMAP: | 881 | case XFS_BTNUM_BMAP: |
878 | b = XFS_BUF_TO_BMBT_BLOCK(cur->bc_bufs[lev]); | 882 | b = XFS_BUF_TO_BMBT_BLOCK(cur->bc_bufs[lev]); |
879 | if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(b->bb_leftsib, ARCH_CONVERT) != NULLDFSBNO) { | 883 | if ((lr & XFS_BTCUR_LEFTRA) && be64_to_cpu(b->bb_leftsib) != NULLDFSBNO) { |
880 | xfs_btree_reada_bufl(cur->bc_mp, INT_GET(b->bb_leftsib, ARCH_CONVERT), 1); | 884 | xfs_btree_reada_bufl(cur->bc_mp, be64_to_cpu(b->bb_leftsib), 1); |
881 | rval++; | 885 | rval++; |
882 | } | 886 | } |
883 | if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(b->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) { | 887 | if ((lr & XFS_BTCUR_RIGHTRA) && be64_to_cpu(b->bb_rightsib) != NULLDFSBNO) { |
884 | xfs_btree_reada_bufl(cur->bc_mp, INT_GET(b->bb_rightsib, ARCH_CONVERT), 1); | 888 | xfs_btree_reada_bufl(cur->bc_mp, be64_to_cpu(b->bb_rightsib), 1); |
885 | rval++; | 889 | rval++; |
886 | } | 890 | } |
887 | break; | 891 | break; |
888 | case XFS_BTNUM_INO: | 892 | case XFS_BTNUM_INO: |
889 | i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]); | 893 | i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]); |
890 | if ((lr & XFS_BTCUR_LEFTRA) && INT_GET(i->bb_leftsib, ARCH_CONVERT) != NULLAGBLOCK) { | 894 | if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(i->bb_leftsib) != NULLAGBLOCK) { |
891 | xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno, | 895 | xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno, |
892 | INT_GET(i->bb_leftsib, ARCH_CONVERT), 1); | 896 | be32_to_cpu(i->bb_leftsib), 1); |
893 | rval++; | 897 | rval++; |
894 | } | 898 | } |
895 | if ((lr & XFS_BTCUR_RIGHTRA) && INT_GET(i->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 899 | if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(i->bb_rightsib) != NULLAGBLOCK) { |
896 | xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno, | 900 | xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno, |
897 | INT_GET(i->bb_rightsib, ARCH_CONVERT), 1); | 901 | be32_to_cpu(i->bb_rightsib), 1); |
898 | rval++; | 902 | rval++; |
899 | } | 903 | } |
900 | break; | 904 | break; |
@@ -926,14 +930,14 @@ xfs_btree_setbuf( | |||
926 | return; | 930 | return; |
927 | b = XFS_BUF_TO_BLOCK(bp); | 931 | b = XFS_BUF_TO_BLOCK(bp); |
928 | if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) { | 932 | if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) { |
929 | if (INT_GET(b->bb_u.l.bb_leftsib, ARCH_CONVERT) == NULLDFSBNO) | 933 | if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO) |
930 | cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; | 934 | cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; |
931 | if (INT_GET(b->bb_u.l.bb_rightsib, ARCH_CONVERT) == NULLDFSBNO) | 935 | if (be64_to_cpu(b->bb_u.l.bb_rightsib) == NULLDFSBNO) |
932 | cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; | 936 | cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; |
933 | } else { | 937 | } else { |
934 | if (INT_GET(b->bb_u.s.bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) | 938 | if (be32_to_cpu(b->bb_u.s.bb_leftsib) == NULLAGBLOCK) |
935 | cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; | 939 | cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; |
936 | if (INT_GET(b->bb_u.s.bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) | 940 | if (be32_to_cpu(b->bb_u.s.bb_rightsib) == NULLAGBLOCK) |
937 | cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; | 941 | cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; |
938 | } | 942 | } |
939 | } | 943 | } |
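The fs/xfs/xfs_btree.c hunks above replace the width-agnostic INT_GET(..., ARCH_CONVERT) macro with be16_to_cpu/be32_to_cpu/be64_to_cpu, so the conversion width is carried by the field's declared type rather than by the call site. The sketch below is not part of the commit; it is a minimal userspace approximation (glibc <endian.h>, hypothetical struct and constant names) of the "is this the last block?" test on the short- and long-form sibling pointers, showing how each branch converts with the width that matches its on-disk field.

    #include <stdint.h>
    #include <stdio.h>
    #include <endian.h>

    /* Hypothetical stand-ins for on-disk big-endian fields. */
    typedef uint32_t be32_disk;   /* stored big-endian, 32-bit */
    typedef uint64_t be64_disk;   /* stored big-endian, 64-bit */

    #define EXAMPLE_NULLAGBLOCK  ((uint32_t)-1)  /* short-form "no sibling" */
    #define EXAMPLE_NULLDFSBNO   ((uint64_t)-1)  /* long-form "no sibling" */

    /* Short- and long-form headers keep their sibling pointers big-endian. */
    struct example_sblock { be32_disk bb_rightsib; };
    struct example_lblock { be64_disk bb_rightsib; };

    /* Convert with the width of the field, then compare against the
     * matching NULL value -- the same shape as the islastblock test. */
    static int short_is_last(const struct example_sblock *b)
    {
            return be32toh(b->bb_rightsib) == EXAMPLE_NULLAGBLOCK;
    }

    static int long_is_last(const struct example_lblock *b)
    {
            return be64toh(b->bb_rightsib) == EXAMPLE_NULLDFSBNO;
    }

    int main(void)
    {
            struct example_sblock s = { .bb_rightsib = htobe32(EXAMPLE_NULLAGBLOCK) };
            struct example_lblock l = { .bb_rightsib = htobe64(42) };

            printf("short last? %d, long last? %d\n",
                   short_is_last(&s), long_is_last(&l));
            return 0;
    }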
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index e6b2a09401c6..44f1bd98064a 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h | |||
@@ -39,25 +39,23 @@ struct xfs_trans; | |||
39 | /* | 39 | /* |
40 | * Short form header: space allocation btrees. | 40 | * Short form header: space allocation btrees. |
41 | */ | 41 | */ |
42 | typedef struct xfs_btree_sblock | 42 | typedef struct xfs_btree_sblock { |
43 | { | 43 | __be32 bb_magic; /* magic number for block type */ |
44 | __uint32_t bb_magic; /* magic number for block type */ | 44 | __be16 bb_level; /* 0 is a leaf */ |
45 | __uint16_t bb_level; /* 0 is a leaf */ | 45 | __be16 bb_numrecs; /* current # of data records */ |
46 | __uint16_t bb_numrecs; /* current # of data records */ | 46 | __be32 bb_leftsib; /* left sibling block or NULLAGBLOCK */ |
47 | xfs_agblock_t bb_leftsib; /* left sibling block or NULLAGBLOCK */ | 47 | __be32 bb_rightsib; /* right sibling block or NULLAGBLOCK */ |
48 | xfs_agblock_t bb_rightsib; /* right sibling block or NULLAGBLOCK */ | ||
49 | } xfs_btree_sblock_t; | 48 | } xfs_btree_sblock_t; |
50 | 49 | ||
51 | /* | 50 | /* |
52 | * Long form header: bmap btrees. | 51 | * Long form header: bmap btrees. |
53 | */ | 52 | */ |
54 | typedef struct xfs_btree_lblock | 53 | typedef struct xfs_btree_lblock { |
55 | { | 54 | __be32 bb_magic; /* magic number for block type */ |
56 | __uint32_t bb_magic; /* magic number for block type */ | 55 | __be16 bb_level; /* 0 is a leaf */ |
57 | __uint16_t bb_level; /* 0 is a leaf */ | 56 | __be16 bb_numrecs; /* current # of data records */ |
58 | __uint16_t bb_numrecs; /* current # of data records */ | 57 | __be64 bb_leftsib; /* left sibling block or NULLDFSBNO */ |
59 | xfs_dfsbno_t bb_leftsib; /* left sibling block or NULLDFSBNO */ | 58 | __be64 bb_rightsib; /* right sibling block or NULLDFSBNO */ |
60 | xfs_dfsbno_t bb_rightsib; /* right sibling block or NULLDFSBNO */ | ||
61 | } xfs_btree_lblock_t; | 59 | } xfs_btree_lblock_t; |
62 | 60 | ||
63 | /* | 61 | /* |
@@ -65,24 +63,23 @@ typedef struct xfs_btree_lblock | |||
65 | */ | 63 | */ |
66 | typedef struct xfs_btree_hdr | 64 | typedef struct xfs_btree_hdr |
67 | { | 65 | { |
68 | __uint32_t bb_magic; /* magic number for block type */ | 66 | __be32 bb_magic; /* magic number for block type */ |
69 | __uint16_t bb_level; /* 0 is a leaf */ | 67 | __be16 bb_level; /* 0 is a leaf */ |
70 | __uint16_t bb_numrecs; /* current # of data records */ | 68 | __be16 bb_numrecs; /* current # of data records */ |
71 | } xfs_btree_hdr_t; | 69 | } xfs_btree_hdr_t; |
72 | 70 | ||
73 | typedef struct xfs_btree_block | 71 | typedef struct xfs_btree_block { |
74 | { | ||
75 | xfs_btree_hdr_t bb_h; /* header */ | 72 | xfs_btree_hdr_t bb_h; /* header */ |
76 | union { | 73 | union { |
77 | struct { | 74 | struct { |
78 | xfs_agblock_t bb_leftsib; | 75 | __be32 bb_leftsib; |
79 | xfs_agblock_t bb_rightsib; | 76 | __be32 bb_rightsib; |
80 | } s; /* short form pointers */ | 77 | } s; /* short form pointers */ |
81 | struct { | 78 | struct { |
82 | xfs_dfsbno_t bb_leftsib; | 79 | __be64 bb_leftsib; |
83 | xfs_dfsbno_t bb_rightsib; | 80 | __be64 bb_rightsib; |
84 | } l; /* long form pointers */ | 81 | } l; /* long form pointers */ |
85 | } bb_u; /* rest */ | 82 | } bb_u; /* rest */ |
86 | } xfs_btree_block_t; | 83 | } xfs_btree_block_t; |
87 | 84 | ||
88 | /* | 85 | /* |
@@ -146,7 +143,7 @@ typedef struct xfs_btree_cur | |||
146 | struct xfs_trans *bc_tp; /* transaction we're in, if any */ | 143 | struct xfs_trans *bc_tp; /* transaction we're in, if any */ |
147 | struct xfs_mount *bc_mp; /* file system mount struct */ | 144 | struct xfs_mount *bc_mp; /* file system mount struct */ |
148 | union { | 145 | union { |
149 | xfs_alloc_rec_t a; | 146 | xfs_alloc_rec_incore_t a; |
150 | xfs_bmbt_irec_t b; | 147 | xfs_bmbt_irec_t b; |
151 | xfs_inobt_rec_t i; | 148 | xfs_inobt_rec_t i; |
152 | } bc_rec; /* current insert/search record value */ | 149 | } bc_rec; /* current insert/search record value */ |
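The xfs_btree.h hunk above retypes every on-disk header field from plain integers to __be32/__be16/__be64, which lets sparse flag any access that skips a conversion. As a rough userspace illustration of the discipline (local type and magic values, not the kernel's __be16/__be32 annotations), the writer converts CPU values to big-endian before storing and the reader converts back before use:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>
    #include <endian.h>

    /* Userspace approximation of the annotated short-form header:
     * every field is stored big-endian on "disk". */
    struct example_btree_sblock {
            uint32_t bb_magic;     /* big-endian on disk */
            uint16_t bb_level;     /* big-endian on disk */
            uint16_t bb_numrecs;   /* big-endian on disk */
            uint32_t bb_leftsib;   /* big-endian on disk */
            uint32_t bb_rightsib;  /* big-endian on disk */
    };

    int main(void)
    {
            unsigned char disk[sizeof(struct example_btree_sblock)];
            struct example_btree_sblock hdr;

            /* Writer side: convert CPU values to big-endian before storing. */
            hdr.bb_magic    = htobe32(0x41425442);   /* example magic, "ABTB" */
            hdr.bb_level    = htobe16(0);
            hdr.bb_numrecs  = htobe16(1);
            hdr.bb_leftsib  = htobe32((uint32_t)-1);
            hdr.bb_rightsib = htobe32((uint32_t)-1);
            memcpy(disk, &hdr, sizeof(hdr));

            /* Reader side: convert back before using the value. */
            memcpy(&hdr, disk, sizeof(hdr));
            printf("level %u, numrecs %u\n",
                   (unsigned)be16toh(hdr.bb_level),
                   (unsigned)be16toh(hdr.bb_numrecs));
            return 0;
    }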
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 239e701e9822..0ca597b0ca79 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c | |||
@@ -191,28 +191,26 @@ xfs_growfs_data_private( | |||
191 | XFS_FSS_TO_BB(mp, 1), 0); | 191 | XFS_FSS_TO_BB(mp, 1), 0); |
192 | agf = XFS_BUF_TO_AGF(bp); | 192 | agf = XFS_BUF_TO_AGF(bp); |
193 | memset(agf, 0, mp->m_sb.sb_sectsize); | 193 | memset(agf, 0, mp->m_sb.sb_sectsize); |
194 | INT_SET(agf->agf_magicnum, ARCH_CONVERT, XFS_AGF_MAGIC); | 194 | agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC); |
195 | INT_SET(agf->agf_versionnum, ARCH_CONVERT, XFS_AGF_VERSION); | 195 | agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION); |
196 | INT_SET(agf->agf_seqno, ARCH_CONVERT, agno); | 196 | agf->agf_seqno = cpu_to_be32(agno); |
197 | if (agno == nagcount - 1) | 197 | if (agno == nagcount - 1) |
198 | agsize = | 198 | agsize = |
199 | nb - | 199 | nb - |
200 | (agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks); | 200 | (agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks); |
201 | else | 201 | else |
202 | agsize = mp->m_sb.sb_agblocks; | 202 | agsize = mp->m_sb.sb_agblocks; |
203 | INT_SET(agf->agf_length, ARCH_CONVERT, agsize); | 203 | agf->agf_length = cpu_to_be32(agsize); |
204 | INT_SET(agf->agf_roots[XFS_BTNUM_BNOi], ARCH_CONVERT, | 204 | agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp)); |
205 | XFS_BNO_BLOCK(mp)); | 205 | agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp)); |
206 | INT_SET(agf->agf_roots[XFS_BTNUM_CNTi], ARCH_CONVERT, | 206 | agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1); |
207 | XFS_CNT_BLOCK(mp)); | 207 | agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1); |
208 | INT_SET(agf->agf_levels[XFS_BTNUM_BNOi], ARCH_CONVERT, 1); | ||
209 | INT_SET(agf->agf_levels[XFS_BTNUM_CNTi], ARCH_CONVERT, 1); | ||
210 | agf->agf_flfirst = 0; | 208 | agf->agf_flfirst = 0; |
211 | INT_SET(agf->agf_fllast, ARCH_CONVERT, XFS_AGFL_SIZE(mp) - 1); | 209 | agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1); |
212 | agf->agf_flcount = 0; | 210 | agf->agf_flcount = 0; |
213 | tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp); | 211 | tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp); |
214 | INT_SET(agf->agf_freeblks, ARCH_CONVERT, tmpsize); | 212 | agf->agf_freeblks = cpu_to_be32(tmpsize); |
215 | INT_SET(agf->agf_longest, ARCH_CONVERT, tmpsize); | 213 | agf->agf_longest = cpu_to_be32(tmpsize); |
216 | error = xfs_bwrite(mp, bp); | 214 | error = xfs_bwrite(mp, bp); |
217 | if (error) { | 215 | if (error) { |
218 | goto error0; | 216 | goto error0; |
@@ -225,19 +223,18 @@ xfs_growfs_data_private( | |||
225 | XFS_FSS_TO_BB(mp, 1), 0); | 223 | XFS_FSS_TO_BB(mp, 1), 0); |
226 | agi = XFS_BUF_TO_AGI(bp); | 224 | agi = XFS_BUF_TO_AGI(bp); |
227 | memset(agi, 0, mp->m_sb.sb_sectsize); | 225 | memset(agi, 0, mp->m_sb.sb_sectsize); |
228 | INT_SET(agi->agi_magicnum, ARCH_CONVERT, XFS_AGI_MAGIC); | 226 | agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC); |
229 | INT_SET(agi->agi_versionnum, ARCH_CONVERT, XFS_AGI_VERSION); | 227 | agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION); |
230 | INT_SET(agi->agi_seqno, ARCH_CONVERT, agno); | 228 | agi->agi_seqno = cpu_to_be32(agno); |
231 | INT_SET(agi->agi_length, ARCH_CONVERT, agsize); | 229 | agi->agi_length = cpu_to_be32(agsize); |
232 | agi->agi_count = 0; | 230 | agi->agi_count = 0; |
233 | INT_SET(agi->agi_root, ARCH_CONVERT, XFS_IBT_BLOCK(mp)); | 231 | agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp)); |
234 | INT_SET(agi->agi_level, ARCH_CONVERT, 1); | 232 | agi->agi_level = cpu_to_be32(1); |
235 | agi->agi_freecount = 0; | 233 | agi->agi_freecount = 0; |
236 | INT_SET(agi->agi_newino, ARCH_CONVERT, NULLAGINO); | 234 | agi->agi_newino = cpu_to_be32(NULLAGINO); |
237 | INT_SET(agi->agi_dirino, ARCH_CONVERT, NULLAGINO); | 235 | agi->agi_dirino = cpu_to_be32(NULLAGINO); |
238 | for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) | 236 | for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) |
239 | INT_SET(agi->agi_unlinked[bucket], ARCH_CONVERT, | 237 | agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); |
240 | NULLAGINO); | ||
241 | error = xfs_bwrite(mp, bp); | 238 | error = xfs_bwrite(mp, bp); |
242 | if (error) { | 239 | if (error) { |
243 | goto error0; | 240 | goto error0; |
@@ -250,17 +247,16 @@ xfs_growfs_data_private( | |||
250 | BTOBB(mp->m_sb.sb_blocksize), 0); | 247 | BTOBB(mp->m_sb.sb_blocksize), 0); |
251 | block = XFS_BUF_TO_SBLOCK(bp); | 248 | block = XFS_BUF_TO_SBLOCK(bp); |
252 | memset(block, 0, mp->m_sb.sb_blocksize); | 249 | memset(block, 0, mp->m_sb.sb_blocksize); |
253 | INT_SET(block->bb_magic, ARCH_CONVERT, XFS_ABTB_MAGIC); | 250 | block->bb_magic = cpu_to_be32(XFS_ABTB_MAGIC); |
254 | block->bb_level = 0; | 251 | block->bb_level = 0; |
255 | INT_SET(block->bb_numrecs, ARCH_CONVERT, 1); | 252 | block->bb_numrecs = cpu_to_be16(1); |
256 | INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); | 253 | block->bb_leftsib = cpu_to_be32(NULLAGBLOCK); |
257 | INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); | 254 | block->bb_rightsib = cpu_to_be32(NULLAGBLOCK); |
258 | arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc, | 255 | arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc, |
259 | block, 1, mp->m_alloc_mxr[0]); | 256 | block, 1, mp->m_alloc_mxr[0]); |
260 | INT_SET(arec->ar_startblock, ARCH_CONVERT, | 257 | arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp)); |
261 | XFS_PREALLOC_BLOCKS(mp)); | 258 | arec->ar_blockcount = cpu_to_be32( |
262 | INT_SET(arec->ar_blockcount, ARCH_CONVERT, | 259 | agsize - be32_to_cpu(arec->ar_startblock)); |
263 | agsize - INT_GET(arec->ar_startblock, ARCH_CONVERT)); | ||
264 | error = xfs_bwrite(mp, bp); | 260 | error = xfs_bwrite(mp, bp); |
265 | if (error) { | 261 | if (error) { |
266 | goto error0; | 262 | goto error0; |
@@ -273,18 +269,17 @@ xfs_growfs_data_private( | |||
273 | BTOBB(mp->m_sb.sb_blocksize), 0); | 269 | BTOBB(mp->m_sb.sb_blocksize), 0); |
274 | block = XFS_BUF_TO_SBLOCK(bp); | 270 | block = XFS_BUF_TO_SBLOCK(bp); |
275 | memset(block, 0, mp->m_sb.sb_blocksize); | 271 | memset(block, 0, mp->m_sb.sb_blocksize); |
276 | INT_SET(block->bb_magic, ARCH_CONVERT, XFS_ABTC_MAGIC); | 272 | block->bb_magic = cpu_to_be32(XFS_ABTC_MAGIC); |
277 | block->bb_level = 0; | 273 | block->bb_level = 0; |
278 | INT_SET(block->bb_numrecs, ARCH_CONVERT, 1); | 274 | block->bb_numrecs = cpu_to_be16(1); |
279 | INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); | 275 | block->bb_leftsib = cpu_to_be32(NULLAGBLOCK); |
280 | INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); | 276 | block->bb_rightsib = cpu_to_be32(NULLAGBLOCK); |
281 | arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc, | 277 | arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc, |
282 | block, 1, mp->m_alloc_mxr[0]); | 278 | block, 1, mp->m_alloc_mxr[0]); |
283 | INT_SET(arec->ar_startblock, ARCH_CONVERT, | 279 | arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp)); |
284 | XFS_PREALLOC_BLOCKS(mp)); | 280 | arec->ar_blockcount = cpu_to_be32( |
285 | INT_SET(arec->ar_blockcount, ARCH_CONVERT, | 281 | agsize - be32_to_cpu(arec->ar_startblock)); |
286 | agsize - INT_GET(arec->ar_startblock, ARCH_CONVERT)); | 282 | nfree += be32_to_cpu(arec->ar_blockcount); |
287 | nfree += INT_GET(arec->ar_blockcount, ARCH_CONVERT); | ||
288 | error = xfs_bwrite(mp, bp); | 283 | error = xfs_bwrite(mp, bp); |
289 | if (error) { | 284 | if (error) { |
290 | goto error0; | 285 | goto error0; |
@@ -297,11 +292,11 @@ xfs_growfs_data_private( | |||
297 | BTOBB(mp->m_sb.sb_blocksize), 0); | 292 | BTOBB(mp->m_sb.sb_blocksize), 0); |
298 | block = XFS_BUF_TO_SBLOCK(bp); | 293 | block = XFS_BUF_TO_SBLOCK(bp); |
299 | memset(block, 0, mp->m_sb.sb_blocksize); | 294 | memset(block, 0, mp->m_sb.sb_blocksize); |
300 | INT_SET(block->bb_magic, ARCH_CONVERT, XFS_IBT_MAGIC); | 295 | block->bb_magic = cpu_to_be32(XFS_IBT_MAGIC); |
301 | block->bb_level = 0; | 296 | block->bb_level = 0; |
302 | block->bb_numrecs = 0; | 297 | block->bb_numrecs = 0; |
303 | INT_SET(block->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); | 298 | block->bb_leftsib = cpu_to_be32(NULLAGBLOCK); |
304 | INT_SET(block->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); | 299 | block->bb_rightsib = cpu_to_be32(NULLAGBLOCK); |
305 | error = xfs_bwrite(mp, bp); | 300 | error = xfs_bwrite(mp, bp); |
306 | if (error) { | 301 | if (error) { |
307 | goto error0; | 302 | goto error0; |
@@ -321,10 +316,9 @@ xfs_growfs_data_private( | |||
321 | } | 316 | } |
322 | ASSERT(bp); | 317 | ASSERT(bp); |
323 | agi = XFS_BUF_TO_AGI(bp); | 318 | agi = XFS_BUF_TO_AGI(bp); |
324 | INT_MOD(agi->agi_length, ARCH_CONVERT, new); | 319 | be32_add(&agi->agi_length, new); |
325 | ASSERT(nagcount == oagcount || | 320 | ASSERT(nagcount == oagcount || |
326 | INT_GET(agi->agi_length, ARCH_CONVERT) == | 321 | be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks); |
327 | mp->m_sb.sb_agblocks); | ||
328 | xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH); | 322 | xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH); |
329 | /* | 323 | /* |
330 | * Change agf length. | 324 | * Change agf length. |
@@ -335,14 +329,14 @@ xfs_growfs_data_private( | |||
335 | } | 329 | } |
336 | ASSERT(bp); | 330 | ASSERT(bp); |
337 | agf = XFS_BUF_TO_AGF(bp); | 331 | agf = XFS_BUF_TO_AGF(bp); |
338 | INT_MOD(agf->agf_length, ARCH_CONVERT, new); | 332 | be32_add(&agf->agf_length, new); |
339 | ASSERT(INT_GET(agf->agf_length, ARCH_CONVERT) == | 333 | ASSERT(be32_to_cpu(agf->agf_length) == |
340 | INT_GET(agi->agi_length, ARCH_CONVERT)); | 334 | be32_to_cpu(agi->agi_length)); |
341 | /* | 335 | /* |
342 | * Free the new space. | 336 | * Free the new space. |
343 | */ | 337 | */ |
344 | error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno, | 338 | error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno, |
345 | INT_GET(agf->agf_length, ARCH_CONVERT) - new), new); | 339 | be32_to_cpu(agf->agf_length) - new), new); |
346 | if (error) { | 340 | if (error) { |
347 | goto error0; | 341 | goto error0; |
348 | } | 342 | } |
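In the xfs_fsops.c hunks above, in-place adjustments move from INT_MOD(field, ARCH_CONVERT, delta) to be32_add(&field, delta). The helper below is an illustrative userspace approximation of that read-modify-write pattern, not the kernel's definition:

    #include <stdint.h>
    #include <stdio.h>
    #include <endian.h>

    /* Approximation of be32_add(): convert the stored big-endian value
     * to CPU order, adjust it, and store it back big-endian. */
    static inline void example_be32_add(uint32_t *field, int32_t delta)
    {
            *field = htobe32(be32toh(*field) + delta);
    }

    int main(void)
    {
            uint32_t agf_length = htobe32(1000);   /* stored big-endian */

            example_be32_add(&agf_length, 64);     /* grow by 64 blocks */
            printf("agf_length = %u\n", be32toh(agf_length));
            return 0;
    }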
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index d8ceb3d1865f..8f3fae1aa98a 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c | |||
@@ -178,8 +178,8 @@ xfs_ialloc_ag_alloc( | |||
178 | * Ideally they should be spaced out through the a.g. | 178 | * Ideally they should be spaced out through the a.g. |
179 | * For now, just allocate blocks up front. | 179 | * For now, just allocate blocks up front. |
180 | */ | 180 | */ |
181 | args.agbno = INT_GET(agi->agi_root, ARCH_CONVERT); | 181 | args.agbno = be32_to_cpu(agi->agi_root); |
182 | args.fsbno = XFS_AGB_TO_FSB(args.mp, INT_GET(agi->agi_seqno, ARCH_CONVERT), | 182 | args.fsbno = XFS_AGB_TO_FSB(args.mp, be32_to_cpu(agi->agi_seqno), |
183 | args.agbno); | 183 | args.agbno); |
184 | /* | 184 | /* |
185 | * Allocate a fixed-size extent of inodes. | 185 | * Allocate a fixed-size extent of inodes. |
@@ -201,9 +201,9 @@ xfs_ialloc_ag_alloc( | |||
201 | */ | 201 | */ |
202 | if (isaligned && args.fsbno == NULLFSBLOCK) { | 202 | if (isaligned && args.fsbno == NULLFSBLOCK) { |
203 | args.type = XFS_ALLOCTYPE_NEAR_BNO; | 203 | args.type = XFS_ALLOCTYPE_NEAR_BNO; |
204 | args.agbno = INT_GET(agi->agi_root, ARCH_CONVERT); | 204 | args.agbno = be32_to_cpu(agi->agi_root); |
205 | args.fsbno = XFS_AGB_TO_FSB(args.mp, | 205 | args.fsbno = XFS_AGB_TO_FSB(args.mp, |
206 | INT_GET(agi->agi_seqno, ARCH_CONVERT), args.agbno); | 206 | be32_to_cpu(agi->agi_seqno), args.agbno); |
207 | if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) && | 207 | if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) && |
208 | args.mp->m_sb.sb_inoalignmt >= | 208 | args.mp->m_sb.sb_inoalignmt >= |
209 | XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp))) | 209 | XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp))) |
@@ -258,7 +258,7 @@ xfs_ialloc_ag_alloc( | |||
258 | /* | 258 | /* |
259 | * Get the block. | 259 | * Get the block. |
260 | */ | 260 | */ |
261 | d = XFS_AGB_TO_DADDR(args.mp, INT_GET(agi->agi_seqno, ARCH_CONVERT), | 261 | d = XFS_AGB_TO_DADDR(args.mp, be32_to_cpu(agi->agi_seqno), |
262 | args.agbno + (j * blks_per_cluster)); | 262 | args.agbno + (j * blks_per_cluster)); |
263 | fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d, | 263 | fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d, |
264 | args.mp->m_bsize * blks_per_cluster, | 264 | args.mp->m_bsize * blks_per_cluster, |
@@ -278,17 +278,17 @@ xfs_ialloc_ag_alloc( | |||
278 | } | 278 | } |
279 | xfs_trans_inode_alloc_buf(tp, fbuf); | 279 | xfs_trans_inode_alloc_buf(tp, fbuf); |
280 | } | 280 | } |
281 | INT_MOD(agi->agi_count, ARCH_CONVERT, newlen); | 281 | be32_add(&agi->agi_count, newlen); |
282 | INT_MOD(agi->agi_freecount, ARCH_CONVERT, newlen); | 282 | be32_add(&agi->agi_freecount, newlen); |
283 | down_read(&args.mp->m_peraglock); | 283 | down_read(&args.mp->m_peraglock); |
284 | args.mp->m_perag[INT_GET(agi->agi_seqno, ARCH_CONVERT)].pagi_freecount += newlen; | 284 | args.mp->m_perag[be32_to_cpu(agi->agi_seqno)].pagi_freecount += newlen; |
285 | up_read(&args.mp->m_peraglock); | 285 | up_read(&args.mp->m_peraglock); |
286 | INT_SET(agi->agi_newino, ARCH_CONVERT, newino); | 286 | agi->agi_newino = cpu_to_be32(newino); |
287 | /* | 287 | /* |
288 | * Insert records describing the new inode chunk into the btree. | 288 | * Insert records describing the new inode chunk into the btree. |
289 | */ | 289 | */ |
290 | cur = xfs_btree_init_cursor(args.mp, tp, agbp, | 290 | cur = xfs_btree_init_cursor(args.mp, tp, agbp, |
291 | INT_GET(agi->agi_seqno, ARCH_CONVERT), | 291 | be32_to_cpu(agi->agi_seqno), |
292 | XFS_BTNUM_INO, (xfs_inode_t *)0, 0); | 292 | XFS_BTNUM_INO, (xfs_inode_t *)0, 0); |
293 | for (thisino = newino; | 293 | for (thisino = newino; |
294 | thisino < newino + newlen; | 294 | thisino < newino + newlen; |
@@ -528,7 +528,7 @@ xfs_dialloc( | |||
528 | return 0; | 528 | return 0; |
529 | } | 529 | } |
530 | agi = XFS_BUF_TO_AGI(agbp); | 530 | agi = XFS_BUF_TO_AGI(agbp); |
531 | ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); | 531 | ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); |
532 | } else { | 532 | } else { |
533 | /* | 533 | /* |
534 | * Continue where we left off before. In this case, we | 534 | * Continue where we left off before. In this case, we |
@@ -536,12 +536,12 @@ xfs_dialloc( | |||
536 | */ | 536 | */ |
537 | agbp = *IO_agbp; | 537 | agbp = *IO_agbp; |
538 | agi = XFS_BUF_TO_AGI(agbp); | 538 | agi = XFS_BUF_TO_AGI(agbp); |
539 | ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); | 539 | ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); |
540 | ASSERT(INT_GET(agi->agi_freecount, ARCH_CONVERT) > 0); | 540 | ASSERT(be32_to_cpu(agi->agi_freecount) > 0); |
541 | } | 541 | } |
542 | mp = tp->t_mountp; | 542 | mp = tp->t_mountp; |
543 | agcount = mp->m_sb.sb_agcount; | 543 | agcount = mp->m_sb.sb_agcount; |
544 | agno = INT_GET(agi->agi_seqno, ARCH_CONVERT); | 544 | agno = be32_to_cpu(agi->agi_seqno); |
545 | tagno = agno; | 545 | tagno = agno; |
546 | pagno = XFS_INO_TO_AGNO(mp, parent); | 546 | pagno = XFS_INO_TO_AGNO(mp, parent); |
547 | pagino = XFS_INO_TO_AGINO(mp, parent); | 547 | pagino = XFS_INO_TO_AGINO(mp, parent); |
@@ -589,7 +589,7 @@ xfs_dialloc( | |||
589 | * can commit the current transaction and call | 589 | * can commit the current transaction and call |
590 | * us again where we left off. | 590 | * us again where we left off. |
591 | */ | 591 | */ |
592 | ASSERT(INT_GET(agi->agi_freecount, ARCH_CONVERT) > 0); | 592 | ASSERT(be32_to_cpu(agi->agi_freecount) > 0); |
593 | *alloc_done = B_TRUE; | 593 | *alloc_done = B_TRUE; |
594 | *IO_agbp = agbp; | 594 | *IO_agbp = agbp; |
595 | *inop = NULLFSINO; | 595 | *inop = NULLFSINO; |
@@ -620,7 +620,7 @@ nextag: | |||
620 | if (error) | 620 | if (error) |
621 | goto nextag; | 621 | goto nextag; |
622 | agi = XFS_BUF_TO_AGI(agbp); | 622 | agi = XFS_BUF_TO_AGI(agbp); |
623 | ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); | 623 | ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); |
624 | } | 624 | } |
625 | /* | 625 | /* |
626 | * Here with an allocation group that has a free inode. | 626 | * Here with an allocation group that has a free inode. |
@@ -629,14 +629,14 @@ nextag: | |||
629 | */ | 629 | */ |
630 | agno = tagno; | 630 | agno = tagno; |
631 | *IO_agbp = NULL; | 631 | *IO_agbp = NULL; |
632 | cur = xfs_btree_init_cursor(mp, tp, agbp, INT_GET(agi->agi_seqno, ARCH_CONVERT), | 632 | cur = xfs_btree_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno), |
633 | XFS_BTNUM_INO, (xfs_inode_t *)0, 0); | 633 | XFS_BTNUM_INO, (xfs_inode_t *)0, 0); |
634 | /* | 634 | /* |
635 | * If pagino is 0 (this is the root inode allocation) use newino. | 635 | * If pagino is 0 (this is the root inode allocation) use newino. |
636 | * This must work because we've just allocated some. | 636 | * This must work because we've just allocated some. |
637 | */ | 637 | */ |
638 | if (!pagino) | 638 | if (!pagino) |
639 | pagino = INT_GET(agi->agi_newino, ARCH_CONVERT); | 639 | pagino = be32_to_cpu(agi->agi_newino); |
640 | #ifdef DEBUG | 640 | #ifdef DEBUG |
641 | if (cur->bc_nlevels == 1) { | 641 | if (cur->bc_nlevels == 1) { |
642 | int freecount = 0; | 642 | int freecount = 0; |
@@ -654,7 +654,7 @@ nextag: | |||
654 | goto error0; | 654 | goto error0; |
655 | } while (i == 1); | 655 | } while (i == 1); |
656 | 656 | ||
657 | ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) || | 657 | ASSERT(freecount == be32_to_cpu(agi->agi_freecount) || |
658 | XFS_FORCED_SHUTDOWN(mp)); | 658 | XFS_FORCED_SHUTDOWN(mp)); |
659 | } | 659 | } |
660 | #endif | 660 | #endif |
@@ -813,9 +813,9 @@ nextag: | |||
813 | * In a different a.g. from the parent. | 813 | * In a different a.g. from the parent. |
814 | * See if the most recently allocated block has any free. | 814 | * See if the most recently allocated block has any free. |
815 | */ | 815 | */ |
816 | else if (INT_GET(agi->agi_newino, ARCH_CONVERT) != NULLAGINO) { | 816 | else if (be32_to_cpu(agi->agi_newino) != NULLAGINO) { |
817 | if ((error = xfs_inobt_lookup_eq(cur, | 817 | if ((error = xfs_inobt_lookup_eq(cur, |
818 | INT_GET(agi->agi_newino, ARCH_CONVERT), 0, 0, &i))) | 818 | be32_to_cpu(agi->agi_newino), 0, 0, &i))) |
819 | goto error0; | 819 | goto error0; |
820 | if (i == 1 && | 820 | if (i == 1 && |
821 | (error = xfs_inobt_get_rec(cur, &rec.ir_startino, | 821 | (error = xfs_inobt_get_rec(cur, &rec.ir_startino, |
@@ -862,7 +862,7 @@ nextag: | |||
862 | if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, | 862 | if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, |
863 | rec.ir_free))) | 863 | rec.ir_free))) |
864 | goto error0; | 864 | goto error0; |
865 | INT_MOD(agi->agi_freecount, ARCH_CONVERT, -1); | 865 | be32_add(&agi->agi_freecount, -1); |
866 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); | 866 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); |
867 | down_read(&mp->m_peraglock); | 867 | down_read(&mp->m_peraglock); |
868 | mp->m_perag[tagno].pagi_freecount--; | 868 | mp->m_perag[tagno].pagi_freecount--; |
@@ -882,7 +882,7 @@ nextag: | |||
882 | if ((error = xfs_inobt_increment(cur, 0, &i))) | 882 | if ((error = xfs_inobt_increment(cur, 0, &i))) |
883 | goto error0; | 883 | goto error0; |
884 | } while (i == 1); | 884 | } while (i == 1); |
885 | ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) || | 885 | ASSERT(freecount == be32_to_cpu(agi->agi_freecount) || |
886 | XFS_FORCED_SHUTDOWN(mp)); | 886 | XFS_FORCED_SHUTDOWN(mp)); |
887 | } | 887 | } |
888 | #endif | 888 | #endif |
@@ -970,8 +970,8 @@ xfs_difree( | |||
970 | return error; | 970 | return error; |
971 | } | 971 | } |
972 | agi = XFS_BUF_TO_AGI(agbp); | 972 | agi = XFS_BUF_TO_AGI(agbp); |
973 | ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); | 973 | ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); |
974 | ASSERT(agbno < INT_GET(agi->agi_length, ARCH_CONVERT)); | 974 | ASSERT(agbno < be32_to_cpu(agi->agi_length)); |
975 | /* | 975 | /* |
976 | * Initialize the cursor. | 976 | * Initialize the cursor. |
977 | */ | 977 | */ |
@@ -993,7 +993,7 @@ xfs_difree( | |||
993 | goto error0; | 993 | goto error0; |
994 | } | 994 | } |
995 | } while (i == 1); | 995 | } while (i == 1); |
996 | ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) || | 996 | ASSERT(freecount == be32_to_cpu(agi->agi_freecount) || |
997 | XFS_FORCED_SHUTDOWN(mp)); | 997 | XFS_FORCED_SHUTDOWN(mp)); |
998 | } | 998 | } |
999 | #endif | 999 | #endif |
@@ -1042,8 +1042,8 @@ xfs_difree( | |||
1042 | * to be freed when the transaction is committed. | 1042 | * to be freed when the transaction is committed. |
1043 | */ | 1043 | */ |
1044 | ilen = XFS_IALLOC_INODES(mp); | 1044 | ilen = XFS_IALLOC_INODES(mp); |
1045 | INT_MOD(agi->agi_count, ARCH_CONVERT, -ilen); | 1045 | be32_add(&agi->agi_count, -ilen); |
1046 | INT_MOD(agi->agi_freecount, ARCH_CONVERT, -(ilen - 1)); | 1046 | be32_add(&agi->agi_freecount, -(ilen - 1)); |
1047 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); | 1047 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); |
1048 | down_read(&mp->m_peraglock); | 1048 | down_read(&mp->m_peraglock); |
1049 | mp->m_perag[agno].pagi_freecount -= ilen - 1; | 1049 | mp->m_perag[agno].pagi_freecount -= ilen - 1; |
@@ -1072,7 +1072,7 @@ xfs_difree( | |||
1072 | /* | 1072 | /* |
1073 | * Change the inode free counts and log the ag/sb changes. | 1073 | * Change the inode free counts and log the ag/sb changes. |
1074 | */ | 1074 | */ |
1075 | INT_MOD(agi->agi_freecount, ARCH_CONVERT, 1); | 1075 | be32_add(&agi->agi_freecount, 1); |
1076 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); | 1076 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); |
1077 | down_read(&mp->m_peraglock); | 1077 | down_read(&mp->m_peraglock); |
1078 | mp->m_perag[agno].pagi_freecount++; | 1078 | mp->m_perag[agno].pagi_freecount++; |
@@ -1098,7 +1098,7 @@ xfs_difree( | |||
1098 | goto error0; | 1098 | goto error0; |
1099 | } | 1099 | } |
1100 | } while (i == 1); | 1100 | } while (i == 1); |
1101 | ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) || | 1101 | ASSERT(freecount == be32_to_cpu(agi->agi_freecount) || |
1102 | XFS_FORCED_SHUTDOWN(mp)); | 1102 | XFS_FORCED_SHUTDOWN(mp)); |
1103 | } | 1103 | } |
1104 | #endif | 1104 | #endif |
@@ -1307,7 +1307,7 @@ xfs_ialloc_log_agi( | |||
1307 | xfs_agi_t *agi; /* allocation group header */ | 1307 | xfs_agi_t *agi; /* allocation group header */ |
1308 | 1308 | ||
1309 | agi = XFS_BUF_TO_AGI(bp); | 1309 | agi = XFS_BUF_TO_AGI(bp); |
1310 | ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); | 1310 | ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); |
1311 | #endif | 1311 | #endif |
1312 | /* | 1312 | /* |
1313 | * Compute byte offsets for the first and last fields. | 1313 | * Compute byte offsets for the first and last fields. |
@@ -1349,9 +1349,8 @@ xfs_ialloc_read_agi( | |||
1349 | */ | 1349 | */ |
1350 | agi = XFS_BUF_TO_AGI(bp); | 1350 | agi = XFS_BUF_TO_AGI(bp); |
1351 | agi_ok = | 1351 | agi_ok = |
1352 | INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC && | 1352 | be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && |
1353 | XFS_AGI_GOOD_VERSION( | 1353 | XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)); |
1354 | INT_GET(agi->agi_versionnum, ARCH_CONVERT)); | ||
1355 | if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI, | 1354 | if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI, |
1356 | XFS_RANDOM_IALLOC_READ_AGI))) { | 1355 | XFS_RANDOM_IALLOC_READ_AGI))) { |
1357 | XFS_CORRUPTION_ERROR("xfs_ialloc_read_agi", XFS_ERRLEVEL_LOW, | 1356 | XFS_CORRUPTION_ERROR("xfs_ialloc_read_agi", XFS_ERRLEVEL_LOW, |
@@ -1361,16 +1360,15 @@ xfs_ialloc_read_agi( | |||
1361 | } | 1360 | } |
1362 | pag = &mp->m_perag[agno]; | 1361 | pag = &mp->m_perag[agno]; |
1363 | if (!pag->pagi_init) { | 1362 | if (!pag->pagi_init) { |
1364 | pag->pagi_freecount = INT_GET(agi->agi_freecount, ARCH_CONVERT); | 1363 | pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); |
1365 | pag->pagi_init = 1; | 1364 | pag->pagi_init = 1; |
1366 | } else { | 1365 | } else { |
1367 | /* | 1366 | /* |
1368 | * It's possible for these to be out of sync if | 1367 | * It's possible for these to be out of sync if |
1369 | * we are in the middle of a forced shutdown. | 1368 | * we are in the middle of a forced shutdown. |
1370 | */ | 1369 | */ |
1371 | ASSERT(pag->pagi_freecount == | 1370 | ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) || |
1372 | INT_GET(agi->agi_freecount, ARCH_CONVERT) | 1371 | XFS_FORCED_SHUTDOWN(mp)); |
1373 | || XFS_FORCED_SHUTDOWN(mp)); | ||
1374 | } | 1372 | } |
1375 | 1373 | ||
1376 | #ifdef DEBUG | 1374 | #ifdef DEBUG |
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 6912143f6ffe..60c65683462d 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c | |||
@@ -118,7 +118,7 @@ xfs_inobt_delrec( | |||
118 | * Fail if we're off the end of the block. | 118 | * Fail if we're off the end of the block. |
119 | */ | 119 | */ |
120 | 120 | ||
121 | numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); | 121 | numrecs = be16_to_cpu(block->bb_numrecs); |
122 | if (ptr > numrecs) { | 122 | if (ptr > numrecs) { |
123 | *stat = 0; | 123 | *stat = 0; |
124 | return 0; | 124 | return 0; |
@@ -133,7 +133,7 @@ xfs_inobt_delrec( | |||
133 | pp = XFS_INOBT_PTR_ADDR(block, 1, cur); | 133 | pp = XFS_INOBT_PTR_ADDR(block, 1, cur); |
134 | #ifdef DEBUG | 134 | #ifdef DEBUG |
135 | for (i = ptr; i < numrecs; i++) { | 135 | for (i = ptr; i < numrecs; i++) { |
136 | if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i], ARCH_CONVERT), level))) | 136 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i]), level))) |
137 | return error; | 137 | return error; |
138 | } | 138 | } |
139 | #endif | 139 | #endif |
@@ -170,7 +170,7 @@ xfs_inobt_delrec( | |||
170 | * Decrement and log the number of entries in the block. | 170 | * Decrement and log the number of entries in the block. |
171 | */ | 171 | */ |
172 | numrecs--; | 172 | numrecs--; |
173 | INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); | 173 | block->bb_numrecs = cpu_to_be16(numrecs); |
174 | xfs_inobt_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); | 174 | xfs_inobt_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); |
175 | /* | 175 | /* |
176 | * Is this the root level? If so, we're almost done. | 176 | * Is this the root level? If so, we're almost done. |
@@ -189,9 +189,9 @@ xfs_inobt_delrec( | |||
189 | * pp is still set to the first pointer in the block. | 189 | * pp is still set to the first pointer in the block. |
190 | * Make it the new root of the btree. | 190 | * Make it the new root of the btree. |
191 | */ | 191 | */ |
192 | bno = INT_GET(agi->agi_root, ARCH_CONVERT); | 192 | bno = be32_to_cpu(agi->agi_root); |
193 | agi->agi_root = *pp; | 193 | agi->agi_root = *pp; |
194 | INT_MOD(agi->agi_level, ARCH_CONVERT, -1); | 194 | be32_add(&agi->agi_level, -1); |
195 | /* | 195 | /* |
196 | * Free the block. | 196 | * Free the block. |
197 | */ | 197 | */ |
@@ -234,8 +234,8 @@ xfs_inobt_delrec( | |||
234 | * tree balanced. Look at the left and right sibling blocks to | 234 | * tree balanced. Look at the left and right sibling blocks to |
235 | * see if we can re-balance by moving only one record. | 235 | * see if we can re-balance by moving only one record. |
236 | */ | 236 | */ |
237 | rbno = INT_GET(block->bb_rightsib, ARCH_CONVERT); | 237 | rbno = be32_to_cpu(block->bb_rightsib); |
238 | lbno = INT_GET(block->bb_leftsib, ARCH_CONVERT); | 238 | lbno = be32_to_cpu(block->bb_leftsib); |
239 | bno = NULLAGBLOCK; | 239 | bno = NULLAGBLOCK; |
240 | ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); | 240 | ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); |
241 | /* | 241 | /* |
@@ -272,18 +272,18 @@ xfs_inobt_delrec( | |||
272 | /* | 272 | /* |
273 | * Grab the current block number, for future use. | 273 | * Grab the current block number, for future use. |
274 | */ | 274 | */ |
275 | bno = INT_GET(right->bb_leftsib, ARCH_CONVERT); | 275 | bno = be32_to_cpu(right->bb_leftsib); |
276 | /* | 276 | /* |
277 | * If right block is full enough so that removing one entry | 277 | * If right block is full enough so that removing one entry |
278 | * won't make it too empty, and left-shifting an entry out | 278 | * won't make it too empty, and left-shifting an entry out |
279 | * of right to us works, we're done. | 279 | * of right to us works, we're done. |
280 | */ | 280 | */ |
281 | if (INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1 >= | 281 | if (be16_to_cpu(right->bb_numrecs) - 1 >= |
282 | XFS_INOBT_BLOCK_MINRECS(level, cur)) { | 282 | XFS_INOBT_BLOCK_MINRECS(level, cur)) { |
283 | if ((error = xfs_inobt_lshift(tcur, level, &i))) | 283 | if ((error = xfs_inobt_lshift(tcur, level, &i))) |
284 | goto error0; | 284 | goto error0; |
285 | if (i) { | 285 | if (i) { |
286 | ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= | 286 | ASSERT(be16_to_cpu(block->bb_numrecs) >= |
287 | XFS_INOBT_BLOCK_MINRECS(level, cur)); | 287 | XFS_INOBT_BLOCK_MINRECS(level, cur)); |
288 | xfs_btree_del_cursor(tcur, | 288 | xfs_btree_del_cursor(tcur, |
289 | XFS_BTREE_NOERROR); | 289 | XFS_BTREE_NOERROR); |
@@ -300,7 +300,7 @@ xfs_inobt_delrec( | |||
300 | * future reference, and fix up the temp cursor to point | 300 | * future reference, and fix up the temp cursor to point |
301 | * to our block again (last record). | 301 | * to our block again (last record). |
302 | */ | 302 | */ |
303 | rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); | 303 | rrecs = be16_to_cpu(right->bb_numrecs); |
304 | if (lbno != NULLAGBLOCK) { | 304 | if (lbno != NULLAGBLOCK) { |
305 | xfs_btree_firstrec(tcur, level); | 305 | xfs_btree_firstrec(tcur, level); |
306 | if ((error = xfs_inobt_decrement(tcur, level, &i))) | 306 | if ((error = xfs_inobt_decrement(tcur, level, &i))) |
@@ -332,18 +332,18 @@ xfs_inobt_delrec( | |||
332 | /* | 332 | /* |
333 | * Grab the current block number, for future use. | 333 | * Grab the current block number, for future use. |
334 | */ | 334 | */ |
335 | bno = INT_GET(left->bb_rightsib, ARCH_CONVERT); | 335 | bno = be32_to_cpu(left->bb_rightsib); |
336 | /* | 336 | /* |
337 | * If left block is full enough so that removing one entry | 337 | * If left block is full enough so that removing one entry |
338 | * won't make it too empty, and right-shifting an entry out | 338 | * won't make it too empty, and right-shifting an entry out |
339 | * of left to us works, we're done. | 339 | * of left to us works, we're done. |
340 | */ | 340 | */ |
341 | if (INT_GET(left->bb_numrecs, ARCH_CONVERT) - 1 >= | 341 | if (be16_to_cpu(left->bb_numrecs) - 1 >= |
342 | XFS_INOBT_BLOCK_MINRECS(level, cur)) { | 342 | XFS_INOBT_BLOCK_MINRECS(level, cur)) { |
343 | if ((error = xfs_inobt_rshift(tcur, level, &i))) | 343 | if ((error = xfs_inobt_rshift(tcur, level, &i))) |
344 | goto error0; | 344 | goto error0; |
345 | if (i) { | 345 | if (i) { |
346 | ASSERT(INT_GET(block->bb_numrecs, ARCH_CONVERT) >= | 346 | ASSERT(be16_to_cpu(block->bb_numrecs) >= |
347 | XFS_INOBT_BLOCK_MINRECS(level, cur)); | 347 | XFS_INOBT_BLOCK_MINRECS(level, cur)); |
348 | xfs_btree_del_cursor(tcur, | 348 | xfs_btree_del_cursor(tcur, |
349 | XFS_BTREE_NOERROR); | 349 | XFS_BTREE_NOERROR); |
@@ -357,7 +357,7 @@ xfs_inobt_delrec( | |||
357 | * Otherwise, grab the number of records in right for | 357 | * Otherwise, grab the number of records in right for |
358 | * future reference. | 358 | * future reference. |
359 | */ | 359 | */ |
360 | lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); | 360 | lrecs = be16_to_cpu(left->bb_numrecs); |
361 | } | 361 | } |
362 | /* | 362 | /* |
363 | * Delete the temp cursor, we're done with it. | 363 | * Delete the temp cursor, we're done with it. |
@@ -378,14 +378,14 @@ xfs_inobt_delrec( | |||
378 | */ | 378 | */ |
379 | rbno = bno; | 379 | rbno = bno; |
380 | right = block; | 380 | right = block; |
381 | rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); | 381 | rrecs = be16_to_cpu(right->bb_numrecs); |
382 | rbp = bp; | 382 | rbp = bp; |
383 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 383 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
384 | cur->bc_private.i.agno, lbno, 0, &lbp, | 384 | cur->bc_private.i.agno, lbno, 0, &lbp, |
385 | XFS_INO_BTREE_REF))) | 385 | XFS_INO_BTREE_REF))) |
386 | return error; | 386 | return error; |
387 | left = XFS_BUF_TO_INOBT_BLOCK(lbp); | 387 | left = XFS_BUF_TO_INOBT_BLOCK(lbp); |
388 | lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); | 388 | lrecs = be16_to_cpu(left->bb_numrecs); |
389 | if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) | 389 | if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) |
390 | return error; | 390 | return error; |
391 | } | 391 | } |
@@ -400,14 +400,14 @@ xfs_inobt_delrec( | |||
400 | */ | 400 | */ |
401 | lbno = bno; | 401 | lbno = bno; |
402 | left = block; | 402 | left = block; |
403 | lrecs = INT_GET(left->bb_numrecs, ARCH_CONVERT); | 403 | lrecs = be16_to_cpu(left->bb_numrecs); |
404 | lbp = bp; | 404 | lbp = bp; |
405 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 405 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
406 | cur->bc_private.i.agno, rbno, 0, &rbp, | 406 | cur->bc_private.i.agno, rbno, 0, &rbp, |
407 | XFS_INO_BTREE_REF))) | 407 | XFS_INO_BTREE_REF))) |
408 | return error; | 408 | return error; |
409 | right = XFS_BUF_TO_INOBT_BLOCK(rbp); | 409 | right = XFS_BUF_TO_INOBT_BLOCK(rbp); |
410 | rrecs = INT_GET(right->bb_numrecs, ARCH_CONVERT); | 410 | rrecs = be16_to_cpu(right->bb_numrecs); |
411 | if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) | 411 | if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) |
412 | return error; | 412 | return error; |
413 | } | 413 | } |
@@ -435,7 +435,7 @@ xfs_inobt_delrec( | |||
435 | rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); | 435 | rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); |
436 | #ifdef DEBUG | 436 | #ifdef DEBUG |
437 | for (i = 0; i < rrecs; i++) { | 437 | for (i = 0; i < rrecs; i++) { |
438 | if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) | 438 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) |
439 | return error; | 439 | return error; |
440 | } | 440 | } |
441 | #endif | 441 | #endif |
@@ -471,7 +471,7 @@ xfs_inobt_delrec( | |||
471 | * Fix up the number of records in the surviving block. | 471 | * Fix up the number of records in the surviving block. |
472 | */ | 472 | */ |
473 | lrecs += rrecs; | 473 | lrecs += rrecs; |
474 | INT_SET(left->bb_numrecs, ARCH_CONVERT, lrecs); | 474 | left->bb_numrecs = cpu_to_be16(lrecs); |
475 | /* | 475 | /* |
476 | * Fix up the right block pointer in the surviving block, and log it. | 476 | * Fix up the right block pointer in the surviving block, and log it. |
477 | */ | 477 | */ |
@@ -481,18 +481,18 @@ xfs_inobt_delrec( | |||
481 | * If there is a right sibling now, make it point to the | 481 | * If there is a right sibling now, make it point to the |
482 | * remaining block. | 482 | * remaining block. |
483 | */ | 483 | */ |
484 | if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 484 | if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) { |
485 | xfs_inobt_block_t *rrblock; | 485 | xfs_inobt_block_t *rrblock; |
486 | xfs_buf_t *rrbp; | 486 | xfs_buf_t *rrbp; |
487 | 487 | ||
488 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 488 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
489 | cur->bc_private.i.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, | 489 | cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib), 0, |
490 | &rrbp, XFS_INO_BTREE_REF))) | 490 | &rrbp, XFS_INO_BTREE_REF))) |
491 | return error; | 491 | return error; |
492 | rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp); | 492 | rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp); |
493 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) | 493 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) |
494 | return error; | 494 | return error; |
495 | INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, lbno); | 495 | rrblock->bb_leftsib = cpu_to_be32(lbno); |
496 | xfs_inobt_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); | 496 | xfs_inobt_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); |
497 | } | 497 | } |
498 | /* | 498 | /* |
@@ -584,7 +584,7 @@ xfs_inobt_insrec( | |||
584 | */ | 584 | */ |
585 | bp = cur->bc_bufs[level]; | 585 | bp = cur->bc_bufs[level]; |
586 | block = XFS_BUF_TO_INOBT_BLOCK(bp); | 586 | block = XFS_BUF_TO_INOBT_BLOCK(bp); |
587 | numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); | 587 | numrecs = be16_to_cpu(block->bb_numrecs); |
588 | #ifdef DEBUG | 588 | #ifdef DEBUG |
589 | if ((error = xfs_btree_check_sblock(cur, block, level, bp))) | 589 | if ((error = xfs_btree_check_sblock(cur, block, level, bp))) |
590 | return error; | 590 | return error; |
@@ -658,7 +658,7 @@ xfs_inobt_insrec( | |||
658 | * At this point we know there's room for our new entry in the block | 658 | * At this point we know there's room for our new entry in the block |
659 | * we're pointing at. | 659 | * we're pointing at. |
660 | */ | 660 | */ |
661 | numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT); | 661 | numrecs = be16_to_cpu(block->bb_numrecs); |
662 | if (level > 0) { | 662 | if (level > 0) { |
663 | /* | 663 | /* |
664 | * It's a non-leaf entry. Make a hole for the new data | 664 | * It's a non-leaf entry. Make a hole for the new data |
@@ -668,7 +668,7 @@ xfs_inobt_insrec( | |||
668 | pp = XFS_INOBT_PTR_ADDR(block, 1, cur); | 668 | pp = XFS_INOBT_PTR_ADDR(block, 1, cur); |
669 | #ifdef DEBUG | 669 | #ifdef DEBUG |
670 | for (i = numrecs; i >= ptr; i--) { | 670 | for (i = numrecs; i >= ptr; i--) { |
671 | if ((error = xfs_btree_check_sptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), level))) | 671 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level))) |
672 | return error; | 672 | return error; |
673 | } | 673 | } |
674 | #endif | 674 | #endif |
@@ -684,9 +684,9 @@ xfs_inobt_insrec( | |||
684 | return error; | 684 | return error; |
685 | #endif | 685 | #endif |
686 | kp[ptr - 1] = key; /* INT_: struct copy */ | 686 | kp[ptr - 1] = key; /* INT_: struct copy */ |
687 | INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); | 687 | pp[ptr - 1] = cpu_to_be32(*bnop); |
688 | numrecs++; | 688 | numrecs++; |
689 | INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); | 689 | block->bb_numrecs = cpu_to_be16(numrecs); |
690 | xfs_inobt_log_keys(cur, bp, ptr, numrecs); | 690 | xfs_inobt_log_keys(cur, bp, ptr, numrecs); |
691 | xfs_inobt_log_ptrs(cur, bp, ptr, numrecs); | 691 | xfs_inobt_log_ptrs(cur, bp, ptr, numrecs); |
692 | } else { | 692 | } else { |
@@ -702,7 +702,7 @@ xfs_inobt_insrec( | |||
702 | */ | 702 | */ |
703 | rp[ptr - 1] = *recp; /* INT_: struct copy */ | 703 | rp[ptr - 1] = *recp; /* INT_: struct copy */ |
704 | numrecs++; | 704 | numrecs++; |
705 | INT_SET(block->bb_numrecs, ARCH_CONVERT, numrecs); | 705 | block->bb_numrecs = cpu_to_be16(numrecs); |
706 | xfs_inobt_log_recs(cur, bp, ptr, numrecs); | 706 | xfs_inobt_log_recs(cur, bp, ptr, numrecs); |
707 | } | 707 | } |
708 | /* | 708 | /* |
@@ -857,8 +857,8 @@ xfs_inobt_lookup( | |||
857 | xfs_agi_t *agi; /* a.g. inode header */ | 857 | xfs_agi_t *agi; /* a.g. inode header */ |
858 | 858 | ||
859 | agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp); | 859 | agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp); |
860 | agno = INT_GET(agi->agi_seqno, ARCH_CONVERT); | 860 | agno = be32_to_cpu(agi->agi_seqno); |
861 | agbno = INT_GET(agi->agi_root, ARCH_CONVERT); | 861 | agbno = be32_to_cpu(agi->agi_root); |
862 | } | 862 | } |
863 | /* | 863 | /* |
864 | * Iterate over each level in the btree, starting at the root. | 864 | * Iterate over each level in the btree, starting at the root. |
@@ -925,7 +925,7 @@ xfs_inobt_lookup( | |||
925 | * Set low and high entry numbers, 1-based. | 925 | * Set low and high entry numbers, 1-based. |
926 | */ | 926 | */ |
927 | low = 1; | 927 | low = 1; |
928 | if (!(high = INT_GET(block->bb_numrecs, ARCH_CONVERT))) { | 928 | if (!(high = be16_to_cpu(block->bb_numrecs))) { |
929 | /* | 929 | /* |
930 | * If the block is empty, the tree must | 930 | * If the block is empty, the tree must |
931 | * be an empty leaf. | 931 | * be an empty leaf. |
@@ -992,7 +992,7 @@ xfs_inobt_lookup( | |||
992 | */ | 992 | */ |
993 | if (diff > 0 && --keyno < 1) | 993 | if (diff > 0 && --keyno < 1) |
994 | keyno = 1; | 994 | keyno = 1; |
995 | agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, keyno, cur), ARCH_CONVERT); | 995 | agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, keyno, cur)); |
996 | #ifdef DEBUG | 996 | #ifdef DEBUG |
997 | if ((error = xfs_btree_check_sptr(cur, agbno, level))) | 997 | if ((error = xfs_btree_check_sptr(cur, agbno, level))) |
998 | return error; | 998 | return error; |
@@ -1011,8 +1011,8 @@ xfs_inobt_lookup( | |||
1011 | * not the last block, we're in the wrong block. | 1011 | * not the last block, we're in the wrong block. |
1012 | */ | 1012 | */ |
1013 | if (dir == XFS_LOOKUP_GE && | 1013 | if (dir == XFS_LOOKUP_GE && |
1014 | keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT) && | 1014 | keyno > be16_to_cpu(block->bb_numrecs) && |
1015 | INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 1015 | be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) { |
1016 | int i; | 1016 | int i; |
1017 | 1017 | ||
1018 | cur->bc_ptrs[0] = keyno; | 1018 | cur->bc_ptrs[0] = keyno; |
@@ -1029,7 +1029,7 @@ xfs_inobt_lookup( | |||
1029 | /* | 1029 | /* |
1030 | * Return if we succeeded or not. | 1030 | * Return if we succeeded or not. |
1031 | */ | 1031 | */ |
1032 | if (keyno == 0 || keyno > INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 1032 | if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) |
1033 | *stat = 0; | 1033 | *stat = 0; |
1034 | else | 1034 | else |
1035 | *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); | 1035 | *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); |
@@ -1075,7 +1075,7 @@ xfs_inobt_lshift( | |||
1075 | /* | 1075 | /* |
1076 | * If we've got no left sibling then we can't shift an entry left. | 1076 | * If we've got no left sibling then we can't shift an entry left. |
1077 | */ | 1077 | */ |
1078 | if (INT_GET(right->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1078 | if (be32_to_cpu(right->bb_leftsib) == NULLAGBLOCK) { |
1079 | *stat = 0; | 1079 | *stat = 0; |
1080 | return 0; | 1080 | return 0; |
1081 | } | 1081 | } |
@@ -1091,8 +1091,8 @@ xfs_inobt_lshift( | |||
1091 | * Set up the left neighbor as "left". | 1091 | * Set up the left neighbor as "left". |
1092 | */ | 1092 | */ |
1093 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1093 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1094 | cur->bc_private.i.agno, INT_GET(right->bb_leftsib, ARCH_CONVERT), 0, &lbp, | 1094 | cur->bc_private.i.agno, be32_to_cpu(right->bb_leftsib), |
1095 | XFS_INO_BTREE_REF))) | 1095 | 0, &lbp, XFS_INO_BTREE_REF))) |
1096 | return error; | 1096 | return error; |
1097 | left = XFS_BUF_TO_INOBT_BLOCK(lbp); | 1097 | left = XFS_BUF_TO_INOBT_BLOCK(lbp); |
1098 | if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) | 1098 | if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) |
@@ -1100,11 +1100,11 @@ xfs_inobt_lshift( | |||
1100 | /* | 1100 | /* |
1101 | * If it's full, it can't take another entry. | 1101 | * If it's full, it can't take another entry. |
1102 | */ | 1102 | */ |
1103 | if (INT_GET(left->bb_numrecs, ARCH_CONVERT) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { | 1103 | if (be16_to_cpu(left->bb_numrecs) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { |
1104 | *stat = 0; | 1104 | *stat = 0; |
1105 | return 0; | 1105 | return 0; |
1106 | } | 1106 | } |
1107 | nrec = INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1; | 1107 | nrec = be16_to_cpu(left->bb_numrecs) + 1; |
1108 | /* | 1108 | /* |
1109 | * If non-leaf, copy a key and a ptr to the left block. | 1109 | * If non-leaf, copy a key and a ptr to the left block. |
1110 | */ | 1110 | */ |
@@ -1116,7 +1116,7 @@ xfs_inobt_lshift( | |||
1116 | lpp = XFS_INOBT_PTR_ADDR(left, nrec, cur); | 1116 | lpp = XFS_INOBT_PTR_ADDR(left, nrec, cur); |
1117 | rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); | 1117 | rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); |
1118 | #ifdef DEBUG | 1118 | #ifdef DEBUG |
1119 | if ((error = xfs_btree_check_sptr(cur, INT_GET(*rpp, ARCH_CONVERT), level))) | 1119 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level))) |
1120 | return error; | 1120 | return error; |
1121 | #endif | 1121 | #endif |
1122 | *lpp = *rpp; /* INT_: no-change copy */ | 1122 | *lpp = *rpp; /* INT_: no-change copy */ |
@@ -1134,7 +1134,7 @@ xfs_inobt_lshift( | |||
1134 | /* | 1134 | /* |
1135 | * Bump and log left's numrecs, decrement and log right's numrecs. | 1135 | * Bump and log left's numrecs, decrement and log right's numrecs. |
1136 | */ | 1136 | */ |
1137 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, +1); | 1137 | be16_add(&left->bb_numrecs, 1); |
1138 | xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); | 1138 | xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); |
1139 | #ifdef DEBUG | 1139 | #ifdef DEBUG |
1140 | if (level > 0) | 1140 | if (level > 0) |
@@ -1142,26 +1142,26 @@ xfs_inobt_lshift( | |||
1142 | else | 1142 | else |
1143 | xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp); | 1143 | xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp); |
1144 | #endif | 1144 | #endif |
1145 | INT_MOD(right->bb_numrecs, ARCH_CONVERT, -1); | 1145 | be16_add(&right->bb_numrecs, -1); |
1146 | xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); | 1146 | xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); |
1147 | /* | 1147 | /* |
1148 | * Slide the contents of right down one entry. | 1148 | * Slide the contents of right down one entry. |
1149 | */ | 1149 | */ |
1150 | if (level > 0) { | 1150 | if (level > 0) { |
1151 | #ifdef DEBUG | 1151 | #ifdef DEBUG |
1152 | for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { | 1152 | for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { |
1153 | if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT), | 1153 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i + 1]), |
1154 | level))) | 1154 | level))) |
1155 | return error; | 1155 | return error; |
1156 | } | 1156 | } |
1157 | #endif | 1157 | #endif |
1158 | memmove(rkp, rkp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); | 1158 | memmove(rkp, rkp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); |
1159 | memmove(rpp, rpp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); | 1159 | memmove(rpp, rpp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); |
1160 | xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1160 | xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1161 | xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1161 | xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1162 | } else { | 1162 | } else { |
1163 | memmove(rrp, rrp + 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); | 1163 | memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); |
1164 | xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1164 | xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1165 | key.ir_startino = rrp->ir_startino; /* INT_: direct copy */ | 1165 | key.ir_startino = rrp->ir_startino; /* INT_: direct copy */ |
1166 | rkp = &key; | 1166 | rkp = &key; |
1167 | } | 1167 | } |
@@ -1213,7 +1213,7 @@ xfs_inobt_newroot( | |||
1213 | args.tp = cur->bc_tp; | 1213 | args.tp = cur->bc_tp; |
1214 | args.mp = cur->bc_mp; | 1214 | args.mp = cur->bc_mp; |
1215 | args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, | 1215 | args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, |
1216 | INT_GET(agi->agi_root, ARCH_CONVERT)); | 1216 | be32_to_cpu(agi->agi_root)); |
1217 | args.mod = args.minleft = args.alignment = args.total = args.wasdel = | 1217 | args.mod = args.minleft = args.alignment = args.total = args.wasdel = |
1218 | args.isfl = args.userdata = args.minalignslop = 0; | 1218 | args.isfl = args.userdata = args.minalignslop = 0; |
1219 | args.minlen = args.maxlen = args.prod = 1; | 1219 | args.minlen = args.maxlen = args.prod = 1; |
@@ -1233,8 +1233,8 @@ xfs_inobt_newroot( | |||
1233 | /* | 1233 | /* |
1234 | * Set the root data in the a.g. inode structure. | 1234 | * Set the root data in the a.g. inode structure. |
1235 | */ | 1235 | */ |
1236 | INT_SET(agi->agi_root, ARCH_CONVERT, args.agbno); | 1236 | agi->agi_root = cpu_to_be32(args.agbno); |
1237 | INT_MOD(agi->agi_level, ARCH_CONVERT, 1); | 1237 | be32_add(&agi->agi_level, 1); |
1238 | xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp, | 1238 | xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp, |
1239 | XFS_AGI_ROOT | XFS_AGI_LEVEL); | 1239 | XFS_AGI_ROOT | XFS_AGI_LEVEL); |
1240 | /* | 1240 | /* |
@@ -1249,14 +1249,14 @@ xfs_inobt_newroot( | |||
1249 | if ((error = xfs_btree_check_sblock(cur, block, cur->bc_nlevels - 1, bp))) | 1249 | if ((error = xfs_btree_check_sblock(cur, block, cur->bc_nlevels - 1, bp))) |
1250 | return error; | 1250 | return error; |
1251 | #endif | 1251 | #endif |
1252 | if (INT_GET(block->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 1252 | if (be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) { |
1253 | /* | 1253 | /* |
1254 | * Our block is left, pick up the right block. | 1254 | * Our block is left, pick up the right block. |
1255 | */ | 1255 | */ |
1256 | lbp = bp; | 1256 | lbp = bp; |
1257 | lbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(lbp)); | 1257 | lbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(lbp)); |
1258 | left = block; | 1258 | left = block; |
1259 | rbno = INT_GET(left->bb_rightsib, ARCH_CONVERT); | 1259 | rbno = be32_to_cpu(left->bb_rightsib); |
1260 | if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, | 1260 | if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, |
1261 | rbno, 0, &rbp, XFS_INO_BTREE_REF))) | 1261 | rbno, 0, &rbp, XFS_INO_BTREE_REF))) |
1262 | return error; | 1262 | return error; |
@@ -1273,7 +1273,7 @@ xfs_inobt_newroot( | |||
1273 | rbp = bp; | 1273 | rbp = bp; |
1274 | rbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(rbp)); | 1274 | rbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(rbp)); |
1275 | right = block; | 1275 | right = block; |
1276 | lbno = INT_GET(right->bb_leftsib, ARCH_CONVERT); | 1276 | lbno = be32_to_cpu(right->bb_leftsib); |
1277 | if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, | 1277 | if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, |
1278 | lbno, 0, &lbp, XFS_INO_BTREE_REF))) | 1278 | lbno, 0, &lbp, XFS_INO_BTREE_REF))) |
1279 | return error; | 1279 | return error; |
@@ -1287,18 +1287,18 @@ xfs_inobt_newroot( | |||
1287 | /* | 1287 | /* |
1288 | * Fill in the new block's btree header and log it. | 1288 | * Fill in the new block's btree header and log it. |
1289 | */ | 1289 | */ |
1290 | INT_SET(new->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); | 1290 | new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); |
1291 | INT_SET(new->bb_level, ARCH_CONVERT, (__uint16_t)cur->bc_nlevels); | 1291 | new->bb_level = cpu_to_be16(cur->bc_nlevels); |
1292 | INT_SET(new->bb_numrecs, ARCH_CONVERT, 2); | 1292 | new->bb_numrecs = cpu_to_be16(2); |
1293 | INT_SET(new->bb_leftsib, ARCH_CONVERT, NULLAGBLOCK); | 1293 | new->bb_leftsib = cpu_to_be32(NULLAGBLOCK); |
1294 | INT_SET(new->bb_rightsib, ARCH_CONVERT, NULLAGBLOCK); | 1294 | new->bb_rightsib = cpu_to_be32(NULLAGBLOCK); |
1295 | xfs_inobt_log_block(args.tp, nbp, XFS_BB_ALL_BITS); | 1295 | xfs_inobt_log_block(args.tp, nbp, XFS_BB_ALL_BITS); |
1296 | ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); | 1296 | ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); |
1297 | /* | 1297 | /* |
1298 | * Fill in the key data in the new root. | 1298 | * Fill in the key data in the new root. |
1299 | */ | 1299 | */ |
1300 | kp = XFS_INOBT_KEY_ADDR(new, 1, cur); | 1300 | kp = XFS_INOBT_KEY_ADDR(new, 1, cur); |
1301 | if (INT_GET(left->bb_level, ARCH_CONVERT) > 0) { | 1301 | if (be16_to_cpu(left->bb_level) > 0) { |
1302 | kp[0] = *XFS_INOBT_KEY_ADDR(left, 1, cur); /* INT_: struct copy */ | 1302 | kp[0] = *XFS_INOBT_KEY_ADDR(left, 1, cur); /* INT_: struct copy */ |
1303 | kp[1] = *XFS_INOBT_KEY_ADDR(right, 1, cur); /* INT_: struct copy */ | 1303 | kp[1] = *XFS_INOBT_KEY_ADDR(right, 1, cur); /* INT_: struct copy */ |
1304 | } else { | 1304 | } else { |
@@ -1312,8 +1312,8 @@ xfs_inobt_newroot( | |||
1312 | * Fill in the pointer data in the new root. | 1312 | * Fill in the pointer data in the new root. |
1313 | */ | 1313 | */ |
1314 | pp = XFS_INOBT_PTR_ADDR(new, 1, cur); | 1314 | pp = XFS_INOBT_PTR_ADDR(new, 1, cur); |
1315 | INT_SET(pp[0], ARCH_CONVERT, lbno); | 1315 | pp[0] = cpu_to_be32(lbno); |
1316 | INT_SET(pp[1], ARCH_CONVERT, rbno); | 1316 | pp[1] = cpu_to_be32(rbno); |
1317 | xfs_inobt_log_ptrs(cur, nbp, 1, 2); | 1317 | xfs_inobt_log_ptrs(cur, nbp, 1, 2); |
1318 | /* | 1318 | /* |
1319 | * Fix up the cursor. | 1319 | * Fix up the cursor. |
@@ -1362,7 +1362,7 @@ xfs_inobt_rshift( | |||
1362 | /* | 1362 | /* |
1363 | * If we've got no right sibling then we can't shift an entry right. | 1363 | * If we've got no right sibling then we can't shift an entry right. |
1364 | */ | 1364 | */ |
1365 | if (INT_GET(left->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1365 | if (be32_to_cpu(left->bb_rightsib) == NULLAGBLOCK) { |
1366 | *stat = 0; | 1366 | *stat = 0; |
1367 | return 0; | 1367 | return 0; |
1368 | } | 1368 | } |
@@ -1370,7 +1370,7 @@ xfs_inobt_rshift( | |||
1370 | * If the cursor entry is the one that would be moved, don't | 1370 | * If the cursor entry is the one that would be moved, don't |
1371 | * do it... it's too complicated. | 1371 | * do it... it's too complicated. |
1372 | */ | 1372 | */ |
1373 | if (cur->bc_ptrs[level] >= INT_GET(left->bb_numrecs, ARCH_CONVERT)) { | 1373 | if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) { |
1374 | *stat = 0; | 1374 | *stat = 0; |
1375 | return 0; | 1375 | return 0; |
1376 | } | 1376 | } |
@@ -1378,8 +1378,8 @@ xfs_inobt_rshift( | |||
1378 | * Set up the right neighbor as "right". | 1378 | * Set up the right neighbor as "right". |
1379 | */ | 1379 | */ |
1380 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1380 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1381 | cur->bc_private.i.agno, INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rbp, | 1381 | cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib), |
1382 | XFS_INO_BTREE_REF))) | 1382 | 0, &rbp, XFS_INO_BTREE_REF))) |
1383 | return error; | 1383 | return error; |
1384 | right = XFS_BUF_TO_INOBT_BLOCK(rbp); | 1384 | right = XFS_BUF_TO_INOBT_BLOCK(rbp); |
1385 | if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) | 1385 | if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) |
@@ -1387,7 +1387,7 @@ xfs_inobt_rshift( | |||
1387 | /* | 1387 | /* |
1388 | * If it's full, it can't take another entry. | 1388 | * If it's full, it can't take another entry. |
1389 | */ | 1389 | */ |
1390 | if (INT_GET(right->bb_numrecs, ARCH_CONVERT) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { | 1390 | if (be16_to_cpu(right->bb_numrecs) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { |
1391 | *stat = 0; | 1391 | *stat = 0; |
1392 | return 0; | 1392 | return 0; |
1393 | } | 1393 | } |
@@ -1396,41 +1396,41 @@ xfs_inobt_rshift( | |||
1396 | * copy the last left block entry to the hole. | 1396 | * copy the last left block entry to the hole. |
1397 | */ | 1397 | */ |
1398 | if (level > 0) { | 1398 | if (level > 0) { |
1399 | lkp = XFS_INOBT_KEY_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1399 | lkp = XFS_INOBT_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1400 | lpp = XFS_INOBT_PTR_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1400 | lpp = XFS_INOBT_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1401 | rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); | 1401 | rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); |
1402 | rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); | 1402 | rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); |
1403 | #ifdef DEBUG | 1403 | #ifdef DEBUG |
1404 | for (i = INT_GET(right->bb_numrecs, ARCH_CONVERT) - 1; i >= 0; i--) { | 1404 | for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) { |
1405 | if ((error = xfs_btree_check_sptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) | 1405 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) |
1406 | return error; | 1406 | return error; |
1407 | } | 1407 | } |
1408 | #endif | 1408 | #endif |
1409 | memmove(rkp + 1, rkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); | 1409 | memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); |
1410 | memmove(rpp + 1, rpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); | 1410 | memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); |
1411 | #ifdef DEBUG | 1411 | #ifdef DEBUG |
1412 | if ((error = xfs_btree_check_sptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) | 1412 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level))) |
1413 | return error; | 1413 | return error; |
1414 | #endif | 1414 | #endif |
1415 | *rkp = *lkp; /* INT_: no change copy */ | 1415 | *rkp = *lkp; /* INT_: no change copy */ |
1416 | *rpp = *lpp; /* INT_: no change copy */ | 1416 | *rpp = *lpp; /* INT_: no change copy */ |
1417 | xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1417 | xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1418 | xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1418 | xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1419 | } else { | 1419 | } else { |
1420 | lrp = XFS_INOBT_REC_ADDR(left, INT_GET(left->bb_numrecs, ARCH_CONVERT), cur); | 1420 | lrp = XFS_INOBT_REC_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); |
1421 | rrp = XFS_INOBT_REC_ADDR(right, 1, cur); | 1421 | rrp = XFS_INOBT_REC_ADDR(right, 1, cur); |
1422 | memmove(rrp + 1, rrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); | 1422 | memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); |
1423 | *rrp = *lrp; | 1423 | *rrp = *lrp; |
1424 | xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1); | 1424 | xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); |
1425 | key.ir_startino = rrp->ir_startino; /* INT_: direct copy */ | 1425 | key.ir_startino = rrp->ir_startino; /* INT_: direct copy */ |
1426 | rkp = &key; | 1426 | rkp = &key; |
1427 | } | 1427 | } |
1428 | /* | 1428 | /* |
1429 | * Decrement and log left's numrecs, bump and log right's numrecs. | 1429 | * Decrement and log left's numrecs, bump and log right's numrecs. |
1430 | */ | 1430 | */ |
1431 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, -1); | 1431 | be16_add(&left->bb_numrecs, -1); |
1432 | xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); | 1432 | xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); |
1433 | INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); | 1433 | be16_add(&right->bb_numrecs, 1); |
1434 | #ifdef DEBUG | 1434 | #ifdef DEBUG |
1435 | if (level > 0) | 1435 | if (level > 0) |
1436 | xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); | 1436 | xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); |
@@ -1522,17 +1522,17 @@ xfs_inobt_split( | |||
1522 | /* | 1522 | /* |
1523 | * Fill in the btree header for the new block. | 1523 | * Fill in the btree header for the new block. |
1524 | */ | 1524 | */ |
1525 | INT_SET(right->bb_magic, ARCH_CONVERT, xfs_magics[cur->bc_btnum]); | 1525 | right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); |
1526 | right->bb_level = left->bb_level; /* INT_: direct copy */ | 1526 | right->bb_level = left->bb_level; |
1527 | INT_SET(right->bb_numrecs, ARCH_CONVERT, (__uint16_t)(INT_GET(left->bb_numrecs, ARCH_CONVERT) / 2)); | 1527 | right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2); |
1528 | /* | 1528 | /* |
1529 | * Make sure that if there's an odd number of entries now, that | 1529 | * Make sure that if there's an odd number of entries now, that |
1530 | * each new block will have the same number of entries. | 1530 | * each new block will have the same number of entries. |
1531 | */ | 1531 | */ |
1532 | if ((INT_GET(left->bb_numrecs, ARCH_CONVERT) & 1) && | 1532 | if ((be16_to_cpu(left->bb_numrecs) & 1) && |
1533 | cur->bc_ptrs[level] <= INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1) | 1533 | cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) |
1534 | INT_MOD(right->bb_numrecs, ARCH_CONVERT, +1); | 1534 | be16_add(&right->bb_numrecs, 1); |
1535 | i = INT_GET(left->bb_numrecs, ARCH_CONVERT) - INT_GET(right->bb_numrecs, ARCH_CONVERT) + 1; | 1535 | i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; |
1536 | /* | 1536 | /* |
1537 | * For non-leaf blocks, copy keys and addresses over to the new block. | 1537 | * For non-leaf blocks, copy keys and addresses over to the new block. |
1538 | */ | 1538 | */ |
@@ -1542,15 +1542,15 @@ xfs_inobt_split( | |||
1542 | rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); | 1542 | rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); |
1543 | rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); | 1543 | rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); |
1544 | #ifdef DEBUG | 1544 | #ifdef DEBUG |
1545 | for (i = 0; i < INT_GET(right->bb_numrecs, ARCH_CONVERT); i++) { | 1545 | for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { |
1546 | if ((error = xfs_btree_check_sptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) | 1546 | if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level))) |
1547 | return error; | 1547 | return error; |
1548 | } | 1548 | } |
1549 | #endif | 1549 | #endif |
1550 | memcpy(rkp, lkp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rkp)); | 1550 | memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); |
1551 | memcpy(rpp, lpp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rpp)); | 1551 | memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); |
1552 | xfs_inobt_log_keys(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1552 | xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1553 | xfs_inobt_log_ptrs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1553 | xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1554 | *keyp = *rkp; | 1554 | *keyp = *rkp; |
1555 | } | 1555 | } |
1556 | /* | 1556 | /* |
@@ -1559,36 +1559,36 @@ xfs_inobt_split( | |||
1559 | else { | 1559 | else { |
1560 | lrp = XFS_INOBT_REC_ADDR(left, i, cur); | 1560 | lrp = XFS_INOBT_REC_ADDR(left, i, cur); |
1561 | rrp = XFS_INOBT_REC_ADDR(right, 1, cur); | 1561 | rrp = XFS_INOBT_REC_ADDR(right, 1, cur); |
1562 | memcpy(rrp, lrp, INT_GET(right->bb_numrecs, ARCH_CONVERT) * sizeof(*rrp)); | 1562 | memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); |
1563 | xfs_inobt_log_recs(cur, rbp, 1, INT_GET(right->bb_numrecs, ARCH_CONVERT)); | 1563 | xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1564 | keyp->ir_startino = rrp->ir_startino; /* INT_: direct copy */ | 1564 | keyp->ir_startino = rrp->ir_startino; /* INT_: direct copy */ |
1565 | } | 1565 | } |
1566 | /* | 1566 | /* |
1567 | * Find the left block number by looking in the buffer. | 1567 | * Find the left block number by looking in the buffer. |
1568 | * Adjust numrecs, sibling pointers. | 1568 | * Adjust numrecs, sibling pointers. |
1569 | */ | 1569 | */ |
1570 | INT_MOD(left->bb_numrecs, ARCH_CONVERT, -(INT_GET(right->bb_numrecs, ARCH_CONVERT))); | 1570 | be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); |
1571 | right->bb_rightsib = left->bb_rightsib; /* INT_: direct copy */ | 1571 | right->bb_rightsib = left->bb_rightsib; |
1572 | INT_SET(left->bb_rightsib, ARCH_CONVERT, args.agbno); | 1572 | left->bb_rightsib = cpu_to_be32(args.agbno); |
1573 | INT_SET(right->bb_leftsib, ARCH_CONVERT, lbno); | 1573 | right->bb_leftsib = cpu_to_be32(lbno); |
1574 | xfs_inobt_log_block(args.tp, rbp, XFS_BB_ALL_BITS); | 1574 | xfs_inobt_log_block(args.tp, rbp, XFS_BB_ALL_BITS); |
1575 | xfs_inobt_log_block(args.tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); | 1575 | xfs_inobt_log_block(args.tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); |
1576 | /* | 1576 | /* |
1577 | * If there's a block to the new block's right, make that block | 1577 | * If there's a block to the new block's right, make that block |
1578 | * point back to right instead of to left. | 1578 | * point back to right instead of to left. |
1579 | */ | 1579 | */ |
1580 | if (INT_GET(right->bb_rightsib, ARCH_CONVERT) != NULLAGBLOCK) { | 1580 | if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) { |
1581 | xfs_inobt_block_t *rrblock; /* rr btree block */ | 1581 | xfs_inobt_block_t *rrblock; /* rr btree block */ |
1582 | xfs_buf_t *rrbp; /* buffer for rrblock */ | 1582 | xfs_buf_t *rrbp; /* buffer for rrblock */ |
1583 | 1583 | ||
1584 | if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, | 1584 | if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, |
1585 | INT_GET(right->bb_rightsib, ARCH_CONVERT), 0, &rrbp, | 1585 | be32_to_cpu(right->bb_rightsib), 0, &rrbp, |
1586 | XFS_INO_BTREE_REF))) | 1586 | XFS_INO_BTREE_REF))) |
1587 | return error; | 1587 | return error; |
1588 | rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp); | 1588 | rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp); |
1589 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) | 1589 | if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) |
1590 | return error; | 1590 | return error; |
1591 | INT_SET(rrblock->bb_leftsib, ARCH_CONVERT, args.agbno); | 1591 | rrblock->bb_leftsib = cpu_to_be32(args.agbno); |
1592 | xfs_inobt_log_block(args.tp, rrbp, XFS_BB_LEFTSIB); | 1592 | xfs_inobt_log_block(args.tp, rrbp, XFS_BB_LEFTSIB); |
1593 | } | 1593 | } |
1594 | /* | 1594 | /* |
@@ -1596,9 +1596,9 @@ xfs_inobt_split( | |||
1596 | * If it's just pointing past the last entry in left, then we'll | 1596 | * If it's just pointing past the last entry in left, then we'll |
1597 | * insert there, so don't change anything in that case. | 1597 | * insert there, so don't change anything in that case. |
1598 | */ | 1598 | */ |
1599 | if (cur->bc_ptrs[level] > INT_GET(left->bb_numrecs, ARCH_CONVERT) + 1) { | 1599 | if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) { |
1600 | xfs_btree_setbuf(cur, level, rbp); | 1600 | xfs_btree_setbuf(cur, level, rbp); |
1601 | cur->bc_ptrs[level] -= INT_GET(left->bb_numrecs, ARCH_CONVERT); | 1601 | cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs); |
1602 | } | 1602 | } |
1603 | /* | 1603 | /* |
1604 | * If there are more levels, we'll need another cursor which refers | 1604 | * If there are more levels, we'll need another cursor which refers |
@@ -1696,7 +1696,7 @@ xfs_inobt_decrement( | |||
1696 | /* | 1696 | /* |
1697 | * If we just went off the left edge of the tree, return failure. | 1697 | * If we just went off the left edge of the tree, return failure. |
1698 | */ | 1698 | */ |
1699 | if (INT_GET(block->bb_leftsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1699 | if (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK) { |
1700 | *stat = 0; | 1700 | *stat = 0; |
1701 | return 0; | 1701 | return 0; |
1702 | } | 1702 | } |
@@ -1725,7 +1725,7 @@ xfs_inobt_decrement( | |||
1725 | xfs_agblock_t agbno; /* block number of btree block */ | 1725 | xfs_agblock_t agbno; /* block number of btree block */ |
1726 | xfs_buf_t *bp; /* buffer containing btree block */ | 1726 | xfs_buf_t *bp; /* buffer containing btree block */ |
1727 | 1727 | ||
1728 | agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); | 1728 | agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); |
1729 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1729 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1730 | cur->bc_private.i.agno, agbno, 0, &bp, | 1730 | cur->bc_private.i.agno, agbno, 0, &bp, |
1731 | XFS_INO_BTREE_REF))) | 1731 | XFS_INO_BTREE_REF))) |
@@ -1735,7 +1735,7 @@ xfs_inobt_decrement( | |||
1735 | block = XFS_BUF_TO_INOBT_BLOCK(bp); | 1735 | block = XFS_BUF_TO_INOBT_BLOCK(bp); |
1736 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) | 1736 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) |
1737 | return error; | 1737 | return error; |
1738 | cur->bc_ptrs[lev] = INT_GET(block->bb_numrecs, ARCH_CONVERT); | 1738 | cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs); |
1739 | } | 1739 | } |
1740 | *stat = 1; | 1740 | *stat = 1; |
1741 | return 0; | 1741 | return 0; |
@@ -1807,7 +1807,7 @@ xfs_inobt_get_rec( | |||
1807 | /* | 1807 | /* |
1808 | * Off the right end or left end, return failure. | 1808 | * Off the right end or left end, return failure. |
1809 | */ | 1809 | */ |
1810 | if (ptr > INT_GET(block->bb_numrecs, ARCH_CONVERT) || ptr <= 0) { | 1810 | if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) { |
1811 | *stat = 0; | 1811 | *stat = 0; |
1812 | return 0; | 1812 | return 0; |
1813 | } | 1813 | } |
@@ -1855,14 +1855,14 @@ xfs_inobt_increment( | |||
1855 | * Increment the ptr at this level. If we're still in the block | 1855 | * Increment the ptr at this level. If we're still in the block |
1856 | * then we're done. | 1856 | * then we're done. |
1857 | */ | 1857 | */ |
1858 | if (++cur->bc_ptrs[level] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) { | 1858 | if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) { |
1859 | *stat = 1; | 1859 | *stat = 1; |
1860 | return 0; | 1860 | return 0; |
1861 | } | 1861 | } |
1862 | /* | 1862 | /* |
1863 | * If we just went off the right edge of the tree, return failure. | 1863 | * If we just went off the right edge of the tree, return failure. |
1864 | */ | 1864 | */ |
1865 | if (INT_GET(block->bb_rightsib, ARCH_CONVERT) == NULLAGBLOCK) { | 1865 | if (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK) { |
1866 | *stat = 0; | 1866 | *stat = 0; |
1867 | return 0; | 1867 | return 0; |
1868 | } | 1868 | } |
@@ -1877,7 +1877,7 @@ xfs_inobt_increment( | |||
1877 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) | 1877 | if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) |
1878 | return error; | 1878 | return error; |
1879 | #endif | 1879 | #endif |
1880 | if (++cur->bc_ptrs[lev] <= INT_GET(block->bb_numrecs, ARCH_CONVERT)) | 1880 | if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs)) |
1881 | break; | 1881 | break; |
1882 | /* | 1882 | /* |
1883 | * Read-ahead the right block, we're going to read it | 1883 | * Read-ahead the right block, we're going to read it |
@@ -1897,7 +1897,7 @@ xfs_inobt_increment( | |||
1897 | lev > level; ) { | 1897 | lev > level; ) { |
1898 | xfs_agblock_t agbno; /* block number of btree block */ | 1898 | xfs_agblock_t agbno; /* block number of btree block */ |
1899 | 1899 | ||
1900 | agbno = INT_GET(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); | 1900 | agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); |
1901 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1901 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1902 | cur->bc_private.i.agno, agbno, 0, &bp, | 1902 | cur->bc_private.i.agno, agbno, 0, &bp, |
1903 | XFS_INO_BTREE_REF))) | 1903 | XFS_INO_BTREE_REF))) |
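
The xfs_ialloc_btree.c hunks above all follow one pattern: fields of the short-form btree block header are now stored as explicitly big-endian quantities, so every read goes through be16_to_cpu()/be32_to_cpu(), every store goes through cpu_to_be16()/cpu_to_be32(), and the old INT_MOD() read-modify-write becomes a be16_add()/be32_add()-style helper. As a rough standalone illustration of that access discipline (the struct, values and toy_ helpers below are invented for the sketch and are not the kernel definitions; the kernel's own conversion helpers come from its byteorder headers):

/* sketch.c -- illustrative only; build with: cc -std=c99 -o sketch sketch.c */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* htons()/ntohs(), htonl()/ntohl() */

/*
 * Toy stand-in for a short-form btree block header: the fields hold raw
 * big-endian values, exactly as read from disk, and must never be used
 * in arithmetic without converting first.
 */
struct toy_sblock {
	uint32_t bb_magic;	/* magic number, big-endian */
	uint16_t bb_level;	/* level (0 == leaf), big-endian */
	uint16_t bb_numrecs;	/* record count, big-endian */
	uint32_t bb_leftsib;	/* left sibling block, big-endian */
	uint32_t bb_rightsib;	/* right sibling block, big-endian */
};

/* be16_add()-style helper: convert, adjust, convert back, store. */
static void toy_be16_add(uint16_t *field, int delta)
{
	*field = htons((uint16_t)(ntohs(*field) + delta));
}

int main(void)
{
	struct toy_sblock b = {
		.bb_magic    = htonl(0x49414254),	/* spells "IABT" */
		.bb_level    = htons(0),
		.bb_numrecs  = htons(133),
		.bb_leftsib  = htonl(0xffffffffu),	/* no left sibling */
		.bb_rightsib = htonl(0xffffffffu),	/* no right sibling */
	};

	/* Read side: always convert before comparing or counting. */
	printf("numrecs = %u\n", ntohs(b.bb_numrecs));

	/*
	 * Write side: shifting one record out to a sibling decrements
	 * numrecs, the be16_add(..., -1) case in the diff above.
	 */
	toy_be16_add(&b.bb_numrecs, -1);
	printf("numrecs after shift = %u\n", ntohs(b.bb_numrecs));
	return 0;
}

The point of the helper is that the raw field is only ever touched by a conversion in and a conversion out, never by ordinary arithmetic.
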
diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h index 86ed749d4cc1..ae3904cb1ee8 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/xfs_ialloc_btree.h | |||
@@ -62,8 +62,10 @@ typedef struct xfs_inobt_key | |||
62 | xfs_agino_t ir_startino; /* starting inode number */ | 62 | xfs_agino_t ir_startino; /* starting inode number */ |
63 | } xfs_inobt_key_t; | 63 | } xfs_inobt_key_t; |
64 | 64 | ||
65 | typedef xfs_agblock_t xfs_inobt_ptr_t; /* btree pointer type */ | 65 | /* btree pointer type */ |
66 | /* btree block header type */ | 66 | typedef __be32 xfs_inobt_ptr_t; |
67 | |||
68 | /* btree block header type */ | ||
67 | typedef struct xfs_btree_sblock xfs_inobt_block_t; | 69 | typedef struct xfs_btree_sblock xfs_inobt_block_t; |
68 | 70 | ||
69 | #define XFS_BUF_TO_INOBT_BLOCK(bp) ((xfs_inobt_block_t *)XFS_BUF_PTR(bp)) | 71 | #define XFS_BUF_TO_INOBT_BLOCK(bp) ((xfs_inobt_block_t *)XFS_BUF_PTR(bp)) |
@@ -86,7 +88,7 @@ typedef struct xfs_btree_sblock xfs_inobt_block_t; | |||
86 | #define XFS_INOBT_BLOCK_MAXRECS(lev,cur) ((cur)->bc_mp->m_inobt_mxr[lev != 0]) | 88 | #define XFS_INOBT_BLOCK_MAXRECS(lev,cur) ((cur)->bc_mp->m_inobt_mxr[lev != 0]) |
87 | #define XFS_INOBT_BLOCK_MINRECS(lev,cur) ((cur)->bc_mp->m_inobt_mnr[lev != 0]) | 89 | #define XFS_INOBT_BLOCK_MINRECS(lev,cur) ((cur)->bc_mp->m_inobt_mnr[lev != 0]) |
88 | #define XFS_INOBT_IS_LAST_REC(cur) \ | 90 | #define XFS_INOBT_IS_LAST_REC(cur) \ |
89 | ((cur)->bc_ptrs[0] == INT_GET(XFS_BUF_TO_INOBT_BLOCK((cur)->bc_bufs[0])->bb_numrecs, ARCH_CONVERT)) | 91 | ((cur)->bc_ptrs[0] == be16_to_cpu(XFS_BUF_TO_INOBT_BLOCK((cur)->bc_bufs[0])->bb_numrecs)) |
90 | 92 | ||
91 | /* | 93 | /* |
92 | * Maximum number of inode btree levels. | 94 | * Maximum number of inode btree levels. |
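
In the header, the inode-btree pointer type changes from a plain xfs_agblock_t to an annotated big-endian 32-bit type, which lets static checkers flag any access that skips the conversion. A loose userspace analogy for why a distinct type helps (the wrapper struct below only mimics the effect; the kernel gets this checking from sparse's __bitwise annotation on __be32, not from a struct):

/* endian_type.c -- illustrative only; cc -std=c99 -o endian_type endian_type.c */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/*
 * Wrapping the raw big-endian value in a struct makes "use it as a plain
 * integer" a compile error instead of a silent byte-order bug.
 */
typedef struct { uint32_t raw; } toy_be32;

static toy_be32 toy_cpu_to_be32(uint32_t x) { return (toy_be32){ htonl(x) }; }
static uint32_t toy_be32_to_cpu(toy_be32 x) { return ntohl(x.raw); }

/* Toy counterpart of the btree pointer type from the header above. */
typedef toy_be32 toy_inobt_ptr_t;

int main(void)
{
	toy_inobt_ptr_t pp[2];

	pp[0] = toy_cpu_to_be32(12);	/* left child block number */
	pp[1] = toy_cpu_to_be32(57);	/* right child block number */

	/* pp[0] + 1 would not compile here; the conversion can't be skipped. */
	printf("left child = %u, right child = %u\n",
	       toy_be32_to_cpu(pp[0]), toy_be32_to_cpu(pp[1]));
	return 0;
}
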
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 281cbd45b3d3..df0d4572d70a 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -1864,8 +1864,8 @@ xfs_iunlink( | |||
1864 | */ | 1864 | */ |
1865 | agi = XFS_BUF_TO_AGI(agibp); | 1865 | agi = XFS_BUF_TO_AGI(agibp); |
1866 | agi_ok = | 1866 | agi_ok = |
1867 | INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC && | 1867 | be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && |
1868 | XFS_AGI_GOOD_VERSION(INT_GET(agi->agi_versionnum, ARCH_CONVERT)); | 1868 | XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)); |
1869 | if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK, | 1869 | if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK, |
1870 | XFS_RANDOM_IUNLINK))) { | 1870 | XFS_RANDOM_IUNLINK))) { |
1871 | XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi); | 1871 | XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi); |
@@ -1880,9 +1880,9 @@ xfs_iunlink( | |||
1880 | ASSERT(agino != 0); | 1880 | ASSERT(agino != 0); |
1881 | bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; | 1881 | bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; |
1882 | ASSERT(agi->agi_unlinked[bucket_index]); | 1882 | ASSERT(agi->agi_unlinked[bucket_index]); |
1883 | ASSERT(INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != agino); | 1883 | ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino); |
1884 | 1884 | ||
1885 | if (INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != NULLAGINO) { | 1885 | if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) { |
1886 | /* | 1886 | /* |
1887 | * There is already another inode in the bucket we need | 1887 | * There is already another inode in the bucket we need |
1888 | * to add ourselves to. Add us at the front of the list. | 1888 | * to add ourselves to. Add us at the front of the list. |
@@ -1909,7 +1909,7 @@ xfs_iunlink( | |||
1909 | * Point the bucket head pointer at the inode being inserted. | 1909 | * Point the bucket head pointer at the inode being inserted. |
1910 | */ | 1910 | */ |
1911 | ASSERT(agino != 0); | 1911 | ASSERT(agino != 0); |
1912 | INT_SET(agi->agi_unlinked[bucket_index], ARCH_CONVERT, agino); | 1912 | agi->agi_unlinked[bucket_index] = cpu_to_be32(agino); |
1913 | offset = offsetof(xfs_agi_t, agi_unlinked) + | 1913 | offset = offsetof(xfs_agi_t, agi_unlinked) + |
1914 | (sizeof(xfs_agino_t) * bucket_index); | 1914 | (sizeof(xfs_agino_t) * bucket_index); |
1915 | xfs_trans_log_buf(tp, agibp, offset, | 1915 | xfs_trans_log_buf(tp, agibp, offset, |
@@ -1967,8 +1967,8 @@ xfs_iunlink_remove( | |||
1967 | */ | 1967 | */ |
1968 | agi = XFS_BUF_TO_AGI(agibp); | 1968 | agi = XFS_BUF_TO_AGI(agibp); |
1969 | agi_ok = | 1969 | agi_ok = |
1970 | INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC && | 1970 | be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && |
1971 | XFS_AGI_GOOD_VERSION(INT_GET(agi->agi_versionnum, ARCH_CONVERT)); | 1971 | XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)); |
1972 | if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE, | 1972 | if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE, |
1973 | XFS_RANDOM_IUNLINK_REMOVE))) { | 1973 | XFS_RANDOM_IUNLINK_REMOVE))) { |
1974 | XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW, | 1974 | XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW, |
@@ -1986,10 +1986,10 @@ xfs_iunlink_remove( | |||
1986 | agino = XFS_INO_TO_AGINO(mp, ip->i_ino); | 1986 | agino = XFS_INO_TO_AGINO(mp, ip->i_ino); |
1987 | ASSERT(agino != 0); | 1987 | ASSERT(agino != 0); |
1988 | bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; | 1988 | bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; |
1989 | ASSERT(INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) != NULLAGINO); | 1989 | ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO); |
1990 | ASSERT(agi->agi_unlinked[bucket_index]); | 1990 | ASSERT(agi->agi_unlinked[bucket_index]); |
1991 | 1991 | ||
1992 | if (INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT) == agino) { | 1992 | if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) { |
1993 | /* | 1993 | /* |
1994 | * We're at the head of the list. Get the inode's | 1994 | * We're at the head of the list. Get the inode's |
1995 | * on-disk buffer to see if there is anyone after us | 1995 | * on-disk buffer to see if there is anyone after us |
@@ -2023,7 +2023,7 @@ xfs_iunlink_remove( | |||
2023 | */ | 2023 | */ |
2024 | ASSERT(next_agino != 0); | 2024 | ASSERT(next_agino != 0); |
2025 | ASSERT(next_agino != agino); | 2025 | ASSERT(next_agino != agino); |
2026 | INT_SET(agi->agi_unlinked[bucket_index], ARCH_CONVERT, next_agino); | 2026 | agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino); |
2027 | offset = offsetof(xfs_agi_t, agi_unlinked) + | 2027 | offset = offsetof(xfs_agi_t, agi_unlinked) + |
2028 | (sizeof(xfs_agino_t) * bucket_index); | 2028 | (sizeof(xfs_agino_t) * bucket_index); |
2029 | xfs_trans_log_buf(tp, agibp, offset, | 2029 | xfs_trans_log_buf(tp, agibp, offset, |
@@ -2032,7 +2032,7 @@ xfs_iunlink_remove( | |||
2032 | /* | 2032 | /* |
2033 | * We need to search the list for the inode being freed. | 2033 | * We need to search the list for the inode being freed. |
2034 | */ | 2034 | */ |
2035 | next_agino = INT_GET(agi->agi_unlinked[bucket_index], ARCH_CONVERT); | 2035 | next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); |
2036 | last_ibp = NULL; | 2036 | last_ibp = NULL; |
2037 | while (next_agino != agino) { | 2037 | while (next_agino != agino) { |
2038 | /* | 2038 | /* |
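
The xfs_iunlink()/xfs_iunlink_remove() hunks convert every access to the AGI unlinked buckets. Each bucket holds the head of a singly linked list of unlinked-but-still-referenced inodes, selected by agino modulo the bucket count, and new inodes are pushed onto the front of the list. A minimal sketch of that head insertion (bucket count, sentinel and helper names are chosen for the example; the real code also records the old head in the inode's on-disk next-unlinked pointer and logs the modified AGI range with xfs_trans_log_buf()):

/* unlinked_bucket.c -- illustrative only; cc -std=c99 -o unlinked_bucket unlinked_bucket.c */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define TOY_UNLINKED_BUCKETS 64
#define TOY_NULLAGINO        0xffffffffu	/* "empty bucket" sentinel */

/* Per-AG table of unlinked-list heads, kept big-endian as on disk. */
static uint32_t toy_agi_unlinked[TOY_UNLINKED_BUCKETS];

static void toy_init(void)
{
	for (int i = 0; i < TOY_UNLINKED_BUCKETS; i++)
		toy_agi_unlinked[i] = htonl(TOY_NULLAGINO);
}

/*
 * Push agino on the front of its bucket and return the previous head,
 * which the caller would record as the inode's "next unlinked" pointer.
 */
static uint32_t toy_iunlink(uint32_t agino)
{
	int bucket = agino % TOY_UNLINKED_BUCKETS;
	uint32_t old_head = ntohl(toy_agi_unlinked[bucket]);

	toy_agi_unlinked[bucket] = htonl(agino);
	return old_head;
}

int main(void)
{
	toy_init();

	uint32_t prev = toy_iunlink(7001);
	int bucket = 7001 % TOY_UNLINKED_BUCKETS;

	printf("bucket %d: head = %u, previous head = %#x\n",
	       bucket, ntohl(toy_agi_unlinked[bucket]), prev);
	return 0;
}
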
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index a4d186d2db47..f63646ead816 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c | |||
@@ -448,7 +448,7 @@ xfs_bulkstat( | |||
448 | while (error) { | 448 | while (error) { |
449 | agino += XFS_INODES_PER_CHUNK; | 449 | agino += XFS_INODES_PER_CHUNK; |
450 | if (XFS_AGINO_TO_AGBNO(mp, agino) >= | 450 | if (XFS_AGINO_TO_AGBNO(mp, agino) >= |
451 | INT_GET(agi->agi_length, ARCH_CONVERT)) | 451 | be32_to_cpu(agi->agi_length)) |
452 | break; | 452 | break; |
453 | error = xfs_inobt_lookup_ge(cur, agino, 0, 0, | 453 | error = xfs_inobt_lookup_ge(cur, agino, 0, 0, |
454 | &tmp); | 454 | &tmp); |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 63268984762a..8ab7df768063 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -3160,13 +3160,12 @@ xlog_recover_clear_agi_bucket( | |||
3160 | } | 3160 | } |
3161 | 3161 | ||
3162 | agi = XFS_BUF_TO_AGI(agibp); | 3162 | agi = XFS_BUF_TO_AGI(agibp); |
3163 | if (INT_GET(agi->agi_magicnum, ARCH_CONVERT) != XFS_AGI_MAGIC) { | 3163 | if (be32_to_cpu(agi->agi_magicnum) != XFS_AGI_MAGIC) { |
3164 | xfs_trans_cancel(tp, XFS_TRANS_ABORT); | 3164 | xfs_trans_cancel(tp, XFS_TRANS_ABORT); |
3165 | return; | 3165 | return; |
3166 | } | 3166 | } |
3167 | ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC); | ||
3168 | 3167 | ||
3169 | INT_SET(agi->agi_unlinked[bucket], ARCH_CONVERT, NULLAGINO); | 3168 | agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); |
3170 | offset = offsetof(xfs_agi_t, agi_unlinked) + | 3169 | offset = offsetof(xfs_agi_t, agi_unlinked) + |
3171 | (sizeof(xfs_agino_t) * bucket); | 3170 | (sizeof(xfs_agino_t) * bucket); |
3172 | xfs_trans_log_buf(tp, agibp, offset, | 3171 | xfs_trans_log_buf(tp, agibp, offset, |
@@ -3225,12 +3224,11 @@ xlog_recover_process_iunlinks( | |||
3225 | XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp))); | 3224 | XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp))); |
3226 | } | 3225 | } |
3227 | agi = XFS_BUF_TO_AGI(agibp); | 3226 | agi = XFS_BUF_TO_AGI(agibp); |
3228 | ASSERT(XFS_AGI_MAGIC == | 3227 | ASSERT(XFS_AGI_MAGIC == be32_to_cpu(agi->agi_magicnum)); |
3229 | INT_GET(agi->agi_magicnum, ARCH_CONVERT)); | ||
3230 | 3228 | ||
3231 | for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { | 3229 | for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { |
3232 | 3230 | ||
3233 | agino = INT_GET(agi->agi_unlinked[bucket], ARCH_CONVERT); | 3231 | agino = be32_to_cpu(agi->agi_unlinked[bucket]); |
3234 | while (agino != NULLAGINO) { | 3232 | while (agino != NULLAGINO) { |
3235 | 3233 | ||
3236 | /* | 3234 | /* |
@@ -3318,8 +3316,8 @@ xlog_recover_process_iunlinks( | |||
3318 | XFS_AGI_DADDR(mp))); | 3316 | XFS_AGI_DADDR(mp))); |
3319 | } | 3317 | } |
3320 | agi = XFS_BUF_TO_AGI(agibp); | 3318 | agi = XFS_BUF_TO_AGI(agibp); |
3321 | ASSERT(XFS_AGI_MAGIC == INT_GET( | 3319 | ASSERT(XFS_AGI_MAGIC == be32_to_cpu( |
3322 | agi->agi_magicnum, ARCH_CONVERT)); | 3320 | agi->agi_magicnum)); |
3323 | } | 3321 | } |
3324 | } | 3322 | } |
3325 | 3323 | ||
@@ -4022,14 +4020,12 @@ xlog_recover_check_summary( | |||
4022 | mp, agfbp, agfdaddr); | 4020 | mp, agfbp, agfdaddr); |
4023 | } | 4021 | } |
4024 | agfp = XFS_BUF_TO_AGF(agfbp); | 4022 | agfp = XFS_BUF_TO_AGF(agfbp); |
4025 | ASSERT(XFS_AGF_MAGIC == | 4023 | ASSERT(XFS_AGF_MAGIC == be32_to_cpu(agfp->agf_magicnum)); |
4026 | INT_GET(agfp->agf_magicnum, ARCH_CONVERT)); | 4024 | ASSERT(XFS_AGF_GOOD_VERSION(be32_to_cpu(agfp->agf_versionnum))); |
4027 | ASSERT(XFS_AGF_GOOD_VERSION( | 4025 | ASSERT(be32_to_cpu(agfp->agf_seqno) == agno); |
4028 | INT_GET(agfp->agf_versionnum, ARCH_CONVERT))); | 4026 | |
4029 | ASSERT(INT_GET(agfp->agf_seqno, ARCH_CONVERT) == agno); | 4027 | freeblks += be32_to_cpu(agfp->agf_freeblks) + |
4030 | 4028 | be32_to_cpu(agfp->agf_flcount); | |
4031 | freeblks += INT_GET(agfp->agf_freeblks, ARCH_CONVERT) + | ||
4032 | INT_GET(agfp->agf_flcount, ARCH_CONVERT); | ||
4033 | xfs_buf_relse(agfbp); | 4029 | xfs_buf_relse(agfbp); |
4034 | 4030 | ||
4035 | agidaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); | 4031 | agidaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); |
@@ -4040,14 +4036,12 @@ xlog_recover_check_summary( | |||
4040 | mp, agibp, agidaddr); | 4036 | mp, agibp, agidaddr); |
4041 | } | 4037 | } |
4042 | agip = XFS_BUF_TO_AGI(agibp); | 4038 | agip = XFS_BUF_TO_AGI(agibp); |
4043 | ASSERT(XFS_AGI_MAGIC == | 4039 | ASSERT(XFS_AGI_MAGIC == be32_to_cpu(agip->agi_magicnum)); |
4044 | INT_GET(agip->agi_magicnum, ARCH_CONVERT)); | 4040 | ASSERT(XFS_AGI_GOOD_VERSION(be32_to_cpu(agip->agi_versionnum))); |
4045 | ASSERT(XFS_AGI_GOOD_VERSION( | 4041 | ASSERT(be32_to_cpu(agip->agi_seqno) == agno); |
4046 | INT_GET(agip->agi_versionnum, ARCH_CONVERT))); | 4042 | |
4047 | ASSERT(INT_GET(agip->agi_seqno, ARCH_CONVERT) == agno); | 4043 | itotal += be32_to_cpu(agip->agi_count); |
4048 | 4044 | ifree += be32_to_cpu(agip->agi_freecount); | |
4049 | itotal += INT_GET(agip->agi_count, ARCH_CONVERT); | ||
4050 | ifree += INT_GET(agip->agi_freecount, ARCH_CONVERT); | ||
4051 | xfs_buf_relse(agibp); | 4045 | xfs_buf_relse(agibp); |
4052 | } | 4046 | } |
4053 | 4047 | ||
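
Finally, the xlog_recover_check_summary() hunks convert the per-AG counters as they are folded into the recovery-time totals. The shape of that accumulation is roughly the following (the toy_agf/toy_agi structs carry only the counters the check reads, with invented values; the real function walks every allocation group and reads the full on-disk headers):

/* summary_check.c -- illustrative only; cc -std=c99 -o summary_check summary_check.c */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Only the counters the summary check reads, big-endian as on disk. */
struct toy_agf { uint32_t agf_freeblks, agf_flcount; };
struct toy_agi { uint32_t agi_count, agi_freecount; };

int main(void)
{
	/* Two allocation groups with made-up counter values. */
	struct toy_agf agf[2] = {
		{ htonl(1000), htonl(4) },
		{ htonl( 250), htonl(6) },
	};
	struct toy_agi agi[2] = {
		{ htonl(64), htonl(3) },
		{ htonl(32), htonl(9) },
	};
	uint64_t freeblks = 0, itotal = 0, ifree = 0;

	/*
	 * Convert each counter to CPU order before folding it into the
	 * running totals, one allocation group at a time.
	 */
	for (int agno = 0; agno < 2; agno++) {
		freeblks += ntohl(agf[agno].agf_freeblks) +
			    ntohl(agf[agno].agf_flcount);
		itotal   += ntohl(agi[agno].agi_count);
		ifree    += ntohl(agi[agno].agi_freecount);
	}

	printf("freeblks = %llu, itotal = %llu, ifree = %llu\n",
	       (unsigned long long)freeblks,
	       (unsigned long long)itotal,
	       (unsigned long long)ifree);
	return 0;
}
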