aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs/xfs_btree.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2013-04-21 15:53:46 -0400
committerBen Myers <bpm@sgi.com>2013-04-21 15:53:46 -0400
commitee1a47ab0e77600fcbdf1c87d461bd8f3f63150d (patch)
tree6340d9f4b8b53c0d18045da1372599645375efce /fs/xfs/xfs_btree.c
parenta2050646f655a90400cbb66c3866d2e0137eee0c (diff)
xfs: add support for large btree blocks
Add support for larger btree blocks that contain a CRC32C checksum, a filesystem uuid and block number for detecting filesystem consistency and out of place writes. [dchinner@redhat.com] Also include an owner field to allow reverse mappings to be implemented for improved repairability and an LSN field so that log recovery can easily determine the last modification that made it to disk for each buffer. [dchinner@redhat.com] Add buffer log format flags to indicate the type of buffer to recovery so that we don't have to do blind magic number tests to determine what the buffer is. [dchinner@redhat.com] Modified to fit into the verifier structure. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Ben Myers <bpm@sgi.com> Signed-off-by: Ben Myers <bpm@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_btree.c')
-rw-r--r--fs/xfs/xfs_btree.c256
1 files changed, 211 insertions, 45 deletions
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index db010408d701..ec77036f13b5 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -30,9 +30,11 @@
30#include "xfs_dinode.h" 30#include "xfs_dinode.h"
31#include "xfs_inode.h" 31#include "xfs_inode.h"
32#include "xfs_inode_item.h" 32#include "xfs_inode_item.h"
33#include "xfs_buf_item.h"
33#include "xfs_btree.h" 34#include "xfs_btree.h"
34#include "xfs_error.h" 35#include "xfs_error.h"
35#include "xfs_trace.h" 36#include "xfs_trace.h"
37#include "xfs_cksum.h"
36 38
37/* 39/*
38 * Cursor allocation zone. 40 * Cursor allocation zone.
@@ -42,9 +44,13 @@ kmem_zone_t *xfs_btree_cur_zone;
42/* 44/*
43 * Btree magic numbers. 45 * Btree magic numbers.
44 */ 46 */
45const __uint32_t xfs_magics[XFS_BTNUM_MAX] = { 47static const __uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
46 XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC 48 { XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC },
49 { XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC,
50 XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC }
47}; 51};
52#define xfs_btree_magic(cur) \
53 xfs_magics[!!((cur)->bc_flags & XFS_BTREE_CRC_BLOCKS)][cur->bc_btnum]
48 54
49 55
50STATIC int /* error (0 or EFSCORRUPTED) */ 56STATIC int /* error (0 or EFSCORRUPTED) */
@@ -54,30 +60,38 @@ xfs_btree_check_lblock(
54 int level, /* level of the btree block */ 60 int level, /* level of the btree block */
55 struct xfs_buf *bp) /* buffer for block, if any */ 61 struct xfs_buf *bp) /* buffer for block, if any */
56{ 62{
57 int lblock_ok; /* block passes checks */ 63 int lblock_ok = 1; /* block passes checks */
58 struct xfs_mount *mp; /* file system mount point */ 64 struct xfs_mount *mp; /* file system mount point */
59 65
60 mp = cur->bc_mp; 66 mp = cur->bc_mp;
61 lblock_ok = 67
62 be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] && 68 if (xfs_sb_version_hascrc(&mp->m_sb)) {
69 lblock_ok = lblock_ok &&
70 uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid) &&
71 block->bb_u.l.bb_blkno == cpu_to_be64(
72 bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
73 }
74
75 lblock_ok = lblock_ok &&
76 be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) &&
63 be16_to_cpu(block->bb_level) == level && 77 be16_to_cpu(block->bb_level) == level &&
64 be16_to_cpu(block->bb_numrecs) <= 78 be16_to_cpu(block->bb_numrecs) <=
65 cur->bc_ops->get_maxrecs(cur, level) && 79 cur->bc_ops->get_maxrecs(cur, level) &&
66 block->bb_u.l.bb_leftsib && 80 block->bb_u.l.bb_leftsib &&
67 (block->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO) || 81 (block->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO) ||
68 XFS_FSB_SANITY_CHECK(mp, 82 XFS_FSB_SANITY_CHECK(mp,
69 be64_to_cpu(block->bb_u.l.bb_leftsib))) && 83 be64_to_cpu(block->bb_u.l.bb_leftsib))) &&
70 block->bb_u.l.bb_rightsib && 84 block->bb_u.l.bb_rightsib &&
71 (block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO) || 85 (block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO) ||
72 XFS_FSB_SANITY_CHECK(mp, 86 XFS_FSB_SANITY_CHECK(mp,
73 be64_to_cpu(block->bb_u.l.bb_rightsib))); 87 be64_to_cpu(block->bb_u.l.bb_rightsib)));
88
74 if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, 89 if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp,
75 XFS_ERRTAG_BTREE_CHECK_LBLOCK, 90 XFS_ERRTAG_BTREE_CHECK_LBLOCK,
76 XFS_RANDOM_BTREE_CHECK_LBLOCK))) { 91 XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
77 if (bp) 92 if (bp)
78 trace_xfs_btree_corrupt(bp, _RET_IP_); 93 trace_xfs_btree_corrupt(bp, _RET_IP_);
79 XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW, 94 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
80 mp);
81 return XFS_ERROR(EFSCORRUPTED); 95 return XFS_ERROR(EFSCORRUPTED);
82 } 96 }
83 return 0; 97 return 0;
@@ -90,16 +104,26 @@ xfs_btree_check_sblock(
90 int level, /* level of the btree block */ 104 int level, /* level of the btree block */
91 struct xfs_buf *bp) /* buffer containing block */ 105 struct xfs_buf *bp) /* buffer containing block */
92{ 106{
107 struct xfs_mount *mp; /* file system mount point */
93 struct xfs_buf *agbp; /* buffer for ag. freespace struct */ 108 struct xfs_buf *agbp; /* buffer for ag. freespace struct */
94 struct xfs_agf *agf; /* ag. freespace structure */ 109 struct xfs_agf *agf; /* ag. freespace structure */
95 xfs_agblock_t agflen; /* native ag. freespace length */ 110 xfs_agblock_t agflen; /* native ag. freespace length */
96 int sblock_ok; /* block passes checks */ 111 int sblock_ok = 1; /* block passes checks */
97 112
113 mp = cur->bc_mp;
98 agbp = cur->bc_private.a.agbp; 114 agbp = cur->bc_private.a.agbp;
99 agf = XFS_BUF_TO_AGF(agbp); 115 agf = XFS_BUF_TO_AGF(agbp);
100 agflen = be32_to_cpu(agf->agf_length); 116 agflen = be32_to_cpu(agf->agf_length);
101 sblock_ok = 117
102 be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] && 118 if (xfs_sb_version_hascrc(&mp->m_sb)) {
119 sblock_ok = sblock_ok &&
120 uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid) &&
121 block->bb_u.s.bb_blkno == cpu_to_be64(
122 bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
123 }
124
125 sblock_ok = sblock_ok &&
126 be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) &&
103 be16_to_cpu(block->bb_level) == level && 127 be16_to_cpu(block->bb_level) == level &&
104 be16_to_cpu(block->bb_numrecs) <= 128 be16_to_cpu(block->bb_numrecs) <=
105 cur->bc_ops->get_maxrecs(cur, level) && 129 cur->bc_ops->get_maxrecs(cur, level) &&
@@ -109,13 +133,13 @@ xfs_btree_check_sblock(
109 (block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK) || 133 (block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK) ||
110 be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) && 134 be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) &&
111 block->bb_u.s.bb_rightsib; 135 block->bb_u.s.bb_rightsib;
112 if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp, 136
137 if (unlikely(XFS_TEST_ERROR(!sblock_ok, mp,
113 XFS_ERRTAG_BTREE_CHECK_SBLOCK, 138 XFS_ERRTAG_BTREE_CHECK_SBLOCK,
114 XFS_RANDOM_BTREE_CHECK_SBLOCK))) { 139 XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
115 if (bp) 140 if (bp)
116 trace_xfs_btree_corrupt(bp, _RET_IP_); 141 trace_xfs_btree_corrupt(bp, _RET_IP_);
117 XFS_CORRUPTION_ERROR("xfs_btree_check_sblock", 142 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
118 XFS_ERRLEVEL_LOW, cur->bc_mp, block);
119 return XFS_ERROR(EFSCORRUPTED); 143 return XFS_ERROR(EFSCORRUPTED);
120 } 144 }
121 return 0; 145 return 0;
@@ -194,6 +218,72 @@ xfs_btree_check_ptr(
194#endif 218#endif
195 219
196/* 220/*
221 * Calculate CRC on the whole btree block and stuff it into the
222 * long-form btree header.
223 *
 224 * Prior to calculating the CRC, pull the LSN out of the buffer log item and put
 225 * it into the buffer so recovery knows what the last modification was that made
226 * it to disk.
227 */
228void
229xfs_btree_lblock_calc_crc(
230 struct xfs_buf *bp)
231{
232 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
233 struct xfs_buf_log_item *bip = bp->b_fspriv;
234
235 if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
236 return;
237 if (bip)
238 block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
239 xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
240 XFS_BTREE_LBLOCK_CRC_OFF);
241}
242
243bool
244xfs_btree_lblock_verify_crc(
245 struct xfs_buf *bp)
246{
247 if (xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
248 return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
249 XFS_BTREE_LBLOCK_CRC_OFF);
250 return true;
251}
252
253/*
254 * Calculate CRC on the whole btree block and stuff it into the
255 * short-form btree header.
256 *
 257 * Prior to calculating the CRC, pull the LSN out of the buffer log item and put
 258 * it into the buffer so recovery knows what the last modification was that made
259 * it to disk.
260 */
261void
262xfs_btree_sblock_calc_crc(
263 struct xfs_buf *bp)
264{
265 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
266 struct xfs_buf_log_item *bip = bp->b_fspriv;
267
268 if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
269 return;
270 if (bip)
271 block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
272 xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
273 XFS_BTREE_SBLOCK_CRC_OFF);
274}
275
276bool
277xfs_btree_sblock_verify_crc(
278 struct xfs_buf *bp)
279{
280 if (xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
281 return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
282 XFS_BTREE_SBLOCK_CRC_OFF);
283 return true;
284}
285
286/*
197 * Delete the btree cursor. 287 * Delete the btree cursor.
198 */ 288 */
199void 289void
@@ -277,10 +367,8 @@ xfs_btree_dup_cursor(
277 *ncur = NULL; 367 *ncur = NULL;
278 return error; 368 return error;
279 } 369 }
280 new->bc_bufs[i] = bp; 370 }
281 ASSERT(!xfs_buf_geterror(bp)); 371 new->bc_bufs[i] = bp;
282 } else
283 new->bc_bufs[i] = NULL;
284 } 372 }
285 *ncur = new; 373 *ncur = new;
286 return 0; 374 return 0;
@@ -321,9 +409,14 @@ xfs_btree_dup_cursor(
321 */ 409 */
322static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur) 410static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
323{ 411{
324 return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ? 412 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
325 XFS_BTREE_LBLOCK_LEN : 413 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
326 XFS_BTREE_SBLOCK_LEN; 414 return XFS_BTREE_LBLOCK_CRC_LEN;
415 return XFS_BTREE_LBLOCK_LEN;
416 }
417 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
418 return XFS_BTREE_SBLOCK_CRC_LEN;
419 return XFS_BTREE_SBLOCK_LEN;
327} 420}
328 421
329/* 422/*
@@ -863,43 +956,85 @@ xfs_btree_set_sibling(
863} 956}
864 957
865void 958void
959xfs_btree_init_block_int(
960 struct xfs_mount *mp,
961 struct xfs_btree_block *buf,
962 xfs_daddr_t blkno,
963 __u32 magic,
964 __u16 level,
965 __u16 numrecs,
966 __u64 owner,
967 unsigned int flags)
968{
969 buf->bb_magic = cpu_to_be32(magic);
970 buf->bb_level = cpu_to_be16(level);
971 buf->bb_numrecs = cpu_to_be16(numrecs);
972
973 if (flags & XFS_BTREE_LONG_PTRS) {
974 buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
975 buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
976 if (flags & XFS_BTREE_CRC_BLOCKS) {
977 buf->bb_u.l.bb_blkno = cpu_to_be64(blkno);
978 buf->bb_u.l.bb_owner = cpu_to_be64(owner);
979 uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid);
980 buf->bb_u.l.bb_pad = 0;
981 }
982 } else {
983 /* owner is a 32 bit value on short blocks */
984 __u32 __owner = (__u32)owner;
985
986 buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
987 buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
988 if (flags & XFS_BTREE_CRC_BLOCKS) {
989 buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
990 buf->bb_u.s.bb_owner = cpu_to_be32(__owner);
991 uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid);
992 }
993 }
994}
995
996void
866xfs_btree_init_block( 997xfs_btree_init_block(
867 struct xfs_mount *mp, 998 struct xfs_mount *mp,
868 struct xfs_buf *bp, 999 struct xfs_buf *bp,
869 __u32 magic, 1000 __u32 magic,
870 __u16 level, 1001 __u16 level,
871 __u16 numrecs, 1002 __u16 numrecs,
1003 __u64 owner,
872 unsigned int flags) 1004 unsigned int flags)
873{ 1005{
874 struct xfs_btree_block *new = XFS_BUF_TO_BLOCK(bp); 1006 xfs_btree_init_block_int(mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
875 1007 magic, level, numrecs, owner, flags);
876 new->bb_magic = cpu_to_be32(magic);
877 new->bb_level = cpu_to_be16(level);
878 new->bb_numrecs = cpu_to_be16(numrecs);
879
880 if (flags & XFS_BTREE_LONG_PTRS) {
881 new->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
882 new->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
883 } else {
884 new->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
885 new->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
886 }
887} 1008}
888 1009
889STATIC void 1010STATIC void
890xfs_btree_init_block_cur( 1011xfs_btree_init_block_cur(
891 struct xfs_btree_cur *cur, 1012 struct xfs_btree_cur *cur,
1013 struct xfs_buf *bp,
892 int level, 1014 int level,
893 int numrecs, 1015 int numrecs)
894 struct xfs_buf *bp)
895{ 1016{
896 xfs_btree_init_block(cur->bc_mp, bp, xfs_magics[cur->bc_btnum], 1017 __u64 owner;
897 level, numrecs, cur->bc_flags); 1018
1019 /*
1020 * we can pull the owner from the cursor right now as the different
1021 * owners align directly with the pointer size of the btree. This may
1022 * change in future, but is safe for current users of the generic btree
1023 * code.
1024 */
1025 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1026 owner = cur->bc_private.b.ip->i_ino;
1027 else
1028 owner = cur->bc_private.a.agno;
1029
1030 xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
1031 xfs_btree_magic(cur), level, numrecs,
1032 owner, cur->bc_flags);
898} 1033}
899 1034
900/* 1035/*
901 * Return true if ptr is the last record in the btree and 1036 * Return true if ptr is the last record in the btree and
 902 * we need to track updates to this record. The decision 1037 * we need to track updates to this record. The decision
903 * will be further refined in the update_lastrec method. 1038 * will be further refined in the update_lastrec method.
904 */ 1039 */
905STATIC int 1040STATIC int
@@ -1147,6 +1282,7 @@ xfs_btree_log_keys(
1147 XFS_BTREE_TRACE_ARGBII(cur, bp, first, last); 1282 XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);
1148 1283
1149 if (bp) { 1284 if (bp) {
1285 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLF_BTREE_BUF);
1150 xfs_trans_log_buf(cur->bc_tp, bp, 1286 xfs_trans_log_buf(cur->bc_tp, bp,
1151 xfs_btree_key_offset(cur, first), 1287 xfs_btree_key_offset(cur, first),
1152 xfs_btree_key_offset(cur, last + 1) - 1); 1288 xfs_btree_key_offset(cur, last + 1) - 1);
@@ -1171,6 +1307,7 @@ xfs_btree_log_recs(
1171 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); 1307 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
1172 XFS_BTREE_TRACE_ARGBII(cur, bp, first, last); 1308 XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);
1173 1309
1310 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLF_BTREE_BUF);
1174 xfs_trans_log_buf(cur->bc_tp, bp, 1311 xfs_trans_log_buf(cur->bc_tp, bp,
1175 xfs_btree_rec_offset(cur, first), 1312 xfs_btree_rec_offset(cur, first),
1176 xfs_btree_rec_offset(cur, last + 1) - 1); 1313 xfs_btree_rec_offset(cur, last + 1) - 1);
@@ -1195,6 +1332,7 @@ xfs_btree_log_ptrs(
1195 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); 1332 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
1196 int level = xfs_btree_get_level(block); 1333 int level = xfs_btree_get_level(block);
1197 1334
1335 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLF_BTREE_BUF);
1198 xfs_trans_log_buf(cur->bc_tp, bp, 1336 xfs_trans_log_buf(cur->bc_tp, bp,
1199 xfs_btree_ptr_offset(cur, first, level), 1337 xfs_btree_ptr_offset(cur, first, level),
1200 xfs_btree_ptr_offset(cur, last + 1, level) - 1); 1338 xfs_btree_ptr_offset(cur, last + 1, level) - 1);
@@ -1223,7 +1361,12 @@ xfs_btree_log_block(
1223 offsetof(struct xfs_btree_block, bb_numrecs), 1361 offsetof(struct xfs_btree_block, bb_numrecs),
1224 offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib), 1362 offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
1225 offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib), 1363 offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
1226 XFS_BTREE_SBLOCK_LEN 1364 offsetof(struct xfs_btree_block, bb_u.s.bb_blkno),
1365 offsetof(struct xfs_btree_block, bb_u.s.bb_lsn),
1366 offsetof(struct xfs_btree_block, bb_u.s.bb_uuid),
1367 offsetof(struct xfs_btree_block, bb_u.s.bb_owner),
1368 offsetof(struct xfs_btree_block, bb_u.s.bb_crc),
1369 XFS_BTREE_SBLOCK_CRC_LEN
1227 }; 1370 };
1228 static const short loffsets[] = { /* table of offsets (long) */ 1371 static const short loffsets[] = { /* table of offsets (long) */
1229 offsetof(struct xfs_btree_block, bb_magic), 1372 offsetof(struct xfs_btree_block, bb_magic),
@@ -1231,17 +1374,40 @@ xfs_btree_log_block(
1231 offsetof(struct xfs_btree_block, bb_numrecs), 1374 offsetof(struct xfs_btree_block, bb_numrecs),
1232 offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib), 1375 offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
1233 offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib), 1376 offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
1234 XFS_BTREE_LBLOCK_LEN 1377 offsetof(struct xfs_btree_block, bb_u.l.bb_blkno),
1378 offsetof(struct xfs_btree_block, bb_u.l.bb_lsn),
1379 offsetof(struct xfs_btree_block, bb_u.l.bb_uuid),
1380 offsetof(struct xfs_btree_block, bb_u.l.bb_owner),
1381 offsetof(struct xfs_btree_block, bb_u.l.bb_crc),
1382 offsetof(struct xfs_btree_block, bb_u.l.bb_pad),
1383 XFS_BTREE_LBLOCK_CRC_LEN
1235 }; 1384 };
1236 1385
1237 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); 1386 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
1238 XFS_BTREE_TRACE_ARGBI(cur, bp, fields); 1387 XFS_BTREE_TRACE_ARGBI(cur, bp, fields);
1239 1388
1240 if (bp) { 1389 if (bp) {
1390 int nbits;
1391
1392 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
1393 /*
1394 * We don't log the CRC when updating a btree
1395 * block but instead recreate it during log
1396 * recovery. As the log buffers have checksums
1397 * of their own this is safe and avoids logging a crc
1398 * update in a lot of places.
1399 */
1400 if (fields == XFS_BB_ALL_BITS)
1401 fields = XFS_BB_ALL_BITS_CRC;
1402 nbits = XFS_BB_NUM_BITS_CRC;
1403 } else {
1404 nbits = XFS_BB_NUM_BITS;
1405 }
1241 xfs_btree_offsets(fields, 1406 xfs_btree_offsets(fields,
1242 (cur->bc_flags & XFS_BTREE_LONG_PTRS) ? 1407 (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
1243 loffsets : soffsets, 1408 loffsets : soffsets,
1244 XFS_BB_NUM_BITS, &first, &last); 1409 nbits, &first, &last);
1410 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLF_BTREE_BUF);
1245 xfs_trans_log_buf(cur->bc_tp, bp, first, last); 1411 xfs_trans_log_buf(cur->bc_tp, bp, first, last);
1246 } else { 1412 } else {
1247 xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, 1413 xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
@@ -2204,7 +2370,7 @@ xfs_btree_split(
2204 goto error0; 2370 goto error0;
2205 2371
2206 /* Fill in the btree header for the new right block. */ 2372 /* Fill in the btree header for the new right block. */
2207 xfs_btree_init_block_cur(cur, xfs_btree_get_level(left), 0, rbp); 2373 xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0);
2208 2374
2209 /* 2375 /*
2210 * Split the entries between the old and the new block evenly. 2376 * Split the entries between the old and the new block evenly.
@@ -2513,7 +2679,7 @@ xfs_btree_new_root(
2513 nptr = 2; 2679 nptr = 2;
2514 } 2680 }
2515 /* Fill in the new block's btree header and log it. */ 2681 /* Fill in the new block's btree header and log it. */
2516 xfs_btree_init_block_cur(cur, cur->bc_nlevels, 2, nbp); 2682 xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
2517 xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS); 2683 xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
2518 ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) && 2684 ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
2519 !xfs_btree_ptr_is_null(cur, &rptr)); 2685 !xfs_btree_ptr_is_null(cur, &rptr));