author     Christoph Hellwig <hch@infradead.org>                2007-10-11 20:58:05 -0400
committer  Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>  2008-02-07 02:10:31 -0500
commit     03bea6fe6c38c502c815432999eacfa2eccb0a12
tree       9800a1633a53dafdceb8799155a192d65be0b111 /fs/xfs
parent     9909c4aa1a3e5b1f23cbc1bc2f0db025a7f75f85
[XFS] clean up some xfs_log_priv.h macros
- The various assign lsn macros are replaced by a single inline,
  xlog_assign_lsn, which is equivalent to ASSIGN_ANY_LSN_HOST except for a
  saner calling convention.  ASSIGN_ANY_LSN_DISK is replaced by
  xlog_assign_lsn plus a manual byteswap, and ASSIGN_LSN by the same,
  except that we pass the cycle and block arguments explicitly instead of
  a log parameter.  The latter two variants only had two and one users,
  respectively.
- GET_CYCLE is replaced by an xlog_get_cycle inline with exactly the same
  calling convention.
- GET_CLIENT_ID is replaced by xlog_get_client_id, which drops the unused
  arch argument.  Instead of conditional definitions depending on host
  endianness, we now do an unconditional swap and shift, which generates
  identical code.
- The unused XLOG_SET macro is removed.

SGI-PV: 971186
SGI-Modid: xfs-linux-melb:xfs-kern:29819a

Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
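For reference, a minimal user-space sketch of the LSN packing that the new
xlog_assign_lsn performs (the kernel's xfs_lsn_t and uint types are stubbed
with fixed-width stand-ins; this is an illustration, not the kernel code):

#include <assert.h>
#include <stdint.h>

typedef int64_t xfs_lsn_t;      /* stand-in for the kernel typedef */

/* Same packing as the new helper: cycle in the high 32 bits, block below. */
static inline xfs_lsn_t xlog_assign_lsn(uint32_t cycle, uint32_t block)
{
        return ((xfs_lsn_t)cycle << 32) | block;
}

int main(void)
{
        /* Matches the comment in xlog_alloc_log(): cycle 1, block 0. */
        assert(xlog_assign_lsn(1, 0) == 0x100000000LL);
        /* Cycle and block can be pulled back out of the host-order LSN. */
        assert((uint32_t)(xlog_assign_lsn(7, 42) >> 32) == 7);
        assert((uint32_t)xlog_assign_lsn(7, 42) == 42);
        return 0;
}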
Diffstat (limited to 'fs/xfs')
-rw-r--r--   fs/xfs/xfs_log.c           14
-rw-r--r--   fs/xfs/xfs_log_priv.h      47
-rw-r--r--   fs/xfs/xfs_log_recover.c   29
3 files changed, 40 insertions, 50 deletions
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 615638201284..90d96caf7200 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1161,7 +1161,7 @@ xlog_alloc_log(xfs_mount_t *mp,
         log->l_flags |= XLOG_ACTIVE_RECOVERY;
 
         log->l_prev_block = -1;
-        ASSIGN_ANY_LSN_HOST(log->l_tail_lsn, 1, 0);
+        log->l_tail_lsn = xlog_assign_lsn(1, 0);
         /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
         log->l_last_sync_lsn = log->l_tail_lsn;
         log->l_curr_cycle = 1;      /* 0 is bad since this is initial value */
@@ -1326,8 +1326,7 @@ xlog_grant_push_ail(xfs_mount_t *mp,
                 threshold_block -= log->l_logBBsize;
                 threshold_cycle += 1;
         }
-        ASSIGN_ANY_LSN_HOST(threshold_lsn, threshold_cycle,
-                            threshold_block);
+        threshold_lsn = xlog_assign_lsn(threshold_cycle, threshold_block);
 
         /* Don't pass in an lsn greater than the lsn of the last
          * log record known to be on disk.
@@ -2393,7 +2392,8 @@ restart:
                                 log->l_iclog_hsize,
                                 XLOG_REG_TYPE_LRHEADER);
         INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle);
-        ASSIGN_LSN(head->h_lsn, log);
+        INT_SET(head->h_lsn, ARCH_CONVERT,
+                xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
         ASSERT(log->l_curr_block >= 0);
 }
 
@@ -3488,9 +3488,11 @@ xlog_verify_iclog(xlog_t *log,
                 if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
                         j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
                         k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
-                        clientid = GET_CLIENT_ID(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT);
+                        clientid = xlog_get_client_id(
+                                xhdr[j].hic_xheader.xh_cycle_data[k]);
                 } else {
-                        clientid = GET_CLIENT_ID(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT);
+                        clientid = xlog_get_client_id(
+                                iclog->ic_header.h_cycle_data[idx]);
                 }
         }
         if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 07da6a72628a..e391f58deae1 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -55,32 +55,21 @@ struct xfs_mount;
         BTOBB(XLOG_MAX_ICLOGS << (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? \
          XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
 
-/*
- * set lsns
- */
 
-#define ASSIGN_ANY_LSN_HOST(lsn,cycle,block)  \
-    { \
-        (lsn) = ((xfs_lsn_t)(cycle)<<32)|(block); \
-    }
-#define ASSIGN_ANY_LSN_DISK(lsn,cycle,block)  \
-    { \
-        INT_SET(((uint *)&(lsn))[0], ARCH_CONVERT, (cycle)); \
-        INT_SET(((uint *)&(lsn))[1], ARCH_CONVERT, (block)); \
-    }
-#define ASSIGN_LSN(lsn,log) \
-    ASSIGN_ANY_LSN_DISK(lsn,(log)->l_curr_cycle,(log)->l_curr_block);
-
-#define XLOG_SET(f,b)           (((f) & (b)) == (b))
-
-#define GET_CYCLE(ptr, arch) \
-    (INT_GET(*(uint *)(ptr), arch) == XLOG_HEADER_MAGIC_NUM ? \
-     INT_GET(*((uint *)(ptr)+1), arch) : \
-     INT_GET(*(uint *)(ptr), arch) \
-    )
+static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
+{
+        return ((xfs_lsn_t)cycle << 32) | block;
+}
 
-#define BLK_AVG(blk1, blk2)     ((blk1+blk2) >> 1)
+static inline uint xlog_get_cycle(char *ptr)
+{
+        if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
+                return INT_GET(*((uint *)ptr + 1), ARCH_CONVERT);
+        else
+                return INT_GET(*(uint *)ptr, ARCH_CONVERT);
+}
 
+#define BLK_AVG(blk1, blk2)     ((blk1+blk2) >> 1)
 
 #ifdef __KERNEL__
 
@@ -96,14 +85,10 @@ struct xfs_mount;
  *
  * this has endian issues, of course.
  */
-
-#ifndef XFS_NATIVE_HOST
-#define GET_CLIENT_ID(i,arch) \
-    ((i) & 0xff)
-#else
-#define GET_CLIENT_ID(i,arch) \
-    ((i) >> 24)
-#endif
+static inline uint xlog_get_client_id(uint i)
+{
+        return INT_GET(i, ARCH_CONVERT) >> 24;
+}
 
 #define xlog_panic(args...)     cmn_err(CE_PANIC, ## args)
 #define xlog_exit(args...)      cmn_err(CE_PANIC, ## args)
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 4c39dc0e6841..e603591b9922 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -311,7 +311,7 @@ xlog_find_cycle_start(
                 if ((error = xlog_bread(log, mid_blk, 1, bp)))
                         return error;
                 offset = xlog_align(log, mid_blk, 1, bp);
-                mid_cycle = GET_CYCLE(offset, ARCH_CONVERT);
+                mid_cycle = xlog_get_cycle(offset);
                 if (mid_cycle == cycle) {
                         *last_blk = mid_blk;
                         /* last_half_cycle == mid_cycle */
@@ -371,7 +371,7 @@ xlog_find_verify_cycle(
 
                 buf = xlog_align(log, i, bcount, bp);
                 for (j = 0; j < bcount; j++) {
-                        cycle = GET_CYCLE(buf, ARCH_CONVERT);
+                        cycle = xlog_get_cycle(buf);
                         if (cycle == stop_on_cycle_no) {
                                 *new_blk = i+j;
                                 goto out;
@@ -550,13 +550,13 @@ xlog_find_head(
         if ((error = xlog_bread(log, 0, 1, bp)))
                 goto bp_err;
         offset = xlog_align(log, 0, 1, bp);
-        first_half_cycle = GET_CYCLE(offset, ARCH_CONVERT);
+        first_half_cycle = xlog_get_cycle(offset);
 
         last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
         if ((error = xlog_bread(log, last_blk, 1, bp)))
                 goto bp_err;
         offset = xlog_align(log, last_blk, 1, bp);
-        last_half_cycle = GET_CYCLE(offset, ARCH_CONVERT);
+        last_half_cycle = xlog_get_cycle(offset);
         ASSERT(last_half_cycle != 0);
 
         /*
@@ -808,7 +808,7 @@ xlog_find_tail(
         if ((error = xlog_bread(log, 0, 1, bp)))
                 goto bread_err;
         offset = xlog_align(log, 0, 1, bp);
-        if (GET_CYCLE(offset, ARCH_CONVERT) == 0) {
+        if (xlog_get_cycle(offset) == 0) {
                 *tail_blk = 0;
                 /* leave all other log inited values alone */
                 goto exit;
@@ -922,10 +922,12 @@ xlog_find_tail(
                          * log records will point recovery to after the
                          * current unmount record.
                          */
-                        ASSIGN_ANY_LSN_HOST(log->l_tail_lsn, log->l_curr_cycle,
-                                        after_umount_blk);
-                        ASSIGN_ANY_LSN_HOST(log->l_last_sync_lsn, log->l_curr_cycle,
-                                        after_umount_blk);
+                        log->l_tail_lsn =
+                                xlog_assign_lsn(log->l_curr_cycle,
+                                                after_umount_blk);
+                        log->l_last_sync_lsn =
+                                xlog_assign_lsn(log->l_curr_cycle,
+                                                after_umount_blk);
                         *tail_blk = after_umount_blk;
 
                         /*
@@ -1007,7 +1009,7 @@ xlog_find_zeroed(
         if ((error = xlog_bread(log, 0, 1, bp)))
                 goto bp_err;
         offset = xlog_align(log, 0, 1, bp);
-        first_cycle = GET_CYCLE(offset, ARCH_CONVERT);
+        first_cycle = xlog_get_cycle(offset);
         if (first_cycle == 0) {         /* completely zeroed log */
                 *blk_no = 0;
                 xlog_put_bp(bp);
@@ -1018,7 +1020,7 @@ xlog_find_zeroed(
         if ((error = xlog_bread(log, log_bbnum-1, 1, bp)))
                 goto bp_err;
         offset = xlog_align(log, log_bbnum-1, 1, bp);
-        last_cycle = GET_CYCLE(offset, ARCH_CONVERT);
+        last_cycle = xlog_get_cycle(offset);
         if (last_cycle != 0) {          /* log completely written to */
                 xlog_put_bp(bp);
                 return 0;
@@ -1102,8 +1104,9 @@ xlog_add_record(
         INT_SET(recp->h_cycle, ARCH_CONVERT, cycle);
         INT_SET(recp->h_version, ARCH_CONVERT,
                         XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
-        ASSIGN_ANY_LSN_DISK(recp->h_lsn, cycle, block);
-        ASSIGN_ANY_LSN_DISK(recp->h_tail_lsn, tail_cycle, tail_block);
+        INT_SET(recp->h_lsn, ARCH_CONVERT, xlog_assign_lsn(cycle, block));
+        INT_SET(recp->h_tail_lsn, ARCH_CONVERT,
+                        xlog_assign_lsn(tail_cycle, tail_block));
         INT_SET(recp->h_fmt, ARCH_CONVERT, XLOG_FMT);
         memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
 }
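The commit message claims that the unconditional swap-and-shift in
xlog_get_client_id generates code equivalent to the old endian-conditional
GET_CLIENT_ID macros. A minimal user-space sketch of that equivalence for a
little-endian host follows; swab32 is a hand-rolled stand-in for what
INT_GET(..., ARCH_CONVERT) does there, and the sample word is hypothetical:

#include <assert.h>
#include <stdint.h>

/* Stand-in for the INT_GET(..., ARCH_CONVERT) byteswap on a little-endian host. */
static uint32_t swab32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0x0000ff00) |
               ((x << 8) & 0x00ff0000) | (x << 24);
}

int main(void)
{
        /*
         * A log header cycle-data word as it sits in memory on a
         * little-endian host: the byte that is most significant on disk
         * (the client id) lands in the low byte of the host-order load.
         */
        uint32_t raw = 0x030201a5;      /* hypothetical word, client id 0xa5 */

        uint32_t old_le = raw & 0xff;           /* old !XFS_NATIVE_HOST macro   */
        uint32_t new_way = swab32(raw) >> 24;   /* new unconditional swap+shift */

        assert(old_le == new_way && new_way == 0xa5);

        /* On a big-endian host the swap is a no-op, so both reduce to i >> 24. */
        return 0;
}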