aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs/xfs_log_recover.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@infradead.org>2007-10-11 20:58:05 -0400
committerLachlan McIlroy <lachlan@redback.melbourne.sgi.com>2008-02-07 02:10:31 -0500
commit03bea6fe6c38c502c815432999eacfa2eccb0a12 (patch)
tree9800a1633a53dafdceb8799155a192d65be0b111 /fs/xfs/xfs_log_recover.c
parent9909c4aa1a3e5b1f23cbc1bc2f0db025a7f75f85 (diff)
[XFS] clean up some xfs_log_priv.h macros
- the various assign lsn macros are replaced by a single inline, xlog_assign_lsn, which is equivalent to ASSIGN_ANY_LSN_HOST except for a more sane calling convention. ASSIGN_LSN_DISK is replaced by xlog_assign_lsn and a manual byteswap, and ASSIGN_LSN by the same, except we pass the cycle and block arguments explicitly instead of a log parameter. The latter two variants only had two and one users, respectively, anyway. - the GET_CYCLE is replaced by an xlog_get_cycle inline with exactly the same calling conventions. - GET_CLIENT_ID is replaced by xlog_get_client_id which leaves out the unused arch argument. Instead of conditional definitions depending on host endianness we now do an unconditional swap and shift, which generates equal code. - the unused XLOG_SET macro is removed. SGI-PV: 971186 SGI-Modid: xfs-linux-melb:xfs-kern:29819a Signed-off-by: Christoph Hellwig <hch@infradead.org> Signed-off-by: Lachlan McIlroy <lachlan@sgi.com> Signed-off-by: Tim Shimmin <tes@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_log_recover.c')
-rw-r--r--fs/xfs/xfs_log_recover.c29
1 files changed, 16 insertions, 13 deletions
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 4c39dc0e6841..e603591b9922 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -311,7 +311,7 @@ xlog_find_cycle_start(
311 if ((error = xlog_bread(log, mid_blk, 1, bp))) 311 if ((error = xlog_bread(log, mid_blk, 1, bp)))
312 return error; 312 return error;
313 offset = xlog_align(log, mid_blk, 1, bp); 313 offset = xlog_align(log, mid_blk, 1, bp);
314 mid_cycle = GET_CYCLE(offset, ARCH_CONVERT); 314 mid_cycle = xlog_get_cycle(offset);
315 if (mid_cycle == cycle) { 315 if (mid_cycle == cycle) {
316 *last_blk = mid_blk; 316 *last_blk = mid_blk;
317 /* last_half_cycle == mid_cycle */ 317 /* last_half_cycle == mid_cycle */
@@ -371,7 +371,7 @@ xlog_find_verify_cycle(
371 371
372 buf = xlog_align(log, i, bcount, bp); 372 buf = xlog_align(log, i, bcount, bp);
373 for (j = 0; j < bcount; j++) { 373 for (j = 0; j < bcount; j++) {
374 cycle = GET_CYCLE(buf, ARCH_CONVERT); 374 cycle = xlog_get_cycle(buf);
375 if (cycle == stop_on_cycle_no) { 375 if (cycle == stop_on_cycle_no) {
376 *new_blk = i+j; 376 *new_blk = i+j;
377 goto out; 377 goto out;
@@ -550,13 +550,13 @@ xlog_find_head(
550 if ((error = xlog_bread(log, 0, 1, bp))) 550 if ((error = xlog_bread(log, 0, 1, bp)))
551 goto bp_err; 551 goto bp_err;
552 offset = xlog_align(log, 0, 1, bp); 552 offset = xlog_align(log, 0, 1, bp);
553 first_half_cycle = GET_CYCLE(offset, ARCH_CONVERT); 553 first_half_cycle = xlog_get_cycle(offset);
554 554
555 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */ 555 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */
556 if ((error = xlog_bread(log, last_blk, 1, bp))) 556 if ((error = xlog_bread(log, last_blk, 1, bp)))
557 goto bp_err; 557 goto bp_err;
558 offset = xlog_align(log, last_blk, 1, bp); 558 offset = xlog_align(log, last_blk, 1, bp);
559 last_half_cycle = GET_CYCLE(offset, ARCH_CONVERT); 559 last_half_cycle = xlog_get_cycle(offset);
560 ASSERT(last_half_cycle != 0); 560 ASSERT(last_half_cycle != 0);
561 561
562 /* 562 /*
@@ -808,7 +808,7 @@ xlog_find_tail(
808 if ((error = xlog_bread(log, 0, 1, bp))) 808 if ((error = xlog_bread(log, 0, 1, bp)))
809 goto bread_err; 809 goto bread_err;
810 offset = xlog_align(log, 0, 1, bp); 810 offset = xlog_align(log, 0, 1, bp);
811 if (GET_CYCLE(offset, ARCH_CONVERT) == 0) { 811 if (xlog_get_cycle(offset) == 0) {
812 *tail_blk = 0; 812 *tail_blk = 0;
813 /* leave all other log inited values alone */ 813 /* leave all other log inited values alone */
814 goto exit; 814 goto exit;
@@ -922,10 +922,12 @@ xlog_find_tail(
922 * log records will point recovery to after the 922 * log records will point recovery to after the
923 * current unmount record. 923 * current unmount record.
924 */ 924 */
925 ASSIGN_ANY_LSN_HOST(log->l_tail_lsn, log->l_curr_cycle, 925 log->l_tail_lsn =
926 after_umount_blk); 926 xlog_assign_lsn(log->l_curr_cycle,
927 ASSIGN_ANY_LSN_HOST(log->l_last_sync_lsn, log->l_curr_cycle, 927 after_umount_blk);
928 after_umount_blk); 928 log->l_last_sync_lsn =
929 xlog_assign_lsn(log->l_curr_cycle,
930 after_umount_blk);
929 *tail_blk = after_umount_blk; 931 *tail_blk = after_umount_blk;
930 932
931 /* 933 /*
@@ -1007,7 +1009,7 @@ xlog_find_zeroed(
1007 if ((error = xlog_bread(log, 0, 1, bp))) 1009 if ((error = xlog_bread(log, 0, 1, bp)))
1008 goto bp_err; 1010 goto bp_err;
1009 offset = xlog_align(log, 0, 1, bp); 1011 offset = xlog_align(log, 0, 1, bp);
1010 first_cycle = GET_CYCLE(offset, ARCH_CONVERT); 1012 first_cycle = xlog_get_cycle(offset);
1011 if (first_cycle == 0) { /* completely zeroed log */ 1013 if (first_cycle == 0) { /* completely zeroed log */
1012 *blk_no = 0; 1014 *blk_no = 0;
1013 xlog_put_bp(bp); 1015 xlog_put_bp(bp);
@@ -1018,7 +1020,7 @@ xlog_find_zeroed(
1018 if ((error = xlog_bread(log, log_bbnum-1, 1, bp))) 1020 if ((error = xlog_bread(log, log_bbnum-1, 1, bp)))
1019 goto bp_err; 1021 goto bp_err;
1020 offset = xlog_align(log, log_bbnum-1, 1, bp); 1022 offset = xlog_align(log, log_bbnum-1, 1, bp);
1021 last_cycle = GET_CYCLE(offset, ARCH_CONVERT); 1023 last_cycle = xlog_get_cycle(offset);
1022 if (last_cycle != 0) { /* log completely written to */ 1024 if (last_cycle != 0) { /* log completely written to */
1023 xlog_put_bp(bp); 1025 xlog_put_bp(bp);
1024 return 0; 1026 return 0;
@@ -1102,8 +1104,9 @@ xlog_add_record(
1102 INT_SET(recp->h_cycle, ARCH_CONVERT, cycle); 1104 INT_SET(recp->h_cycle, ARCH_CONVERT, cycle);
1103 INT_SET(recp->h_version, ARCH_CONVERT, 1105 INT_SET(recp->h_version, ARCH_CONVERT,
1104 XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1); 1106 XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
1105 ASSIGN_ANY_LSN_DISK(recp->h_lsn, cycle, block); 1107 INT_SET(recp->h_lsn, ARCH_CONVERT, xlog_assign_lsn(cycle, block));
1106 ASSIGN_ANY_LSN_DISK(recp->h_tail_lsn, tail_cycle, tail_block); 1108 INT_SET(recp->h_tail_lsn, ARCH_CONVERT,
1109 xlog_assign_lsn(tail_cycle, tail_block));
1107 INT_SET(recp->h_fmt, ARCH_CONVERT, XLOG_FMT); 1110 INT_SET(recp->h_fmt, ARCH_CONVERT, XLOG_FMT);
1108 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t)); 1111 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1109} 1112}