author | Christoph Hellwig <hch@infradead.org> | 2007-10-11 20:59:34 -0400
committer | Lachlan McIlroy <lachlan@redback.melbourne.sgi.com> | 2008-02-07 02:11:47 -0500
commit | b53e675dc868c4844ecbcce9149cf68e4299231d (patch)
tree | e49928f2faa73d2f59b80647814835343c9379e5
parent | 67fcb7bfb69eb1072c7e2dd6b46fa34db11dd587 (diff)
[XFS] xlog_rec_header/xlog_rec_ext_header endianness annotations
Mostly a trivial conversion, with one exception: h_num_logops was
previously kept in native endian and only converted to big endian in
xlog_sync, but we now always keep it big endian. With the fast byte-swap
instructions on today's CPUs that is not an issue, and the new variant
keeps the code clean and maintainable.
SGI-PV: 971186
SGI-Modid: xfs-linux-melb:xfs-kern:29821a
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
-rw-r--r-- | fs/xfs/xfs_log.c | 90
-rw-r--r-- | fs/xfs/xfs_log.h | 3
-rw-r--r-- | fs/xfs/xfs_log_priv.h | 40
-rw-r--r-- | fs/xfs/xfs_log_recover.c | 120
4 files changed, 120 insertions, 133 deletions
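To make the pattern easier to follow before diving into the hunks: the patch replaces the old INT_GET()/INT_SET(..., ARCH_CONVERT) macros with sparse-annotated big-endian field types plus the standard kernel byte-swap helpers. The sketch below shows the before/after shape on a made-up structure; only __be32, cpu_to_be32() and be32_to_cpu() are real kernel facilities, everything named demo_* is invented for illustration.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative on-disk header -- not an XFS structure. */
struct demo_disk_hdr {
	__be32	d_magic;	/* stored big-endian on disk */
	__be32	d_len;		/* byte length, big-endian on disk */
};

/*
 * Old style (what the patch removes): native-endian struct fields plus
 * INT_SET()/INT_GET() with an ARCH_CONVERT argument, e.g.
 *
 *	INT_SET(hdr->d_magic, ARCH_CONVERT, DEMO_MAGIC);
 *	len = INT_GET(hdr->d_len, ARCH_CONVERT);
 *
 * New style: the field type carries the endianness, and the conversion
 * happens explicitly at each access, where sparse can check it.
 */
static inline void demo_write_hdr(struct demo_disk_hdr *hdr, u32 magic, u32 len)
{
	hdr->d_magic = cpu_to_be32(magic);
	hdr->d_len = cpu_to_be32(len);
}

static inline u32 demo_read_len(const struct demo_disk_hdr *hdr)
{
	return be32_to_cpu(hdr->d_len);
}

This is also why the "put ops count in correct order" fixup disappears from xlog_sync() below: h_num_logops is now accumulated directly in big-endian form (via be32_add() in xlog_state_finish_copy()), so there is nothing left to convert at sync time.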
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 5ff4643b72c7..6e3d8084d8b3 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1227,12 +1227,12 @@ xlog_alloc_log(xfs_mount_t *mp, | |||
1227 | 1227 | ||
1228 | head = &iclog->ic_header; | 1228 | head = &iclog->ic_header; |
1229 | memset(head, 0, sizeof(xlog_rec_header_t)); | 1229 | memset(head, 0, sizeof(xlog_rec_header_t)); |
1230 | INT_SET(head->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM); | 1230 | head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); |
1231 | INT_SET(head->h_version, ARCH_CONVERT, | 1231 | head->h_version = cpu_to_be32( |
1232 | XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1); | 1232 | XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1); |
1233 | INT_SET(head->h_size, ARCH_CONVERT, log->l_iclog_size); | 1233 | head->h_size = cpu_to_be32(log->l_iclog_size); |
1234 | /* new fields */ | 1234 | /* new fields */ |
1235 | INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT); | 1235 | head->h_fmt = cpu_to_be32(XLOG_FMT); |
1236 | memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); | 1236 | memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); |
1237 | 1237 | ||
1238 | 1238 | ||
@@ -1378,7 +1378,7 @@ xlog_sync(xlog_t *log, | |||
1378 | { | 1378 | { |
1379 | xfs_caddr_t dptr; /* pointer to byte sized element */ | 1379 | xfs_caddr_t dptr; /* pointer to byte sized element */ |
1380 | xfs_buf_t *bp; | 1380 | xfs_buf_t *bp; |
1381 | int i, ops; | 1381 | int i; |
1382 | uint count; /* byte count of bwrite */ | 1382 | uint count; /* byte count of bwrite */ |
1383 | uint count_init; /* initial count before roundup */ | 1383 | uint count_init; /* initial count before roundup */ |
1384 | int roundoff; /* roundoff to BB or stripe */ | 1384 | int roundoff; /* roundoff to BB or stripe */ |
@@ -1417,21 +1417,17 @@ xlog_sync(xlog_t *log, | |||
1417 | 1417 | ||
1418 | /* real byte length */ | 1418 | /* real byte length */ |
1419 | if (v2) { | 1419 | if (v2) { |
1420 | INT_SET(iclog->ic_header.h_len, | 1420 | iclog->ic_header.h_len = |
1421 | ARCH_CONVERT, | 1421 | cpu_to_be32(iclog->ic_offset + roundoff); |
1422 | iclog->ic_offset + roundoff); | ||
1423 | } else { | 1422 | } else { |
1424 | INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset); | 1423 | iclog->ic_header.h_len = |
1424 | cpu_to_be32(iclog->ic_offset); | ||
1425 | } | 1425 | } |
1426 | 1426 | ||
1427 | /* put ops count in correct order */ | ||
1428 | ops = iclog->ic_header.h_num_logops; | ||
1429 | INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); | ||
1430 | |||
1431 | bp = iclog->ic_bp; | 1427 | bp = iclog->ic_bp; |
1432 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); | 1428 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); |
1433 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); | 1429 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); |
1434 | XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT))); | 1430 | XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn))); |
1435 | 1431 | ||
1436 | XFS_STATS_ADD(xs_log_blocks, BTOBB(count)); | 1432 | XFS_STATS_ADD(xs_log_blocks, BTOBB(count)); |
1437 | 1433 | ||
@@ -1494,10 +1490,10 @@ xlog_sync(xlog_t *log, | |||
1494 | * a new cycle. Watch out for the header magic number | 1490 | * a new cycle. Watch out for the header magic number |
1495 | * case, though. | 1491 | * case, though. |
1496 | */ | 1492 | */ |
1497 | for (i=0; i<split; i += BBSIZE) { | 1493 | for (i = 0; i < split; i += BBSIZE) { |
1498 | INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1); | 1494 | be32_add((__be32 *)dptr, 1); |
1499 | if (INT_GET(*(uint *)dptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM) | 1495 | if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM) |
1500 | INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1); | 1496 | be32_add((__be32 *)dptr, 1); |
1501 | dptr += BBSIZE; | 1497 | dptr += BBSIZE; |
1502 | } | 1498 | } |
1503 | 1499 | ||
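A note on be32_add() used in the converted loop above: it adjusts a big-endian value in place, which on most CPUs is a swap, an add and a swap back. The helper below is only a sketch of that shape (the real definition lives in the XFS headers and is not part of this hunk); demo_be32_add is an invented name.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch: bump a big-endian 32-bit word in place, as the loop above
 * does to the cycle word at the start of each basic block. */
static inline void demo_be32_add(__be32 *bep, int delta)
{
	*bep = cpu_to_be32(be32_to_cpu(*bep) + delta);
}

The extra increment in the loop keeps the stamped word from ever equalling XLOG_HEADER_MAGIC_NUM, so recovery can still tell data blocks from record headers.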
@@ -1586,7 +1582,7 @@ xlog_state_finish_copy(xlog_t *log, | |||
1586 | { | 1582 | { |
1587 | spin_lock(&log->l_icloglock); | 1583 | spin_lock(&log->l_icloglock); |
1588 | 1584 | ||
1589 | iclog->ic_header.h_num_logops += record_cnt; | 1585 | be32_add(&iclog->ic_header.h_num_logops, record_cnt); |
1590 | iclog->ic_offset += copy_bytes; | 1586 | iclog->ic_offset += copy_bytes; |
1591 | 1587 | ||
1592 | spin_unlock(&log->l_icloglock); | 1588 | spin_unlock(&log->l_icloglock); |
@@ -1813,7 +1809,7 @@ xlog_write(xfs_mount_t * mp, | |||
1813 | 1809 | ||
1814 | /* start_lsn is the first lsn written to. That's all we need. */ | 1810 | /* start_lsn is the first lsn written to. That's all we need. */ |
1815 | if (! *start_lsn) | 1811 | if (! *start_lsn) |
1816 | *start_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT); | 1812 | *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn); |
1817 | 1813 | ||
1818 | /* This loop writes out as many regions as can fit in the amount | 1814 | /* This loop writes out as many regions as can fit in the amount |
1819 | * of space which was allocated by xlog_state_get_iclog_space(). | 1815 | * of space which was allocated by xlog_state_get_iclog_space(). |
@@ -1983,7 +1979,8 @@ xlog_state_clean_log(xlog_t *log) | |||
1983 | * We don't need to cover the dummy. | 1979 | * We don't need to cover the dummy. |
1984 | */ | 1980 | */ |
1985 | if (!changed && | 1981 | if (!changed && |
1986 | (INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT) == XLOG_COVER_OPS)) { | 1982 | (be32_to_cpu(iclog->ic_header.h_num_logops) == |
1983 | XLOG_COVER_OPS)) { | ||
1987 | changed = 1; | 1984 | changed = 1; |
1988 | } else { | 1985 | } else { |
1989 | /* | 1986 | /* |
@@ -2051,7 +2048,7 @@ xlog_get_lowest_lsn( | |||
2051 | lowest_lsn = 0; | 2048 | lowest_lsn = 0; |
2052 | do { | 2049 | do { |
2053 | if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) { | 2050 | if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) { |
2054 | lsn = INT_GET(lsn_log->ic_header.h_lsn, ARCH_CONVERT); | 2051 | lsn = be64_to_cpu(lsn_log->ic_header.h_lsn); |
2055 | if ((lsn && !lowest_lsn) || | 2052 | if ((lsn && !lowest_lsn) || |
2056 | (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) { | 2053 | (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) { |
2057 | lowest_lsn = lsn; | 2054 | lowest_lsn = lsn; |
@@ -2152,11 +2149,9 @@ xlog_state_do_callback( | |||
2152 | */ | 2149 | */ |
2153 | 2150 | ||
2154 | lowest_lsn = xlog_get_lowest_lsn(log); | 2151 | lowest_lsn = xlog_get_lowest_lsn(log); |
2155 | if (lowest_lsn && ( | 2152 | if (lowest_lsn && |
2156 | XFS_LSN_CMP( | 2153 | XFS_LSN_CMP(lowest_lsn, |
2157 | lowest_lsn, | 2154 | be64_to_cpu(iclog->ic_header.h_lsn)) < 0) { |
2158 | INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) | ||
2159 | )<0)) { | ||
2160 | iclog = iclog->ic_next; | 2155 | iclog = iclog->ic_next; |
2161 | continue; /* Leave this iclog for | 2156 | continue; /* Leave this iclog for |
2162 | * another thread */ | 2157 | * another thread */ |
@@ -2171,11 +2166,10 @@ xlog_state_do_callback( | |||
2171 | * No one else can be here except us. | 2166 | * No one else can be here except us. |
2172 | */ | 2167 | */ |
2173 | spin_lock(&log->l_grant_lock); | 2168 | spin_lock(&log->l_grant_lock); |
2174 | ASSERT(XFS_LSN_CMP( | 2169 | ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn, |
2175 | log->l_last_sync_lsn, | 2170 | be64_to_cpu(iclog->ic_header.h_lsn)) <= 0); |
2176 | INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) | 2171 | log->l_last_sync_lsn = |
2177 | )<=0); | 2172 | be64_to_cpu(iclog->ic_header.h_lsn); |
2178 | log->l_last_sync_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT); | ||
2179 | spin_unlock(&log->l_grant_lock); | 2173 | spin_unlock(&log->l_grant_lock); |
2180 | 2174 | ||
2181 | /* | 2175 | /* |
@@ -2392,8 +2386,8 @@ restart: | |||
2392 | xlog_tic_add_region(ticket, | 2386 | xlog_tic_add_region(ticket, |
2393 | log->l_iclog_hsize, | 2387 | log->l_iclog_hsize, |
2394 | XLOG_REG_TYPE_LRHEADER); | 2388 | XLOG_REG_TYPE_LRHEADER); |
2395 | INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle); | 2389 | head->h_cycle = cpu_to_be32(log->l_curr_cycle); |
2396 | INT_SET(head->h_lsn, ARCH_CONVERT, | 2390 | head->h_lsn = cpu_to_be64( |
2397 | xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); | 2391 | xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); |
2398 | ASSERT(log->l_curr_block >= 0); | 2392 | ASSERT(log->l_curr_block >= 0); |
2399 | } | 2393 | } |
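The h_cycle/h_lsn assignments above lean on the fact that an xfs_lsn_t packs the cycle number into the high 32 bits and the block number into the low 32 bits — that is what xlog_assign_lsn() builds and what the CYCLE_LSN()/BLOCK_LSN() macros in the xfs_log.h hunk further down take apart. A rough standalone sketch of the packing, with invented demo_* names:

#include <stdint.h>

typedef int64_t demo_lsn_t;	/* stands in for xfs_lsn_t */

static inline demo_lsn_t demo_assign_lsn(uint32_t cycle, uint32_t block)
{
	return ((demo_lsn_t)cycle << 32) | block;
}

static inline uint32_t demo_cycle_lsn(demo_lsn_t lsn)	{ return (uint32_t)(lsn >> 32); }
static inline uint32_t demo_block_lsn(demo_lsn_t lsn)	{ return (uint32_t)lsn; }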
@@ -2823,7 +2817,7 @@ xlog_state_release_iclog(xlog_t *log, | |||
2823 | iclog->ic_state == XLOG_STATE_WANT_SYNC) { | 2817 | iclog->ic_state == XLOG_STATE_WANT_SYNC) { |
2824 | sync++; | 2818 | sync++; |
2825 | iclog->ic_state = XLOG_STATE_SYNCING; | 2819 | iclog->ic_state = XLOG_STATE_SYNCING; |
2826 | INT_SET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT, log->l_tail_lsn); | 2820 | iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn); |
2827 | xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn); | 2821 | xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn); |
2828 | /* cycle incremented when incrementing curr_block */ | 2822 | /* cycle incremented when incrementing curr_block */ |
2829 | } | 2823 | } |
@@ -2861,7 +2855,7 @@ xlog_state_switch_iclogs(xlog_t *log, | |||
2861 | if (!eventual_size) | 2855 | if (!eventual_size) |
2862 | eventual_size = iclog->ic_offset; | 2856 | eventual_size = iclog->ic_offset; |
2863 | iclog->ic_state = XLOG_STATE_WANT_SYNC; | 2857 | iclog->ic_state = XLOG_STATE_WANT_SYNC; |
2864 | INT_SET(iclog->ic_header.h_prev_block, ARCH_CONVERT, log->l_prev_block); | 2858 | iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); |
2865 | log->l_prev_block = log->l_curr_block; | 2859 | log->l_prev_block = log->l_curr_block; |
2866 | log->l_prev_cycle = log->l_curr_cycle; | 2860 | log->l_prev_cycle = log->l_curr_cycle; |
2867 | 2861 | ||
@@ -2957,7 +2951,7 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed) | |||
2957 | * the previous sync. | 2951 | * the previous sync. |
2958 | */ | 2952 | */ |
2959 | iclog->ic_refcnt++; | 2953 | iclog->ic_refcnt++; |
2960 | lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT); | 2954 | lsn = be64_to_cpu(iclog->ic_header.h_lsn); |
2961 | xlog_state_switch_iclogs(log, iclog, 0); | 2955 | xlog_state_switch_iclogs(log, iclog, 0); |
2962 | spin_unlock(&log->l_icloglock); | 2956 | spin_unlock(&log->l_icloglock); |
2963 | 2957 | ||
@@ -2965,7 +2959,7 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed) | |||
2965 | return XFS_ERROR(EIO); | 2959 | return XFS_ERROR(EIO); |
2966 | *log_flushed = 1; | 2960 | *log_flushed = 1; |
2967 | spin_lock(&log->l_icloglock); | 2961 | spin_lock(&log->l_icloglock); |
2968 | if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) == lsn && | 2962 | if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn && |
2969 | iclog->ic_state != XLOG_STATE_DIRTY) | 2963 | iclog->ic_state != XLOG_STATE_DIRTY) |
2970 | goto maybe_sleep; | 2964 | goto maybe_sleep; |
2971 | else | 2965 | else |
@@ -3049,9 +3043,9 @@ try_again: | |||
3049 | } | 3043 | } |
3050 | 3044 | ||
3051 | do { | 3045 | do { |
3052 | if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) != lsn) { | 3046 | if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { |
3053 | iclog = iclog->ic_next; | 3047 | iclog = iclog->ic_next; |
3054 | continue; | 3048 | continue; |
3055 | } | 3049 | } |
3056 | 3050 | ||
3057 | if (iclog->ic_state == XLOG_STATE_DIRTY) { | 3051 | if (iclog->ic_state == XLOG_STATE_DIRTY) { |
@@ -3460,18 +3454,18 @@ xlog_verify_iclog(xlog_t *log, | |||
3460 | spin_unlock(&log->l_icloglock); | 3454 | spin_unlock(&log->l_icloglock); |
3461 | 3455 | ||
3462 | /* check log magic numbers */ | 3456 | /* check log magic numbers */ |
3463 | ptr = (xfs_caddr_t) &(iclog->ic_header); | 3457 | if (be32_to_cpu(iclog->ic_header.h_magicno) != XLOG_HEADER_MAGIC_NUM) |
3464 | if (INT_GET(*(uint *)ptr, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM) | ||
3465 | xlog_panic("xlog_verify_iclog: invalid magic num"); | 3458 | xlog_panic("xlog_verify_iclog: invalid magic num"); |
3466 | 3459 | ||
3467 | for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&(iclog->ic_header))+count; | 3460 | ptr = (xfs_caddr_t) &iclog->ic_header; |
3461 | for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count; | ||
3468 | ptr += BBSIZE) { | 3462 | ptr += BBSIZE) { |
3469 | if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM) | 3463 | if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM) |
3470 | xlog_panic("xlog_verify_iclog: unexpected magic num"); | 3464 | xlog_panic("xlog_verify_iclog: unexpected magic num"); |
3471 | } | 3465 | } |
3472 | 3466 | ||
3473 | /* check fields */ | 3467 | /* check fields */ |
3474 | len = INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT); | 3468 | len = be32_to_cpu(iclog->ic_header.h_num_logops); |
3475 | ptr = iclog->ic_datap; | 3469 | ptr = iclog->ic_datap; |
3476 | base_ptr = ptr; | 3470 | base_ptr = ptr; |
3477 | ophead = (xlog_op_header_t *)ptr; | 3471 | ophead = (xlog_op_header_t *)ptr; |
@@ -3512,9 +3506,9 @@ xlog_verify_iclog(xlog_t *log, | |||
3512 | if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { | 3506 | if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { |
3513 | j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); | 3507 | j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
3514 | k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); | 3508 | k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
3515 | op_len = INT_GET(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT); | 3509 | op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]); |
3516 | } else { | 3510 | } else { |
3517 | op_len = INT_GET(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT); | 3511 | op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]); |
3518 | } | 3512 | } |
3519 | } | 3513 | } |
3520 | ptr += sizeof(xlog_op_header_t) + op_len; | 3514 | ptr += sizeof(xlog_op_header_t) + op_len; |
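The j/k arithmetic above (and again in xlog_pack_data()/xlog_unpack_data() in the recovery file) spreads saved cycle words across the record header and its extended headers: each header holds XLOG_HEADER_CYCLE_SIZE / BBSIZE slots, which works out to 64 given the 256-byte xh_cycle_data array in the xfs_log_priv.h hunk below. A tiny sketch of the index split, with invented names:

#define DEMO_SLOTS_PER_HDR	64	/* XLOG_HEADER_CYCLE_SIZE / BBSIZE */

/* Map a basic-block index onto (header, slot): indices 0..63 live in
 * the record header's h_cycle_data[]; larger indices spill over into
 * the extended headers.  e.g. idx = 70 gives j = 1, k = 6, i.e. slot 6
 * of the first extended header. */
static inline void demo_locate_cycle_slot(int idx, int *j, int *k)
{
	*j = idx / DEMO_SLOTS_PER_HDR;
	*k = idx % DEMO_SLOTS_PER_HDR;
}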
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index ebbe93f4f97b..4cdac048df5e 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -22,8 +22,9 @@ | |||
22 | 22 | ||
23 | #define CYCLE_LSN(lsn) ((uint)((lsn)>>32)) | 23 | #define CYCLE_LSN(lsn) ((uint)((lsn)>>32)) |
24 | #define BLOCK_LSN(lsn) ((uint)(lsn)) | 24 | #define BLOCK_LSN(lsn) ((uint)(lsn)) |
25 | |||
25 | /* this is used in a spot where we might otherwise double-endian-flip */ | 26 | /* this is used in a spot where we might otherwise double-endian-flip */ |
26 | #define CYCLE_LSN_DISK(lsn) (((uint *)&(lsn))[0]) | 27 | #define CYCLE_LSN_DISK(lsn) (((__be32 *)&(lsn))[0]) |
27 | 28 | ||
28 | #ifdef __KERNEL__ | 29 | #ifdef __KERNEL__ |
29 | /* | 30 | /* |
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index d555aebca9bc..e008233ee249 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -63,10 +63,10 @@ static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block) | |||
63 | 63 | ||
64 | static inline uint xlog_get_cycle(char *ptr) | 64 | static inline uint xlog_get_cycle(char *ptr) |
65 | { | 65 | { |
66 | if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM) | 66 | if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM) |
67 | return INT_GET(*((uint *)ptr + 1), ARCH_CONVERT); | 67 | return be32_to_cpu(*((__be32 *)ptr + 1)); |
68 | else | 68 | else |
69 | return INT_GET(*(uint *)ptr, ARCH_CONVERT); | 69 | return be32_to_cpu(*(__be32 *)ptr); |
70 | } | 70 | } |
71 | 71 | ||
72 | #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1) | 72 | #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1) |
@@ -85,9 +85,9 @@ static inline uint xlog_get_cycle(char *ptr) | |||
85 | * | 85 | * |
86 | * this has endian issues, of course. | 86 | * this has endian issues, of course. |
87 | */ | 87 | */ |
88 | static inline uint xlog_get_client_id(uint i) | 88 | static inline uint xlog_get_client_id(__be32 i) |
89 | { | 89 | { |
90 | return INT_GET(i, ARCH_CONVERT) >> 24; | 90 | return be32_to_cpu(i) >> 24; |
91 | } | 91 | } |
92 | 92 | ||
93 | #define xlog_panic(args...) cmn_err(CE_PANIC, ## args) | 93 | #define xlog_panic(args...) cmn_err(CE_PANIC, ## args) |
@@ -287,25 +287,25 @@ typedef struct xlog_op_header { | |||
287 | #endif | 287 | #endif |
288 | 288 | ||
289 | typedef struct xlog_rec_header { | 289 | typedef struct xlog_rec_header { |
290 | uint h_magicno; /* log record (LR) identifier : 4 */ | 290 | __be32 h_magicno; /* log record (LR) identifier : 4 */ |
291 | uint h_cycle; /* write cycle of log : 4 */ | 291 | __be32 h_cycle; /* write cycle of log : 4 */ |
292 | int h_version; /* LR version : 4 */ | 292 | __be32 h_version; /* LR version : 4 */ |
293 | int h_len; /* len in bytes; should be 64-bit aligned: 4 */ | 293 | __be32 h_len; /* len in bytes; should be 64-bit aligned: 4 */ |
294 | xfs_lsn_t h_lsn; /* lsn of this LR : 8 */ | 294 | __be64 h_lsn; /* lsn of this LR : 8 */ |
295 | xfs_lsn_t h_tail_lsn; /* lsn of 1st LR w/ buffers not committed: 8 */ | 295 | __be64 h_tail_lsn; /* lsn of 1st LR w/ buffers not committed: 8 */ |
296 | uint h_chksum; /* may not be used; non-zero if used : 4 */ | 296 | __be32 h_chksum; /* may not be used; non-zero if used : 4 */ |
297 | int h_prev_block; /* block number to previous LR : 4 */ | 297 | __be32 h_prev_block; /* block number to previous LR : 4 */ |
298 | int h_num_logops; /* number of log operations in this LR : 4 */ | 298 | __be32 h_num_logops; /* number of log operations in this LR : 4 */ |
299 | uint h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; | 299 | __be32 h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; |
300 | /* new fields */ | 300 | /* new fields */ |
301 | int h_fmt; /* format of log record : 4 */ | 301 | __be32 h_fmt; /* format of log record : 4 */ |
302 | uuid_t h_fs_uuid; /* uuid of FS : 16 */ | 302 | uuid_t h_fs_uuid; /* uuid of FS : 16 */ |
303 | int h_size; /* iclog size : 4 */ | 303 | __be32 h_size; /* iclog size : 4 */ |
304 | } xlog_rec_header_t; | 304 | } xlog_rec_header_t; |
305 | 305 | ||
306 | typedef struct xlog_rec_ext_header { | 306 | typedef struct xlog_rec_ext_header { |
307 | uint xh_cycle; /* write cycle of log : 4 */ | 307 | __be32 xh_cycle; /* write cycle of log : 4 */ |
308 | uint xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /* : 256 */ | 308 | __be32 xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /* : 256 */ |
309 | } xlog_rec_ext_header_t; | 309 | } xlog_rec_ext_header_t; |
310 | 310 | ||
311 | #ifdef __KERNEL__ | 311 | #ifdef __KERNEL__ |
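The payoff of retyping these fields as __be32/__be64 is that the endianness is now visible to sparse (make C=1): using an on-disk value as a native integer without a conversion draws a warning instead of silently working only on big-endian hosts. A minimal illustration with an invented structure:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_rec {		/* illustrative, not an XFS structure */
	__be32	r_len;
};

static inline int demo_check(const struct demo_rec *r)
{
	/* Fine: explicit conversion at the point of use. */
	if (be32_to_cpu(r->r_len) > 4096)
		return -1;

	/*
	 * This would draw a sparse warning, because it compares a
	 * __be32 with a native integer:
	 *
	 *	if (r->r_len > 4096)
	 *		return -1;
	 */
	return 0;
}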
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index f27b6e93a682..35e098d4d749 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -198,7 +198,7 @@ xlog_header_check_dump( | |||
198 | cmn_err(CE_DEBUG, " log : uuid = "); | 198 | cmn_err(CE_DEBUG, " log : uuid = "); |
199 | for (b = 0; b < 16; b++) | 199 | for (b = 0; b < 16; b++) |
200 | cmn_err(CE_DEBUG, "%02x",((uchar_t *)&head->h_fs_uuid)[b]); | 200 | cmn_err(CE_DEBUG, "%02x",((uchar_t *)&head->h_fs_uuid)[b]); |
201 | cmn_err(CE_DEBUG, ", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT)); | 201 | cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt)); |
202 | } | 202 | } |
203 | #else | 203 | #else |
204 | #define xlog_header_check_dump(mp, head) | 204 | #define xlog_header_check_dump(mp, head) |
@@ -212,14 +212,14 @@ xlog_header_check_recover( | |||
212 | xfs_mount_t *mp, | 212 | xfs_mount_t *mp, |
213 | xlog_rec_header_t *head) | 213 | xlog_rec_header_t *head) |
214 | { | 214 | { |
215 | ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); | 215 | ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM); |
216 | 216 | ||
217 | /* | 217 | /* |
218 | * IRIX doesn't write the h_fmt field and leaves it zeroed | 218 | * IRIX doesn't write the h_fmt field and leaves it zeroed |
219 | * (XLOG_FMT_UNKNOWN). This stops us from trying to recover | 219 | * (XLOG_FMT_UNKNOWN). This stops us from trying to recover |
220 | * a dirty log created in IRIX. | 220 | * a dirty log created in IRIX. |
221 | */ | 221 | */ |
222 | if (unlikely(INT_GET(head->h_fmt, ARCH_CONVERT) != XLOG_FMT)) { | 222 | if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) { |
223 | xlog_warn( | 223 | xlog_warn( |
224 | "XFS: dirty log written in incompatible format - can't recover"); | 224 | "XFS: dirty log written in incompatible format - can't recover"); |
225 | xlog_header_check_dump(mp, head); | 225 | xlog_header_check_dump(mp, head); |
@@ -245,7 +245,7 @@ xlog_header_check_mount( | |||
245 | xfs_mount_t *mp, | 245 | xfs_mount_t *mp, |
246 | xlog_rec_header_t *head) | 246 | xlog_rec_header_t *head) |
247 | { | 247 | { |
248 | ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); | 248 | ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM); |
249 | 249 | ||
250 | if (uuid_is_nil(&head->h_fs_uuid)) { | 250 | if (uuid_is_nil(&head->h_fs_uuid)) { |
251 | /* | 251 | /* |
@@ -447,8 +447,7 @@ xlog_find_verify_log_record( | |||
447 | 447 | ||
448 | head = (xlog_rec_header_t *)offset; | 448 | head = (xlog_rec_header_t *)offset; |
449 | 449 | ||
450 | if (XLOG_HEADER_MAGIC_NUM == | 450 | if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno)) |
451 | INT_GET(head->h_magicno, ARCH_CONVERT)) | ||
452 | break; | 451 | break; |
453 | 452 | ||
454 | if (!smallmem) | 453 | if (!smallmem) |
@@ -480,7 +479,7 @@ xlog_find_verify_log_record( | |||
480 | * record do we update last_blk. | 479 | * record do we update last_blk. |
481 | */ | 480 | */ |
482 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { | 481 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { |
483 | uint h_size = INT_GET(head->h_size, ARCH_CONVERT); | 482 | uint h_size = be32_to_cpu(head->h_size); |
484 | 483 | ||
485 | xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE; | 484 | xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE; |
486 | if (h_size % XLOG_HEADER_CYCLE_SIZE) | 485 | if (h_size % XLOG_HEADER_CYCLE_SIZE) |
@@ -489,8 +488,8 @@ xlog_find_verify_log_record( | |||
489 | xhdrs = 1; | 488 | xhdrs = 1; |
490 | } | 489 | } |
491 | 490 | ||
492 | if (*last_blk - i + extra_bblks | 491 | if (*last_blk - i + extra_bblks != |
493 | != BTOBB(INT_GET(head->h_len, ARCH_CONVERT)) + xhdrs) | 492 | BTOBB(be32_to_cpu(head->h_len)) + xhdrs) |
494 | *last_blk = i; | 493 | *last_blk = i; |
495 | 494 | ||
496 | out: | 495 | out: |
@@ -823,8 +822,7 @@ xlog_find_tail( | |||
823 | if ((error = xlog_bread(log, i, 1, bp))) | 822 | if ((error = xlog_bread(log, i, 1, bp))) |
824 | goto bread_err; | 823 | goto bread_err; |
825 | offset = xlog_align(log, i, 1, bp); | 824 | offset = xlog_align(log, i, 1, bp); |
826 | if (XLOG_HEADER_MAGIC_NUM == | 825 | if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) { |
827 | INT_GET(*(uint *)offset, ARCH_CONVERT)) { | ||
828 | found = 1; | 826 | found = 1; |
829 | break; | 827 | break; |
830 | } | 828 | } |
@@ -841,7 +839,7 @@ xlog_find_tail( | |||
841 | goto bread_err; | 839 | goto bread_err; |
842 | offset = xlog_align(log, i, 1, bp); | 840 | offset = xlog_align(log, i, 1, bp); |
843 | if (XLOG_HEADER_MAGIC_NUM == | 841 | if (XLOG_HEADER_MAGIC_NUM == |
844 | INT_GET(*(uint*)offset, ARCH_CONVERT)) { | 842 | be32_to_cpu(*(__be32 *)offset)) { |
845 | found = 2; | 843 | found = 2; |
846 | break; | 844 | break; |
847 | } | 845 | } |
@@ -855,7 +853,7 @@ xlog_find_tail( | |||
855 | 853 | ||
856 | /* find blk_no of tail of log */ | 854 | /* find blk_no of tail of log */ |
857 | rhead = (xlog_rec_header_t *)offset; | 855 | rhead = (xlog_rec_header_t *)offset; |
858 | *tail_blk = BLOCK_LSN(INT_GET(rhead->h_tail_lsn, ARCH_CONVERT)); | 856 | *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn)); |
859 | 857 | ||
860 | /* | 858 | /* |
861 | * Reset log values according to the state of the log when we | 859 | * Reset log values according to the state of the log when we |
@@ -869,11 +867,11 @@ xlog_find_tail( | |||
869 | */ | 867 | */ |
870 | log->l_prev_block = i; | 868 | log->l_prev_block = i; |
871 | log->l_curr_block = (int)*head_blk; | 869 | log->l_curr_block = (int)*head_blk; |
872 | log->l_curr_cycle = INT_GET(rhead->h_cycle, ARCH_CONVERT); | 870 | log->l_curr_cycle = be32_to_cpu(rhead->h_cycle); |
873 | if (found == 2) | 871 | if (found == 2) |
874 | log->l_curr_cycle++; | 872 | log->l_curr_cycle++; |
875 | log->l_tail_lsn = INT_GET(rhead->h_tail_lsn, ARCH_CONVERT); | 873 | log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn); |
876 | log->l_last_sync_lsn = INT_GET(rhead->h_lsn, ARCH_CONVERT); | 874 | log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn); |
877 | log->l_grant_reserve_cycle = log->l_curr_cycle; | 875 | log->l_grant_reserve_cycle = log->l_curr_cycle; |
878 | log->l_grant_reserve_bytes = BBTOB(log->l_curr_block); | 876 | log->l_grant_reserve_bytes = BBTOB(log->l_curr_block); |
879 | log->l_grant_write_cycle = log->l_curr_cycle; | 877 | log->l_grant_write_cycle = log->l_curr_cycle; |
@@ -891,8 +889,8 @@ xlog_find_tail( | |||
891 | * unmount record rather than the block after it. | 889 | * unmount record rather than the block after it. |
892 | */ | 890 | */ |
893 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { | 891 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { |
894 | int h_size = INT_GET(rhead->h_size, ARCH_CONVERT); | 892 | int h_size = be32_to_cpu(rhead->h_size); |
895 | int h_version = INT_GET(rhead->h_version, ARCH_CONVERT); | 893 | int h_version = be32_to_cpu(rhead->h_version); |
896 | 894 | ||
897 | if ((h_version & XLOG_VERSION_2) && | 895 | if ((h_version & XLOG_VERSION_2) && |
898 | (h_size > XLOG_HEADER_CYCLE_SIZE)) { | 896 | (h_size > XLOG_HEADER_CYCLE_SIZE)) { |
@@ -906,10 +904,10 @@ xlog_find_tail( | |||
906 | hblks = 1; | 904 | hblks = 1; |
907 | } | 905 | } |
908 | after_umount_blk = (i + hblks + (int) | 906 | after_umount_blk = (i + hblks + (int) |
909 | BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT))) % log->l_logBBsize; | 907 | BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize; |
910 | tail_lsn = log->l_tail_lsn; | 908 | tail_lsn = log->l_tail_lsn; |
911 | if (*head_blk == after_umount_blk && | 909 | if (*head_blk == after_umount_blk && |
912 | INT_GET(rhead->h_num_logops, ARCH_CONVERT) == 1) { | 910 | be32_to_cpu(rhead->h_num_logops) == 1) { |
913 | umount_data_blk = (i + hblks) % log->l_logBBsize; | 911 | umount_data_blk = (i + hblks) % log->l_logBBsize; |
914 | if ((error = xlog_bread(log, umount_data_blk, 1, bp))) { | 912 | if ((error = xlog_bread(log, umount_data_blk, 1, bp))) { |
915 | goto bread_err; | 913 | goto bread_err; |
@@ -1100,14 +1098,13 @@ xlog_add_record( | |||
1100 | xlog_rec_header_t *recp = (xlog_rec_header_t *)buf; | 1098 | xlog_rec_header_t *recp = (xlog_rec_header_t *)buf; |
1101 | 1099 | ||
1102 | memset(buf, 0, BBSIZE); | 1100 | memset(buf, 0, BBSIZE); |
1103 | INT_SET(recp->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM); | 1101 | recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); |
1104 | INT_SET(recp->h_cycle, ARCH_CONVERT, cycle); | 1102 | recp->h_cycle = cpu_to_be32(cycle); |
1105 | INT_SET(recp->h_version, ARCH_CONVERT, | 1103 | recp->h_version = cpu_to_be32( |
1106 | XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1); | 1104 | XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1); |
1107 | INT_SET(recp->h_lsn, ARCH_CONVERT, xlog_assign_lsn(cycle, block)); | 1105 | recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block)); |
1108 | INT_SET(recp->h_tail_lsn, ARCH_CONVERT, | 1106 | recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block)); |
1109 | xlog_assign_lsn(tail_cycle, tail_block)); | 1107 | recp->h_fmt = cpu_to_be32(XLOG_FMT); |
1110 | INT_SET(recp->h_fmt, ARCH_CONVERT, XLOG_FMT); | ||
1111 | memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t)); | 1108 | memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t)); |
1112 | } | 1109 | } |
1113 | 1110 | ||
@@ -2214,7 +2211,7 @@ xlog_recover_do_buffer_trans( | |||
2214 | * overlap with future reads of those inodes. | 2211 | * overlap with future reads of those inodes. |
2215 | */ | 2212 | */ |
2216 | if (XFS_DINODE_MAGIC == | 2213 | if (XFS_DINODE_MAGIC == |
2217 | INT_GET(*((__uint16_t *)(xfs_buf_offset(bp, 0))), ARCH_CONVERT) && | 2214 | be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) && |
2218 | (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize, | 2215 | (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize, |
2219 | (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) { | 2216 | (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) { |
2220 | XFS_BUF_STALE(bp); | 2217 | XFS_BUF_STALE(bp); |
@@ -2584,8 +2581,7 @@ xlog_recover_do_dquot_trans( | |||
2584 | /* | 2581 | /* |
2585 | * This type of quotas was turned off, so ignore this record. | 2582 | * This type of quotas was turned off, so ignore this record. |
2586 | */ | 2583 | */ |
2587 | type = INT_GET(recddq->d_flags, ARCH_CONVERT) & | 2584 | type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); |
2588 | (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); | ||
2589 | ASSERT(type); | 2585 | ASSERT(type); |
2590 | if (log->l_quotaoffs_flag & type) | 2586 | if (log->l_quotaoffs_flag & type) |
2591 | return (0); | 2587 | return (0); |
@@ -2898,8 +2894,8 @@ xlog_recover_process_data( | |||
2898 | unsigned long hash; | 2894 | unsigned long hash; |
2899 | uint flags; | 2895 | uint flags; |
2900 | 2896 | ||
2901 | lp = dp + INT_GET(rhead->h_len, ARCH_CONVERT); | 2897 | lp = dp + be32_to_cpu(rhead->h_len); |
2902 | num_logops = INT_GET(rhead->h_num_logops, ARCH_CONVERT); | 2898 | num_logops = be32_to_cpu(rhead->h_num_logops); |
2903 | 2899 | ||
2904 | /* check the log format matches our own - else we can't recover */ | 2900 | /* check the log format matches our own - else we can't recover */ |
2905 | if (xlog_header_check_recover(log->l_mp, rhead)) | 2901 | if (xlog_header_check_recover(log->l_mp, rhead)) |
@@ -2922,7 +2918,7 @@ xlog_recover_process_data( | |||
2922 | if (trans == NULL) { /* not found; add new tid */ | 2918 | if (trans == NULL) { /* not found; add new tid */ |
2923 | if (ohead->oh_flags & XLOG_START_TRANS) | 2919 | if (ohead->oh_flags & XLOG_START_TRANS) |
2924 | xlog_recover_new_tid(&rhash[hash], tid, | 2920 | xlog_recover_new_tid(&rhash[hash], tid, |
2925 | INT_GET(rhead->h_lsn, ARCH_CONVERT)); | 2921 | be64_to_cpu(rhead->h_lsn)); |
2926 | } else { | 2922 | } else { |
2927 | ASSERT(dp + be32_to_cpu(ohead->oh_len) <= lp); | 2923 | ASSERT(dp + be32_to_cpu(ohead->oh_len) <= lp); |
2928 | flags = ohead->oh_flags & ~XLOG_END_TRANS; | 2924 | flags = ohead->oh_flags & ~XLOG_END_TRANS; |
@@ -3313,16 +3309,16 @@ xlog_pack_data_checksum( | |||
3313 | int size) | 3309 | int size) |
3314 | { | 3310 | { |
3315 | int i; | 3311 | int i; |
3316 | uint *up; | 3312 | __be32 *up; |
3317 | uint chksum = 0; | 3313 | uint chksum = 0; |
3318 | 3314 | ||
3319 | up = (uint *)iclog->ic_datap; | 3315 | up = (__be32 *)iclog->ic_datap; |
3320 | /* divide length by 4 to get # words */ | 3316 | /* divide length by 4 to get # words */ |
3321 | for (i = 0; i < (size >> 2); i++) { | 3317 | for (i = 0; i < (size >> 2); i++) { |
3322 | chksum ^= INT_GET(*up, ARCH_CONVERT); | 3318 | chksum ^= be32_to_cpu(*up); |
3323 | up++; | 3319 | up++; |
3324 | } | 3320 | } |
3325 | INT_SET(iclog->ic_header.h_chksum, ARCH_CONVERT, chksum); | 3321 | iclog->ic_header.h_chksum = cpu_to_be32(chksum); |
3326 | } | 3322 | } |
3327 | #else | 3323 | #else |
3328 | #define xlog_pack_data_checksum(log, iclog, size) | 3324 | #define xlog_pack_data_checksum(log, iclog, size) |
@@ -3339,7 +3335,7 @@ xlog_pack_data( | |||
3339 | { | 3335 | { |
3340 | int i, j, k; | 3336 | int i, j, k; |
3341 | int size = iclog->ic_offset + roundoff; | 3337 | int size = iclog->ic_offset + roundoff; |
3342 | uint cycle_lsn; | 3338 | __be32 cycle_lsn; |
3343 | xfs_caddr_t dp; | 3339 | xfs_caddr_t dp; |
3344 | xlog_in_core_2_t *xhdr; | 3340 | xlog_in_core_2_t *xhdr; |
3345 | 3341 | ||
@@ -3350,8 +3346,8 @@ xlog_pack_data( | |||
3350 | dp = iclog->ic_datap; | 3346 | dp = iclog->ic_datap; |
3351 | for (i = 0; i < BTOBB(size) && | 3347 | for (i = 0; i < BTOBB(size) && |
3352 | i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { | 3348 | i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { |
3353 | iclog->ic_header.h_cycle_data[i] = *(uint *)dp; | 3349 | iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp; |
3354 | *(uint *)dp = cycle_lsn; | 3350 | *(__be32 *)dp = cycle_lsn; |
3355 | dp += BBSIZE; | 3351 | dp += BBSIZE; |
3356 | } | 3352 | } |
3357 | 3353 | ||
@@ -3360,8 +3356,8 @@ xlog_pack_data( | |||
3360 | for ( ; i < BTOBB(size); i++) { | 3356 | for ( ; i < BTOBB(size); i++) { |
3361 | j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); | 3357 | j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
3362 | k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); | 3358 | k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
3363 | xhdr[j].hic_xheader.xh_cycle_data[k] = *(uint *)dp; | 3359 | xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp; |
3364 | *(uint *)dp = cycle_lsn; | 3360 | *(__be32 *)dp = cycle_lsn; |
3365 | dp += BBSIZE; | 3361 | dp += BBSIZE; |
3366 | } | 3362 | } |
3367 | 3363 | ||
@@ -3378,21 +3374,21 @@ xlog_unpack_data_checksum( | |||
3378 | xfs_caddr_t dp, | 3374 | xfs_caddr_t dp, |
3379 | xlog_t *log) | 3375 | xlog_t *log) |
3380 | { | 3376 | { |
3381 | uint *up = (uint *)dp; | 3377 | __be32 *up = (__be32 *)dp; |
3382 | uint chksum = 0; | 3378 | uint chksum = 0; |
3383 | int i; | 3379 | int i; |
3384 | 3380 | ||
3385 | /* divide length by 4 to get # words */ | 3381 | /* divide length by 4 to get # words */ |
3386 | for (i=0; i < INT_GET(rhead->h_len, ARCH_CONVERT) >> 2; i++) { | 3382 | for (i=0; i < be32_to_cpu(rhead->h_len) >> 2; i++) { |
3387 | chksum ^= INT_GET(*up, ARCH_CONVERT); | 3383 | chksum ^= be32_to_cpu(*up); |
3388 | up++; | 3384 | up++; |
3389 | } | 3385 | } |
3390 | if (chksum != INT_GET(rhead->h_chksum, ARCH_CONVERT)) { | 3386 | if (chksum != be32_to_cpu(rhead->h_chksum)) { |
3391 | if (rhead->h_chksum || | 3387 | if (rhead->h_chksum || |
3392 | ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) { | 3388 | ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) { |
3393 | cmn_err(CE_DEBUG, | 3389 | cmn_err(CE_DEBUG, |
3394 | "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n", | 3390 | "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n", |
3395 | INT_GET(rhead->h_chksum, ARCH_CONVERT), chksum); | 3391 | be32_to_cpu(rhead->h_chksum), chksum); |
3396 | cmn_err(CE_DEBUG, | 3392 | cmn_err(CE_DEBUG, |
3397 | "XFS: Disregard message if filesystem was created with non-DEBUG kernel"); | 3393 | "XFS: Disregard message if filesystem was created with non-DEBUG kernel"); |
3398 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { | 3394 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { |
@@ -3416,18 +3412,18 @@ xlog_unpack_data( | |||
3416 | int i, j, k; | 3412 | int i, j, k; |
3417 | xlog_in_core_2_t *xhdr; | 3413 | xlog_in_core_2_t *xhdr; |
3418 | 3414 | ||
3419 | for (i = 0; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)) && | 3415 | for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) && |
3420 | i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { | 3416 | i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { |
3421 | *(uint *)dp = *(uint *)&rhead->h_cycle_data[i]; | 3417 | *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i]; |
3422 | dp += BBSIZE; | 3418 | dp += BBSIZE; |
3423 | } | 3419 | } |
3424 | 3420 | ||
3425 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { | 3421 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { |
3426 | xhdr = (xlog_in_core_2_t *)rhead; | 3422 | xhdr = (xlog_in_core_2_t *)rhead; |
3427 | for ( ; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); i++) { | 3423 | for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) { |
3428 | j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); | 3424 | j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
3429 | k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); | 3425 | k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
3430 | *(uint *)dp = xhdr[j].hic_xheader.xh_cycle_data[k]; | 3426 | *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k]; |
3431 | dp += BBSIZE; | 3427 | dp += BBSIZE; |
3432 | } | 3428 | } |
3433 | } | 3429 | } |
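For context on the xlog_pack_data()/xlog_unpack_data() hunks above: before a record is written, the first 32-bit word of every 512-byte basic block is stashed in h_cycle_data[] (or an extended header) and replaced with the record's cycle number; recovery reverses that so the payload is intact again. The userspace-style sketch below shows the round trip for a single header's worth of blocks; all demo_* names are invented, and the real code stores __be32 words in place rather than using memcpy().

#include <stdint.h>
#include <string.h>

#define DEMO_BBSIZE	512
#define DEMO_SLOTS	64		/* one header's worth of blocks */

/* Save each block's leading word and stamp the cycle in its place. */
static void demo_pack(char *data, int nblocks, uint32_t cycle_word,
		      uint32_t saved[DEMO_SLOTS])
{
	for (int i = 0; i < nblocks && i < DEMO_SLOTS; i++) {
		memcpy(&saved[i], data + i * DEMO_BBSIZE, sizeof(uint32_t));
		memcpy(data + i * DEMO_BBSIZE, &cycle_word, sizeof(uint32_t));
	}
}

/* Recovery: put the saved words back so the record data is usable. */
static void demo_unpack(char *data, int nblocks,
			const uint32_t saved[DEMO_SLOTS])
{
	for (int i = 0; i < nblocks && i < DEMO_SLOTS; i++)
		memcpy(data + i * DEMO_BBSIZE, &saved[i], sizeof(uint32_t));
}

The stamped cycle word is what lets recovery scan the log and judge how far writes in a given cycle progressed; the copy kept in the header exists only so the original data can be restored afterwards.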
@@ -3443,24 +3439,21 @@ xlog_valid_rec_header( | |||
3443 | { | 3439 | { |
3444 | int hlen; | 3440 | int hlen; |
3445 | 3441 | ||
3446 | if (unlikely( | 3442 | if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) { |
3447 | (INT_GET(rhead->h_magicno, ARCH_CONVERT) != | ||
3448 | XLOG_HEADER_MAGIC_NUM))) { | ||
3449 | XFS_ERROR_REPORT("xlog_valid_rec_header(1)", | 3443 | XFS_ERROR_REPORT("xlog_valid_rec_header(1)", |
3450 | XFS_ERRLEVEL_LOW, log->l_mp); | 3444 | XFS_ERRLEVEL_LOW, log->l_mp); |
3451 | return XFS_ERROR(EFSCORRUPTED); | 3445 | return XFS_ERROR(EFSCORRUPTED); |
3452 | } | 3446 | } |
3453 | if (unlikely( | 3447 | if (unlikely( |
3454 | (!rhead->h_version || | 3448 | (!rhead->h_version || |
3455 | (INT_GET(rhead->h_version, ARCH_CONVERT) & | 3449 | (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) { |
3456 | (~XLOG_VERSION_OKBITS)) != 0))) { | ||
3457 | xlog_warn("XFS: %s: unrecognised log version (%d).", | 3450 | xlog_warn("XFS: %s: unrecognised log version (%d).", |
3458 | __FUNCTION__, INT_GET(rhead->h_version, ARCH_CONVERT)); | 3451 | __FUNCTION__, be32_to_cpu(rhead->h_version)); |
3459 | return XFS_ERROR(EIO); | 3452 | return XFS_ERROR(EIO); |
3460 | } | 3453 | } |
3461 | 3454 | ||
3462 | /* LR body must have data or it wouldn't have been written */ | 3455 | /* LR body must have data or it wouldn't have been written */ |
3463 | hlen = INT_GET(rhead->h_len, ARCH_CONVERT); | 3456 | hlen = be32_to_cpu(rhead->h_len); |
3464 | if (unlikely( hlen <= 0 || hlen > INT_MAX )) { | 3457 | if (unlikely( hlen <= 0 || hlen > INT_MAX )) { |
3465 | XFS_ERROR_REPORT("xlog_valid_rec_header(2)", | 3458 | XFS_ERROR_REPORT("xlog_valid_rec_header(2)", |
3466 | XFS_ERRLEVEL_LOW, log->l_mp); | 3459 | XFS_ERRLEVEL_LOW, log->l_mp); |
@@ -3520,9 +3513,8 @@ xlog_do_recovery_pass( | |||
3520 | error = xlog_valid_rec_header(log, rhead, tail_blk); | 3513 | error = xlog_valid_rec_header(log, rhead, tail_blk); |
3521 | if (error) | 3514 | if (error) |
3522 | goto bread_err1; | 3515 | goto bread_err1; |
3523 | h_size = INT_GET(rhead->h_size, ARCH_CONVERT); | 3516 | h_size = be32_to_cpu(rhead->h_size); |
3524 | if ((INT_GET(rhead->h_version, ARCH_CONVERT) | 3517 | if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) && |
3525 | & XLOG_VERSION_2) && | ||
3526 | (h_size > XLOG_HEADER_CYCLE_SIZE)) { | 3518 | (h_size > XLOG_HEADER_CYCLE_SIZE)) { |
3527 | hblks = h_size / XLOG_HEADER_CYCLE_SIZE; | 3519 | hblks = h_size / XLOG_HEADER_CYCLE_SIZE; |
3528 | if (h_size % XLOG_HEADER_CYCLE_SIZE) | 3520 | if (h_size % XLOG_HEADER_CYCLE_SIZE) |
@@ -3559,7 +3551,7 @@ xlog_do_recovery_pass( | |||
3559 | goto bread_err2; | 3551 | goto bread_err2; |
3560 | 3552 | ||
3561 | /* blocks in data section */ | 3553 | /* blocks in data section */ |
3562 | bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); | 3554 | bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); |
3563 | error = xlog_bread(log, blk_no + hblks, bblks, dbp); | 3555 | error = xlog_bread(log, blk_no + hblks, bblks, dbp); |
3564 | if (error) | 3556 | if (error) |
3565 | goto bread_err2; | 3557 | goto bread_err2; |
@@ -3634,7 +3626,7 @@ xlog_do_recovery_pass( | |||
3634 | if (error) | 3626 | if (error) |
3635 | goto bread_err2; | 3627 | goto bread_err2; |
3636 | 3628 | ||
3637 | bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); | 3629 | bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); |
3638 | blk_no += hblks; | 3630 | blk_no += hblks; |
3639 | 3631 | ||
3640 | /* Read in data for log record */ | 3632 | /* Read in data for log record */ |
@@ -3705,7 +3697,7 @@ xlog_do_recovery_pass( | |||
3705 | error = xlog_valid_rec_header(log, rhead, blk_no); | 3697 | error = xlog_valid_rec_header(log, rhead, blk_no); |
3706 | if (error) | 3698 | if (error) |
3707 | goto bread_err2; | 3699 | goto bread_err2; |
3708 | bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); | 3700 | bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); |
3709 | if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp))) | 3701 | if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp))) |
3710 | goto bread_err2; | 3702 | goto bread_err2; |
3711 | offset = xlog_align(log, blk_no+hblks, bblks, dbp); | 3703 | offset = xlog_align(log, blk_no+hblks, bblks, dbp); |