author		Christoph Hellwig <hch@infradead.org>	2007-10-11 20:59:34 -0400
committer	Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>	2008-02-07 02:11:47 -0500
commit		b53e675dc868c4844ecbcce9149cf68e4299231d (patch)
tree		e49928f2faa73d2f59b80647814835343c9379e5	/fs/xfs/xfs_log.c
parent		67fcb7bfb69eb1072c7e2dd6b46fa34db11dd587 (diff)
[XFS] xlog_rec_header/xlog_rec_ext_header endianness annotations
Mostly a trivial conversion, with one exception: h_num_logops was
previously kept in native endianness and only converted to big endian in
xlog_sync, but we now always keep it big endian. With today's CPUs' fast
byteswap instructions that is not a performance issue, and the new
variant keeps the code clean and maintainable.
SGI-PV: 971186
SGI-Modid: xfs-linux-melb:xfs-kern:29821a
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
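
For readers not familiar with the annotation style, the patch replaces XFS's old INT_GET/INT_SET/ARCH_CONVERT byte-swapping macros with fields that are stored big-endian at all times and converted only at the point of access through the kernel's cpu_to_be32()/be32_to_cpu() (and 64-bit) helpers, which sparse can type-check via __be32/__be64. Below is a minimal userspace sketch of the before/after pattern, using htobe32()/be32toh() from <endian.h> as stand-ins for the kernel helpers; the struct, field names, and magic value are illustrative only, not the real xlog_rec_header_t layout:

```c
#include <endian.h>   /* htobe32()/be32toh(): userspace stand-ins for cpu_to_be32()/be32_to_cpu() */
#include <stdint.h>
#include <stdio.h>

/* Illustrative record header: the on-disk fields are always big-endian. */
struct demo_rec_header {
	uint32_t h_magicno;      /* stored big-endian (the kernel marks such fields __be32) */
	uint32_t h_num_logops;   /* stored big-endian, even while in memory */
};

int main(void)
{
	struct demo_rec_header head;

	/* Old style kept some fields native-endian and swapped them late:
	 *     INT_SET(head.h_magicno, ARCH_CONVERT, 0xfeedbabe);
	 * New style converts at the point of assignment ...             */
	head.h_magicno = htobe32(0xfeedbabe);
	head.h_num_logops = htobe32(0);

	/* ... increments big-endian fields in place (be32_add() in the patch) ... */
	head.h_num_logops = htobe32(be32toh(head.h_num_logops) + 1);

	/* ... and converts back only when the CPU needs the value. */
	printf("magic %#x, logops %u\n",
	       be32toh(head.h_magicno), be32toh(head.h_num_logops));
	return 0;
}
```

The diff below applies this substitution throughout fs/xfs/xfs_log.c; the only behavioural difference noted in the commit message is that h_num_logops no longer needs the late byte swap that xlog_sync() used to do.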
Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r--	fs/xfs/xfs_log.c	90
1 file changed, 42 insertions, 48 deletions
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 5ff4643b72c7..6e3d8084d8b3 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1227,12 +1227,12 @@ xlog_alloc_log(xfs_mount_t *mp,
 
 		head = &iclog->ic_header;
 		memset(head, 0, sizeof(xlog_rec_header_t));
-		INT_SET(head->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
-		INT_SET(head->h_version, ARCH_CONVERT,
+		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
+		head->h_version = cpu_to_be32(
			XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
-		INT_SET(head->h_size, ARCH_CONVERT, log->l_iclog_size);
+		head->h_size = cpu_to_be32(log->l_iclog_size);
 		/* new fields */
-		INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT);
+		head->h_fmt = cpu_to_be32(XLOG_FMT);
 		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
 
 
@@ -1378,7 +1378,7 @@ xlog_sync(xlog_t *log,
 {
 	xfs_caddr_t	dptr;		/* pointer to byte sized element */
 	xfs_buf_t	*bp;
-	int		i, ops;
+	int		i;
 	uint		count;		/* byte count of bwrite */
 	uint		count_init;	/* initial count before roundup */
 	int		roundoff;	/* roundoff to BB or stripe */
@@ -1417,21 +1417,17 @@ xlog_sync(xlog_t *log,
 
 	/* real byte length */
 	if (v2) {
-		INT_SET(iclog->ic_header.h_len,
-			ARCH_CONVERT,
-			iclog->ic_offset + roundoff);
+		iclog->ic_header.h_len =
+			cpu_to_be32(iclog->ic_offset + roundoff);
 	} else {
-		INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset);
+		iclog->ic_header.h_len =
+			cpu_to_be32(iclog->ic_offset);
 	}
 
-	/* put ops count in correct order */
-	ops = iclog->ic_header.h_num_logops;
-	INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops);
-
 	bp = iclog->ic_bp;
 	ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1);
 	XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
-	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)));
+	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));
 
 	XFS_STATS_ADD(xs_log_blocks, BTOBB(count));
 
@@ -1494,10 +1490,10 @@ xlog_sync(xlog_t *log,
 		 * a new cycle.  Watch out for the header magic number
 		 * case, though.
 		 */
-		for (i=0; i<split; i += BBSIZE) {
-			INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
-			if (INT_GET(*(uint *)dptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
-				INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
+		for (i = 0; i < split; i += BBSIZE) {
+			be32_add((__be32 *)dptr, 1);
+			if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM)
+				be32_add((__be32 *)dptr, 1);
 			dptr += BBSIZE;
 		}
 
@@ -1586,7 +1582,7 @@ xlog_state_finish_copy(xlog_t *log,
 {
	spin_lock(&log->l_icloglock);
 
-	iclog->ic_header.h_num_logops += record_cnt;
+	be32_add(&iclog->ic_header.h_num_logops, record_cnt);
	iclog->ic_offset += copy_bytes;
 
	spin_unlock(&log->l_icloglock);
@@ -1813,7 +1809,7 @@ xlog_write(xfs_mount_t * mp,
 
	    /* start_lsn is the first lsn written to. That's all we need. */
	    if (! *start_lsn)
-		*start_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
+		*start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
 
	    /* This loop writes out as many regions as can fit in the amount
	     * of space which was allocated by xlog_state_get_iclog_space().
@@ -1983,7 +1979,8 @@ xlog_state_clean_log(xlog_t *log)
			 * We don't need to cover the dummy.
			 */
			if (!changed &&
-			   (INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT) == XLOG_COVER_OPS)) {
+			   (be32_to_cpu(iclog->ic_header.h_num_logops) ==
+					XLOG_COVER_OPS)) {
				changed = 1;
			} else {
				/*
@@ -2051,7 +2048,7 @@ xlog_get_lowest_lsn(
	lowest_lsn = 0;
	do {
	    if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
-		lsn = INT_GET(lsn_log->ic_header.h_lsn, ARCH_CONVERT);
+		lsn = be64_to_cpu(lsn_log->ic_header.h_lsn);
		if ((lsn && !lowest_lsn) ||
		    (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) {
			lowest_lsn = lsn;
@@ -2152,11 +2149,9 @@ xlog_state_do_callback(
				 */
 
				lowest_lsn = xlog_get_lowest_lsn(log);
-				if (lowest_lsn && (
-					XFS_LSN_CMP(
-						lowest_lsn,
-						INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)
-					)<0)) {
+				if (lowest_lsn &&
+				    XFS_LSN_CMP(lowest_lsn,
+						be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
					iclog = iclog->ic_next;
					continue; /* Leave this iclog for
						   * another thread */
@@ -2171,11 +2166,10 @@ xlog_state_do_callback(
				 * No one else can be here except us.
				 */
				spin_lock(&log->l_grant_lock);
-				ASSERT(XFS_LSN_CMP(
-						log->l_last_sync_lsn,
-						INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)
-					)<=0);
-				log->l_last_sync_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
+				ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn,
+				       be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
+				log->l_last_sync_lsn =
+					be64_to_cpu(iclog->ic_header.h_lsn);
				spin_unlock(&log->l_grant_lock);
 
				/*
@@ -2392,8 +2386,8 @@ restart:
		xlog_tic_add_region(ticket,
				    log->l_iclog_hsize,
				    XLOG_REG_TYPE_LRHEADER);
-		INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle);
-		INT_SET(head->h_lsn, ARCH_CONVERT,
+		head->h_cycle = cpu_to_be32(log->l_curr_cycle);
+		head->h_lsn = cpu_to_be64(
			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
		ASSERT(log->l_curr_block >= 0);
	}
@@ -2823,7 +2817,7 @@ xlog_state_release_iclog(xlog_t *log,
	    iclog->ic_state == XLOG_STATE_WANT_SYNC) {
		sync++;
		iclog->ic_state = XLOG_STATE_SYNCING;
-		INT_SET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT, log->l_tail_lsn);
+		iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn);
		xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
		/* cycle incremented when incrementing curr_block */
	}
@@ -2861,7 +2855,7 @@ xlog_state_switch_iclogs(xlog_t *log,
	if (!eventual_size)
		eventual_size = iclog->ic_offset;
	iclog->ic_state = XLOG_STATE_WANT_SYNC;
-	INT_SET(iclog->ic_header.h_prev_block, ARCH_CONVERT, log->l_prev_block);
+	iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
	log->l_prev_block = log->l_curr_block;
	log->l_prev_cycle = log->l_curr_cycle;
 
@@ -2957,7 +2951,7 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
		 * the previous sync.
		 */
		iclog->ic_refcnt++;
-		lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
+		lsn = be64_to_cpu(iclog->ic_header.h_lsn);
		xlog_state_switch_iclogs(log, iclog, 0);
		spin_unlock(&log->l_icloglock);
 
@@ -2965,7 +2959,7 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
			return XFS_ERROR(EIO);
		*log_flushed = 1;
		spin_lock(&log->l_icloglock);
-		if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) == lsn &&
+		if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
		    iclog->ic_state != XLOG_STATE_DIRTY)
			goto maybe_sleep;
		else
@@ -3049,9 +3043,9 @@ try_again:
	}
 
	do {
-		if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) != lsn) {
+		if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
			iclog = iclog->ic_next;
			continue;
		}
 
		if (iclog->ic_state == XLOG_STATE_DIRTY) {
@@ -3460,18 +3454,18 @@ xlog_verify_iclog(xlog_t *log,
	spin_unlock(&log->l_icloglock);
 
	/* check log magic numbers */
-	ptr = (xfs_caddr_t) &(iclog->ic_header);
-	if (INT_GET(*(uint *)ptr, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM)
+	if (be32_to_cpu(iclog->ic_header.h_magicno) != XLOG_HEADER_MAGIC_NUM)
		xlog_panic("xlog_verify_iclog: invalid magic num");
 
-	for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&(iclog->ic_header))+count;
+	ptr = (xfs_caddr_t) &iclog->ic_header;
+	for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count;
	     ptr += BBSIZE) {
-		if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
+		if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
			xlog_panic("xlog_verify_iclog: unexpected magic num");
	}
 
	/* check fields */
-	len = INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT);
+	len = be32_to_cpu(iclog->ic_header.h_num_logops);
	ptr = iclog->ic_datap;
	base_ptr = ptr;
	ophead = (xlog_op_header_t *)ptr;
@@ -3512,9 +3506,9 @@ xlog_verify_iclog(xlog_t *log,
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
-				op_len = INT_GET(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT);
+				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
-				op_len = INT_GET(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT);
+				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
			}
		}
		ptr += sizeof(xlog_op_header_t) + op_len;