author      Dave Chinner <dchinner@redhat.com>      2010-12-03 06:11:29 -0500
committer   Dave Chinner <david@fromorbit.com>      2010-12-03 06:11:29 -0500
commit      84f3c683c4d3f36d3c3ed320babd960a332ac458 (patch)
tree        5884f4e5a04a7a67c634e003bfa590fd2e643fdc /fs/xfs/xfs_log.c
parent      2ced19cbae5448b720919a494606c62095d4f4db (diff)
xfs: convert l_last_sync_lsn to an atomic variable
log->l_last_sync_lsn is updated in only one critical spot - log
buffer IO completion - and is protected by the grant lock there. This
requires the grant lock to be taken for every log buffer IO
completion. Converting the l_last_sync_lsn variable to an atomic64_t
means that we do not need to take the grant lock in log buffer IO
completion to update it.

This also removes the need for explicitly holding a spinlock to read
the l_last_sync_lsn on 32 bit platforms.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
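For readers who want the pattern outside the diff context, below is a minimal sketch of the change the message describes: a 64-bit LSN that previously had to be read under a lock (to avoid torn reads on 32 bit platforms) becomes a single atomic 64-bit variable. This is illustrative userspace C, not the kernel code; the struct and helper names are invented, and C11 atomics stand in for the kernel's atomic64_t, atomic64_read() and atomic64_set().

/*
 * Illustrative sketch only -- not fs/xfs code.  C11 atomics stand in
 * for the kernel's atomic64_t API; all names are invented.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* after the conversion: a lock-free 64-bit last-sync LSN */
struct demo_log {
        _Atomic int64_t last_sync_lsn;
};

/* analogous to atomic64_read(&log->l_last_sync_lsn) */
static int64_t demo_read_last_sync(struct demo_log *log)
{
        return atomic_load(&log->last_sync_lsn);
}

/* analogous to atomic64_set(&log->l_last_sync_lsn, lsn) */
static void demo_set_last_sync(struct demo_log *log, int64_t lsn)
{
        atomic_store(&log->last_sync_lsn, lsn);
}

int main(void)
{
        struct demo_log log = { .last_sync_lsn = 0 };

        /* cycle 1, block 0 -- the same initial value the patch sets */
        demo_set_last_sync(&log, (int64_t)1 << 32);
        printf("last_sync_lsn = 0x%llx\n",
               (unsigned long long)demo_read_last_sync(&log));
        return 0;
}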
Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r--   fs/xfs/xfs_log.c   55
1 file changed, 25 insertions, 30 deletions
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 1e2020d5a8b6..70790eb48336 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -675,12 +675,8 @@ xfs_log_move_tail(xfs_mount_t *mp,
         if (XLOG_FORCED_SHUTDOWN(log))
                 return;
 
-        if (tail_lsn == 0) {
-                /* needed since sync_lsn is 64 bits */
-                spin_lock(&log->l_icloglock);
-                tail_lsn = log->l_last_sync_lsn;
-                spin_unlock(&log->l_icloglock);
-        }
+        if (tail_lsn == 0)
+                tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
         spin_lock(&log->l_grant_lock);
 
@@ -800,11 +796,9 @@ xlog_assign_tail_lsn(xfs_mount_t *mp)
 
         tail_lsn = xfs_trans_ail_tail(mp->m_ail);
         spin_lock(&log->l_grant_lock);
-        if (tail_lsn != 0) {
-                log->l_tail_lsn = tail_lsn;
-        } else {
-                tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn;
-        }
+        if (!tail_lsn)
+                tail_lsn = atomic64_read(&log->l_last_sync_lsn);
+        log->l_tail_lsn = tail_lsn;
         spin_unlock(&log->l_grant_lock);
 
         return tail_lsn;
@@ -1014,9 +1008,9 @@ xlog_alloc_log(xfs_mount_t *mp,
         log->l_flags       |= XLOG_ACTIVE_RECOVERY;
 
         log->l_prev_block  = -1;
-        log->l_tail_lsn    = xlog_assign_lsn(1, 0);
         /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
-        log->l_last_sync_lsn = log->l_tail_lsn;
+        log->l_tail_lsn    = xlog_assign_lsn(1, 0);
+        atomic64_set(&log->l_last_sync_lsn, xlog_assign_lsn(1, 0));
         log->l_curr_cycle  = 1;     /* 0 is bad since this is initial value */
         xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
         xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
@@ -1194,6 +1188,7 @@ xlog_grant_push_ail(
         int             need_bytes)
 {
         xfs_lsn_t       threshold_lsn = 0;
+        xfs_lsn_t       last_sync_lsn;
         xfs_lsn_t       tail_lsn;
         int             free_blocks;
         int             free_bytes;
@@ -1228,10 +1223,12 @@ xlog_grant_push_ail(
                                                 threshold_block);
         /*
          * Don't pass in an lsn greater than the lsn of the last
-         * log record known to be on disk.
+         * log record known to be on disk. Use a snapshot of the last sync lsn
+         * so that it doesn't change between the compare and the set.
          */
-        if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0)
-                threshold_lsn = log->l_last_sync_lsn;
+        last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
+        if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
+                threshold_lsn = last_sync_lsn;
 
         /*
          * Get the transaction layer to kick the dirty buffers out to
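The xlog_grant_push_ail() hunk above also spells out why a local snapshot is taken: once the lock is gone, two separate reads of l_last_sync_lsn could return different values between the compare and the assignment. A small, purely illustrative C sketch of that pattern follows; the names are invented and a plain integer comparison stands in for XFS_LSN_CMP().

/* Illustrative only: why the compare and the assignment use one snapshot. */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic int64_t demo_last_sync_lsn;

/* racy: the value may change between the two loads */
static int64_t demo_clamp_racy(int64_t threshold)
{
        if (threshold > atomic_load(&demo_last_sync_lsn))
                threshold = atomic_load(&demo_last_sync_lsn);
        return threshold;
}

/* what the patch does: read once, then compare and assign the snapshot */
static int64_t demo_clamp_snapshot(int64_t threshold)
{
        int64_t last_sync = atomic_load(&demo_last_sync_lsn);

        if (threshold > last_sync)
                threshold = last_sync;
        return threshold;
}

int main(void)
{
        atomic_store(&demo_last_sync_lsn, 100);
        return (demo_clamp_racy(200) == demo_clamp_snapshot(200)) ? 0 : 1;
}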
@@ -2194,7 +2191,7 @@ xlog_state_do_callback(
                                 lowest_lsn = xlog_get_lowest_lsn(log);
                                 if (lowest_lsn &&
                                     XFS_LSN_CMP(lowest_lsn,
-                                                be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
+                                        be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
                                         iclog = iclog->ic_next;
                                         continue; /* Leave this iclog for
                                                    * another thread */
@@ -2202,23 +2199,21 @@ xlog_state_do_callback(
 
                                 iclog->ic_state = XLOG_STATE_CALLBACK;
 
-                                spin_unlock(&log->l_icloglock);
 
-                                /* l_last_sync_lsn field protected by
-                                 * l_grant_lock. Don't worry about iclog's lsn.
-                                 * No one else can be here except us.
+                                /*
+                                 * update the last_sync_lsn before we drop the
+                                 * icloglock to ensure we are the only one that
+                                 * can update it.
                                  */
-                                spin_lock(&log->l_grant_lock);
-                                ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn,
-                                       be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
-                                log->l_last_sync_lsn =
-                                        be64_to_cpu(iclog->ic_header.h_lsn);
-                                spin_unlock(&log->l_grant_lock);
+                                ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
+                                        be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
+                                atomic64_set(&log->l_last_sync_lsn,
+                                                be64_to_cpu(iclog->ic_header.h_lsn));
 
-                        } else {
-                                spin_unlock(&log->l_icloglock);
+                        } else
                                 ioerrors++;
-                        }
+
+                        spin_unlock(&log->l_icloglock);
 
                         /*
                          * Keep processing entries in the callback list until
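The new comment in the hunk above notes that last_sync_lsn must be updated before the icloglock is dropped, so only one thread can advance it at a time even though readers no longer take any lock. As a rough analogy (not kernel code), the sketch below shows a 64-bit value that is read lock-free but written only while a mutex is held; the mutex stands in for the icloglock and all names are invented.

/* Illustrative only: lock-free readers, a single locked writer. */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

static pthread_mutex_t demo_icloglock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic int64_t demo_last_sync_lsn;

/* readers never take the lock */
static int64_t demo_read_last_sync(void)
{
        return atomic_load(&demo_last_sync_lsn);
}

/* writers update before releasing the lock, so updates stay ordered */
static void demo_iclog_complete(int64_t header_lsn)
{
        pthread_mutex_lock(&demo_icloglock);

        /* mirrors the ASSERT in the hunk: the LSN must not move backwards */
        assert(demo_read_last_sync() <= header_lsn);
        atomic_store(&demo_last_sync_lsn, header_lsn);

        pthread_mutex_unlock(&demo_icloglock);
}

int main(void)
{
        demo_iclog_complete((int64_t)1 << 32);
        return demo_read_last_sync() == ((int64_t)1 << 32) ? 0 : 1;
}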