author		Dave Chinner <dchinner@redhat.com>	2010-12-20 20:28:39 -0500
committer	Dave Chinner <david@fromorbit.com>	2010-12-20 20:28:39 -0500
commit		1c3cb9ec07fabf0c0970adc46fd2a1f09c1186dd (patch)
tree		b8b6bf968f663723b06aa68ec499e49f3cdfbad0 /fs/xfs/xfs_log_recover.c
parent		84f3c683c4d3f36d3c3ed320babd960a332ac458 (diff)
xfs: convert l_tail_lsn to an atomic variable.
log->l_tail_lsn is currently protected by the log grant lock. The lock is only needed for serialising readers against writers, so we don't really need the lock if we make the l_tail_lsn variable an atomic. Converting the l_tail_lsn variable to an atomic64_t means we can start to peel back the grant lock from various operations.

Also, provide functions to safely crack an atomic LSN variable into its component pieces and to recombine the components into an atomic variable. Use them where appropriate. This also removes the need for explicitly holding a spinlock to read the l_tail_lsn on 32 bit platforms.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
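The crack/recombine helpers referred to above are introduced outside this file, so they do not appear in the diff below. A minimal sketch of what they look like, assuming the existing XFS CYCLE_LSN()/BLOCK_LSN() macros and the xlog_assign_lsn() helper that the removed code already uses:

/*
 * Illustrative sketch only (the real helpers live with the other log
 * definitions, e.g. xfs_log_priv.h): split an atomic LSN into its
 * cycle/block components, or pack them back into one atomic store.
 */
static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	xfs_lsn_t val = atomic64_read(lsn);	/* single atomic 64-bit read */

	*cycle = CYCLE_LSN(val);	/* high 32 bits: cycle number */
	*block = BLOCK_LSN(val);	/* low 32 bits: block number */
}

static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	/* pack cycle/block and publish them with one atomic 64-bit store */
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}

Because atomic64_read()/atomic64_set() are atomic even on 32-bit platforms, a reader can fetch the tail with a plain atomic64_read(&log->l_tail_lsn) and no lock, which is exactly the pattern the hunks below switch to.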
Diffstat (limited to 'fs/xfs/xfs_log_recover.c')
-rw-r--r--	fs/xfs/xfs_log_recover.c	14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 18e1e18d714..204d8e5fa7f 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -936,7 +936,7 @@ xlog_find_tail(
 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
 	if (found == 2)
 		log->l_curr_cycle++;
-	log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
+	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
 	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
 	xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle,
 					BBTOB(log->l_curr_block));
@@ -971,7 +971,7 @@ xlog_find_tail(
 		}
 		after_umount_blk = (i + hblks + (int)
 			BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
-		tail_lsn = log->l_tail_lsn;
+		tail_lsn = atomic64_read(&log->l_tail_lsn);
 		if (*head_blk == after_umount_blk &&
 		    be32_to_cpu(rhead->h_num_logops) == 1) {
 			umount_data_blk = (i + hblks) % log->l_logBBsize;
@@ -986,12 +986,10 @@ xlog_find_tail(
 			 * log records will point recovery to after the
 			 * current unmount record.
 			 */
-			log->l_tail_lsn =
-				xlog_assign_lsn(log->l_curr_cycle,
-						after_umount_blk);
-			atomic64_set(&log->l_last_sync_lsn,
-				xlog_assign_lsn(log->l_curr_cycle,
-						after_umount_blk));
+			xlog_assign_atomic_lsn(&log->l_tail_lsn,
+					log->l_curr_cycle, after_umount_blk);
+			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
+					log->l_curr_cycle, after_umount_blk);
 			*tail_blk = after_umount_blk;
 
 			/*