diff options
author | Dave Chinner <dchinner@redhat.com> | 2010-12-03 06:11:29 -0500 |
---|---|---|
committer | Dave Chinner <david@fromorbit.com> | 2010-12-03 06:11:29 -0500 |
commit | 84f3c683c4d3f36d3c3ed320babd960a332ac458 (patch) | |
tree | 5884f4e5a04a7a67c634e003bfa590fd2e643fdc /fs/xfs/xfs_log_recover.c | |
parent | 2ced19cbae5448b720919a494606c62095d4f4db (diff) |
xfs: convert l_last_sync_lsn to an atomic variable
log->l_last_sync_lsn is updated in only one critical spot - log
buffer IO completion - and is protected by the grant lock there. This
requires the grant lock to be taken for every log buffer IO
completion. Converting the l_last_sync_lsn variable to an atomic64_t
means that we do not need to take the grant lock in log buffer IO
completion to update it.
This also removes the need for explicitly holding a spinlock to read
the l_last_sync_lsn on 32 bit platforms.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'fs/xfs/xfs_log_recover.c')
-rw-r--r-- | fs/xfs/xfs_log_recover.c | 6 |
1 files changed, 3 insertions, 3 deletions
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 1550404a8aeb..18e1e18d7147 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -937,7 +937,7 @@ xlog_find_tail( | |||
937 | if (found == 2) | 937 | if (found == 2) |
938 | log->l_curr_cycle++; | 938 | log->l_curr_cycle++; |
939 | log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn); | 939 | log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn); |
940 | log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn); | 940 | atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn)); |
941 | xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle, | 941 | xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle, |
942 | BBTOB(log->l_curr_block)); | 942 | BBTOB(log->l_curr_block)); |
943 | xlog_assign_grant_head(&log->l_grant_write_head, log->l_curr_cycle, | 943 | xlog_assign_grant_head(&log->l_grant_write_head, log->l_curr_cycle, |
@@ -989,9 +989,9 @@ xlog_find_tail( | |||
989 | log->l_tail_lsn = | 989 | log->l_tail_lsn = |
990 | xlog_assign_lsn(log->l_curr_cycle, | 990 | xlog_assign_lsn(log->l_curr_cycle, |
991 | after_umount_blk); | 991 | after_umount_blk); |
992 | log->l_last_sync_lsn = | 992 | atomic64_set(&log->l_last_sync_lsn, |
993 | xlog_assign_lsn(log->l_curr_cycle, | 993 | xlog_assign_lsn(log->l_curr_cycle, |
994 | after_umount_blk); | 994 | after_umount_blk)); |
995 | *tail_blk = after_umount_blk; | 995 | *tail_blk = after_umount_blk; |
996 | 996 | ||
997 | /* | 997 | /* |