Diffstat (limited to 'fs/xfs/xfs_log_recover.c')
 fs/xfs/xfs_log_recover.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 18e1e18d714..204d8e5fa7f 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -936,7 +936,7 @@ xlog_find_tail(
 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
 	if (found == 2)
 		log->l_curr_cycle++;
-	log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
+	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
 	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
 	xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle,
 					BBTOB(log->l_curr_block));
@@ -971,7 +971,7 @@ xlog_find_tail(
 	}
 	after_umount_blk = (i + hblks + (int)
 		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
-	tail_lsn = log->l_tail_lsn;
+	tail_lsn = atomic64_read(&log->l_tail_lsn);
 	if (*head_blk == after_umount_blk &&
 	    be32_to_cpu(rhead->h_num_logops) == 1) {
 		umount_data_blk = (i + hblks) % log->l_logBBsize;
@@ -986,12 +986,10 @@ xlog_find_tail(
 		 * log records will point recovery to after the
 		 * current unmount record.
 		 */
-		log->l_tail_lsn =
-			xlog_assign_lsn(log->l_curr_cycle,
-					after_umount_blk);
-		atomic64_set(&log->l_last_sync_lsn,
-			xlog_assign_lsn(log->l_curr_cycle,
-					after_umount_blk));
+		xlog_assign_atomic_lsn(&log->l_tail_lsn,
+				log->l_curr_cycle, after_umount_blk);
+		xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
+				log->l_curr_cycle, after_umount_blk);
 		*tail_blk = after_umount_blk;
 
 		/*
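
The hunks above call xlog_assign_atomic_lsn(), which is not shown in this diff. A minimal sketch of what that helper and the existing xlog_assign_lsn() look like, assuming they live in fs/xfs/xfs_log_priv.h (their definitions are not part of this diff):

/*
 * Sketch of the assumed helpers: pack a cycle/block pair into an
 * LSN, then store it into the atomic64_t with atomic64_set().
 */
static inline xfs_lsn_t
xlog_assign_lsn(uint cycle, uint block)
{
	return ((xfs_lsn_t)cycle << 32) | block;
}

static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}

With l_tail_lsn held as an atomic64_t, xlog_find_tail() can set and sample the tail LSN with atomic64_set()/atomic64_read(), presumably so other paths can read it without serialising on a lock; that is the pattern already used for l_last_sync_lsn in the unchanged lines.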