author    Brian Foster <bfoster@redhat.com>    2016-03-06 16:22:22 -0500
committer Dave Chinner <david@fromorbit.com>   2016-03-06 16:22:22 -0500
commit    717bc0ebca0bce9cb3edfc31b49b384a1d55db1c (patch)
tree      f6d78b56611b3bf7265abb973963ec3627b18c02 /fs
parent    65b99a08b350876e8835fc0e7173598165f64dee (diff)
xfs: refactor in-core log state update to helper
Once the record at the head of the log is identified and verified, the
in-core log state is updated based on the record. This includes
information such as the current head block and cycle, the start block
of the last record written to the log, the tail lsn, etc.

Once torn write detection is conditional, this logic will need to be
reused. Factor the code that updates the in-core log data structures
into a new helper function. This patch does not change behavior.

Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
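[Editor's note: for readers unfamiliar with this style of refactor, below is a
minimal, self-contained userspace C sketch of the same pattern: the state
update that two recovery paths must agree on lives in one helper, so a later
conditional caller (here, torn write detection) can reuse it verbatim. All
names in the sketch (demo_log, demo_set_state) are illustrative stand-ins,
not kernel APIs, and the fields are a simplified subset of struct xlog.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the in-core log state; not the kernel struct. */
struct demo_log {
	int		prev_block;
	int		curr_block;
	uint32_t	curr_cycle;
	uint64_t	tail_lsn;
};

/*
 * One helper owns the update rules, including the conditional cycle
 * bump, so every caller applies them identically.
 */
static void
demo_set_state(struct demo_log *log, int head_blk, int rhead_blk,
	       uint32_t cycle, uint64_t tail_lsn, bool bump_cycle)
{
	log->prev_block = rhead_blk;
	log->curr_block = head_blk;
	log->curr_cycle = cycle;
	if (bump_cycle)
		log->curr_cycle++;
	log->tail_lsn = tail_lsn;
}

int
main(void)
{
	struct demo_log log = { 0 };

	/* Caller 1: the normal find-tail path. */
	demo_set_state(&log, 64, 32, 7, 100, false);

	/* Caller 2: a conditional re-detection path reusing the helper. */
	demo_set_state(&log, 0, 96, 7, 100, true);

	printf("cycle=%u curr=%d prev=%d\n",
	       log.curr_cycle, log.curr_block, log.prev_block);
	return 0;
}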
Diffstat (limited to 'fs')
-rw-r--r--  fs/xfs/xfs_log_recover.c | 52
1 file changed, 33 insertions(+), 19 deletions(-)
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 1aae75608453..9ac8aa8dc38c 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1276,6 +1276,37 @@ xlog_check_unmount_rec(
 	return 0;
 }
 
+static void
+xlog_set_state(
+	struct xlog		*log,
+	xfs_daddr_t		head_blk,
+	struct xlog_rec_header	*rhead,
+	xfs_daddr_t		rhead_blk,
+	bool			bump_cycle)
+{
+	/*
+	 * Reset log values according to the state of the log when we
+	 * crashed. In the case where head_blk == 0, we bump curr_cycle
+	 * one because the next write starts a new cycle rather than
+	 * continuing the cycle of the last good log record. At this
+	 * point we have guaranteed that all partial log records have been
+	 * accounted for. Therefore, we know that the last good log record
+	 * written was complete and ended exactly on the end boundary
+	 * of the physical log.
+	 */
+	log->l_prev_block = rhead_blk;
+	log->l_curr_block = (int)head_blk;
+	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
+	if (bump_cycle)
+		log->l_curr_cycle++;
+	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
+	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
+	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
+					BBTOB(log->l_curr_block));
+	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
+					BBTOB(log->l_curr_block));
+}
+
 /*
  * Find the sync block number or the tail of the log.
  *
@@ -1356,26 +1387,9 @@ xlog_find_tail(
 		goto done;
 
 	/*
-	 * Reset log values according to the state of the log when we
-	 * crashed. In the case where head_blk == 0, we bump curr_cycle
-	 * one because the next write starts a new cycle rather than
-	 * continuing the cycle of the last good log record. At this
-	 * point we have guaranteed that all partial log records have been
-	 * accounted for. Therefore, we know that the last good log record
-	 * written was complete and ended exactly on the end boundary
-	 * of the physical log.
+	 * Set the log state based on the current head record.
 	 */
-	log->l_prev_block = rhead_blk;
-	log->l_curr_block = (int)*head_blk;
-	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
-	if (wrapped)
-		log->l_curr_cycle++;
-	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
-	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
-	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
-					BBTOB(log->l_curr_block));
-	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
-					BBTOB(log->l_curr_block));
+	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
 	tail_lsn = atomic64_read(&log->l_tail_lsn);
 
 	/*