about summary refs log tree commit diff stats
path: root/fs/xfs/xfs_log_priv.h
diff options
context:
space:
mode:
authorBrian Foster <bfoster@redhat.com>2015-10-12 00:59:25 -0400
committerDave Chinner <david@fromorbit.com>2015-10-12 00:59:25 -0400
commita45086e27dfa21a4b39134f7505c8f60a3ecdec4 (patch)
treee55bfa2359246fe65e82da6caf3ccd74afc8c46f /fs/xfs/xfs_log_priv.h
parentb7cdc66be54b64daef593894d12ecc405f117829 (diff)
xfs: validate metadata LSNs against log on v5 superblocks
Since the onset of v5 superblocks, the LSN of the last modification has been included in a variety of on-disk data structures. This LSN is used to provide log recovery ordering guarantees (e.g., to ensure an older log recovery item is not replayed over a newer target data structure). While this works correctly from the point a filesystem is formatted and mounted, userspace tools have some problematic behaviors that defeat this mechanism. For example, xfs_repair historically zeroes out the log unconditionally (regardless of whether corruption is detected). If this occurs, the LSN of the filesystem is reset and the log is now in a problematic state with respect to on-disk metadata structures that might have a larger LSN. Until either the log catches up to the highest previously used metadata LSN or each affected data structure is modified and written out without incident (which resets the metadata LSN), log recovery is susceptible to filesystem corruption. This problem is ultimately addressed and repaired in the associated userspace tools. The kernel is still responsible to detect the problem and notify the user that something is wrong. Check the superblock LSN at mount time and fail the mount if it is invalid. From that point on, trigger verifier failure on any metadata I/O where an invalid LSN is detected. This results in a filesystem shutdown and guarantees that we do not log metadata changes with invalid LSNs on disk. Since this is a known issue with a known recovery path, present a warning to instruct the user how to recover. Signed-off-by: Brian Foster <bfoster@redhat.com> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
Diffstat (limited to 'fs/xfs/xfs_log_priv.h')
-rw-r--r-- fs/xfs/xfs_log_priv.h | 51
1 file changed, 51 insertions, 0 deletions
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 950f3f94720c..8daba7491b13 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -560,4 +560,55 @@ static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
560 remove_wait_queue(wq, &wait); 560 remove_wait_queue(wq, &wait);
561} 561}
562 562
563/*
564 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
565 * means that the next log record that includes this metadata could have a
566 * smaller LSN. In turn, this means that the modification in the log would not
567 * replay.
568 */
569static inline bool
570xlog_valid_lsn(
571 struct xlog *log,
572 xfs_lsn_t lsn)
573{
574 int cur_cycle;
575 int cur_block;
576 bool valid = true;
577
578 /*
579 * First, sample the current lsn without locking to avoid added
580 * contention from metadata I/O. The current cycle and block are updated
581 * (in xlog_state_switch_iclogs()) and read here in a particular order
582 * to avoid false negatives (e.g., thinking the metadata LSN is valid
583 * when it is not).
584 *
585 * The current block is always rewound before the cycle is bumped in
586 * xlog_state_switch_iclogs() to ensure the current LSN is never seen in
587 * a transiently forward state. Instead, we can see the LSN in a
588 * transiently behind state if we happen to race with a cycle wrap.
589 */
590 cur_cycle = ACCESS_ONCE(log->l_curr_cycle);
591 smp_rmb();
592 cur_block = ACCESS_ONCE(log->l_curr_block);
593
594 if ((CYCLE_LSN(lsn) > cur_cycle) ||
595 (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
596 /*
597 * If the metadata LSN appears invalid, it's possible the check
598 * above raced with a wrap to the next log cycle. Grab the lock
599 * to check for sure.
600 */
601 spin_lock(&log->l_icloglock);
602 cur_cycle = log->l_curr_cycle;
603 cur_block = log->l_curr_block;
604 spin_unlock(&log->l_icloglock);
605
606 if ((CYCLE_LSN(lsn) > cur_cycle) ||
607 (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
608 valid = false;
609 }
610
611 return valid;
612}
613
563#endif /* __XFS_LOG_PRIV_H__ */ 614#endif /* __XFS_LOG_PRIV_H__ */