path: root/fs/xfs/xfs_log.c
author    Dave Chinner <dchinner@redhat.com>  2013-10-14 18:17:49 -0400
committer Ben Myers <bpm@sgi.com>             2013-10-17 11:56:17 -0400
commit    2c6e24ce1aa6b3b147c75d488c2797ee258eb22b (patch)
tree      5aa3cc69daf8a5562b693b8094d835e7cb507fd1 /fs/xfs/xfs_log.c
parent    74564fb48cbfcb5b433c1baec1f3158ea638b203 (diff)
xfs: prevent deadlock trying to cover an active log
Recent analysis of a deadlocked XFS filesystem from a kernel crash dump indicated that the filesystem was stuck waiting for log space. The short story of the hang on the RHEL6 kernel is this:

- the tail of the log is pinned by an inode
- the inode has been pushed by the xfsaild
- the inode has been flushed to its backing buffer and is currently flush locked, and hence is waiting for backing buffer IO to complete and remove it from the AIL
- the backing buffer is marked for write - it is on the delayed write queue
- the inode buffer has been modified directly and logged recently due to unlinked inode list modification
- the backing buffer is pinned in memory as it is in the active CIL context
- the xfsbufd won't start buffer writeback because the buffer is pinned
- xfssyncd won't force the log because it sees the log as needing to be covered and hence wants to issue a dummy transaction to move the log covering state machine along

Hence there is no trigger to force the CIL to the log and so unpin the inode buffer, complete the inode IO, remove the inode from the AIL and move the tail of the log along, allowing transactions to start again.

Mainline kernels have the same deadlock, though the signature is slightly different - the inode buffer never reaches the delayed write lists because xfs_buf_item_push() sees that it is pinned and hence never adds it to the delayed write list that the xfsaild flushes.

There are two possible solutions here. The first is to simply force the log before trying to cover it, and so ensure that the CIL is emptied before we try to reserve space for the dummy transaction in xfs_log_worker(). While this might work most of the time, it is still racy and is no guarantee that we don't get stuck in xfs_trans_reserve() waiting for log space to come free. Hence it's not the best way to solve the problem.

The second solution is to modify xfs_log_need_covered() to be aware of the CIL. We should only be attempting to cover the log if there is no current activity in the log - covering the log is the process of ensuring that the head and tail of the log on disk are identical (i.e. the log is clean and idle). Hence, by definition, if there are items in the CIL then the log is not idle and we don't need to attempt to cover it.

When we don't need to cover the log because it is active or idle, we issue a log force from xfs_log_worker() - if the log is idle, this does nothing. However, if the log is active due to there being items in the CIL, the force pushes the items in the CIL to the log and unpins them.

In the case of the above deadlock scenario, instead of xfs_log_worker() getting stuck in xfs_trans_reserve() attempting to cover the log, it will instead force the log, thereby unpinning the inode buffer, allowing IO to be issued and completed, and hence removing the inode that was pinning the tail of the log from the AIL. At that point, everything starts moving along again; i.e. xfs_log_worker() turns back into a watchdog that can alleviate deadlocks based around pinned items that prevent the tail of the log from being moved.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
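For context, the xfs_log_worker() side of this change is part of the same patch but falls outside the hunks quoted below. The following is only a sketch of the decision that function makes after this commit, written against the existing XFS helpers xfs_fs_log_dummy(), xfs_log_force(), xfs_ail_push_all() and xfs_log_work_queue(); it is an approximation for illustration, not a quote from the patch:

static void
xfs_log_worker(
	struct work_struct	*work)
{
	struct xlog		*log = container_of(to_delayed_work(work),
						struct xlog, l_work);
	struct xfs_mount	*mp = log->l_mp;

	if (xfs_log_need_covered(mp)) {
		/* log is idle: run a dummy transaction to cover it */
		xfs_fs_log_dummy(mp);
	} else {
		/*
		 * log is active (or cannot be covered safely): force it so
		 * that items pinned by the CIL are unpinned and can be
		 * written back, allowing the log tail to move.
		 */
		xfs_log_force(mp, 0);
	}

	/* start pushing all the metadata that is currently dirty */
	xfs_ail_push_all(mp->m_ail);

	/* queue us up again */
	xfs_log_work_queue(mp);
}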
Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r--  fs/xfs/xfs_log.c  48
1 file changed, 29 insertions, 19 deletions
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a2dea108071a..613ed9414e70 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1000,27 +1000,34 @@ xfs_log_space_wake(
 }
 
 /*
- * Determine if we have a transaction that has gone to disk
- * that needs to be covered. To begin the transition to the idle state
- * firstly the log needs to be idle (no AIL and nothing in the iclogs).
- * If we are then in a state where covering is needed, the caller is informed
- * that dummy transactions are required to move the log into the idle state.
+ * Determine if we have a transaction that has gone to disk that needs to be
+ * covered. To begin the transition to the idle state firstly the log needs to
+ * be idle. That means the CIL, the AIL and the iclogs needs to be empty before
+ * we start attempting to cover the log.
  *
- * Because this is called as part of the sync process, we should also indicate
- * that dummy transactions should be issued in anything but the covered or
- * idle states. This ensures that the log tail is accurately reflected in
- * the log at the end of the sync, hence if a crash occurrs avoids replay
- * of transactions where the metadata is already on disk.
+ * Only if we are then in a state where covering is needed, the caller is
+ * informed that dummy transactions are required to move the log into the idle
+ * state.
+ *
+ * If there are any items in the AIl or CIL, then we do not want to attempt to
+ * cover the log as we may be in a situation where there isn't log space
+ * available to run a dummy transaction and this can lead to deadlocks when the
+ * tail of the log is pinned by an item that is modified in the CIL. Hence
+ * there's no point in running a dummy transaction at this point because we
+ * can't start trying to idle the log until both the CIL and AIL are empty.
  */
 int
 xfs_log_need_covered(xfs_mount_t *mp)
 {
-	int		needed = 0;
 	struct xlog	*log = mp->m_log;
+	int		needed = 0;
 
 	if (!xfs_fs_writable(mp))
 		return 0;
 
+	if (!xlog_cil_empty(log))
+		return 0;
+
 	spin_lock(&log->l_icloglock);
 	switch (log->l_covered_state) {
 	case XLOG_STATE_COVER_DONE:
@@ -1029,14 +1036,17 @@ xfs_log_need_covered(xfs_mount_t *mp)
 		break;
 	case XLOG_STATE_COVER_NEED:
 	case XLOG_STATE_COVER_NEED2:
-		if (!xfs_ail_min_lsn(log->l_ailp) &&
-		    xlog_iclogs_empty(log)) {
-			if (log->l_covered_state == XLOG_STATE_COVER_NEED)
-				log->l_covered_state = XLOG_STATE_COVER_DONE;
-			else
-				log->l_covered_state = XLOG_STATE_COVER_DONE2;
-		}
-		/* FALLTHRU */
+		if (xfs_ail_min_lsn(log->l_ailp))
+			break;
+		if (!xlog_iclogs_empty(log))
+			break;
+
+		needed = 1;
+		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
+			log->l_covered_state = XLOG_STATE_COVER_DONE;
+		else
+			log->l_covered_state = XLOG_STATE_COVER_DONE2;
+		break;
 	default:
 		needed = 1;
 		break;
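The new early return in the first hunk relies on xlog_cil_empty(), which this patch adds in fs/xfs/xfs_log_cil.c and which is not part of the excerpt above. A minimal sketch of such a helper, assuming the CIL fields l_cilp, xc_cil (the list of items committed to the CIL but not yet pushed to the log) and xc_cil_lock; the field and lock names here are assumptions drawn from the surrounding XFS code rather than quotes from this patch:

bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	/* hold the CIL lock so the list check is stable (lock name assumed) */
	spin_lock(&cil->xc_cil_lock);
	if (list_empty(&cil->xc_cil))
		empty = true;
	spin_unlock(&cil->xc_cil_lock);
	return empty;
}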