path: root/fs/xfs/xfs_trans_ail.c
author     Brian Foster <bfoster@redhat.com>    2012-06-28 06:52:56 -0400
committer  Ben Myers <bpm@sgi.com>              2012-07-29 17:27:57 -0400
commit     8375f922aaa6e7a880022529202fb486315568c3 (patch)
tree       5fd3385ae7860ef9dabc84619f64fb85878fa864 /fs/xfs/xfs_trans_ail.c
parent     4f59af758f9092bc7b266ca919ce6067170e5172 (diff)
xfs: re-enable xfsaild idle mode and fix associated races
xfsaild idle mode logic currently leads to a couple hangs:

1.) If xfsaild is rescheduled in during an incremental scan (i.e., tout != 0) and the target has been updated since the previous run, we can hit the new target and go into idle mode with a still populated ail.

2.) A wake up is only issued when the target is pushed forward. The wake up can race with xfsaild if it is currently in the process of entering idle mode, causing future wake up events to be lost.

These hangs have been reproduced and verified as fixed by running xfstests 273 in a loop on a slightly modified upstream kernel. The kernel is modified to re-enable idle mode as previously implemented (when count == 0) and with a revert of commit 670ce93f, which includes performance improvements that make this harder to reproduce.

The solution, the algorithm for which has been outlined by Dave Chinner, is to modify xfsaild to enter idle mode only when the ail is empty and the push target has not been moved forward since the last push.

Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>
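The idle test described above (empty AIL and an unmoved push target) can be illustrated outside the kernel. The sketch below is a minimal userspace analogue using pthreads; it is not the XFS implementation, and the names (push_target, worker, queue_len, target_prev) are invented for the example. The pusher thread records the target it last pushed towards and only sleeps when nothing is queued and the target still matches that record, so a target update cannot be missed between the check and the sleep.

/*
 * Userspace analogue of the xfsaild idle-mode algorithm described above
 * (illustrative only; not the XFS code).  The worker idles only when the
 * queue is empty AND the target has not moved since it was last sampled,
 * mirroring the xa_target/xa_target_prev handshake.
 */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static unsigned long target;        /* stand-in for ailp->xa_target      */
static unsigned long target_prev;   /* stand-in for ailp->xa_target_prev */
static unsigned long queue_len;     /* stand-in for the AIL item count   */

/* roughly the role of xfs_ail_push(): move the target forward and wake */
static void push_target(unsigned long new_target)
{
	pthread_mutex_lock(&lock);
	if (new_target > target) {
		target = new_target;
		pthread_cond_signal(&wake);
	}
	pthread_mutex_unlock(&lock);
}

/* roughly the role of the xfsaild() main loop */
static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		target_prev = target;	/* record the target we push towards */
		pthread_mutex_unlock(&lock);

		/* ... push queued items up to target_prev here ... */

		pthread_mutex_lock(&lock);
		/*
		 * Idle only if nothing is left to push and the target has not
		 * moved since we sampled it; checking under the lock before
		 * waiting means a concurrent push_target() cannot be lost.
		 */
		while (queue_len == 0 && target == target_prev)
			pthread_cond_wait(&wake, &lock);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	push_target(100);		/* worker wakes, samples the new target */
	pthread_join(tid, NULL);	/* the toy worker loops forever */
	return 0;
}

In the kernel the same "no lost wake up" guarantee comes not from a mutex/condvar pair but from setting the task state to a sleep state before re-checking the AIL and the target, as the diff below does.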
Diffstat (limited to 'fs/xfs/xfs_trans_ail.c')
-rw-r--r--  fs/xfs/xfs_trans_ail.c  35
1 file changed, 32 insertions(+), 3 deletions(-)
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 9c514483e599..6011ee661339 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -383,6 +383,12 @@ xfsaild_push(
 	}
 
 	spin_lock(&ailp->xa_lock);
+
+	/* barrier matches the xa_target update in xfs_ail_push() */
+	smp_rmb();
+	target = ailp->xa_target;
+	ailp->xa_target_prev = target;
+
 	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
 	if (!lip) {
 		/*
@@ -397,7 +403,6 @@ xfsaild_push(
 	XFS_STATS_INC(xs_push_ail);
 
 	lsn = lip->li_lsn;
-	target = ailp->xa_target;
 	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
 		int	lock_result;
 
@@ -527,8 +532,32 @@ xfsaild(
 			__set_current_state(TASK_KILLABLE);
 		else
 			__set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(tout ?
-				 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
+
+		spin_lock(&ailp->xa_lock);
+
+		/*
+		 * Idle if the AIL is empty and we are not racing with a target
+		 * update. We check the AIL after we set the task to a sleep
+		 * state to guarantee that we either catch an xa_target update
+		 * or that a wake_up resets the state to TASK_RUNNING.
+		 * Otherwise, we run the risk of sleeping indefinitely.
+		 *
+		 * The barrier matches the xa_target update in xfs_ail_push().
+		 */
+		smp_rmb();
+		if (!xfs_ail_min(ailp) &&
+		    ailp->xa_target == ailp->xa_target_prev) {
+			spin_unlock(&ailp->xa_lock);
+			schedule();
+			tout = 0;
+			continue;
+		}
+		spin_unlock(&ailp->xa_lock);
+
+		if (tout)
+			schedule_timeout(msecs_to_jiffies(tout));
+
+		__set_current_state(TASK_RUNNING);
 
 		try_to_freeze();
 
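The smp_rmb() calls added above are documented as pairing with the xa_target update in xfs_ail_push(). As a generic, hedged illustration of that kind of publish/consume pairing, written in portable C11 atomics rather than kernel barrier primitives (published_target, publish_target() and read_target() are invented names, not XFS code):

/*
 * Illustrative C11 sketch of release/acquire publication; it shows the
 * general pattern the diff comments refer to, not the kernel's exact
 * barrier usage.
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t published_target;	/* stand-in for xa_target */

/* writer side: publish a new target value before waking the pusher */
static void publish_target(uint64_t new_lsn)
{
	atomic_store_explicit(&published_target, new_lsn,
			      memory_order_release);
	/* ... then wake the pushing thread ... */
}

/* reader side: acquire load stands in for the read barrier before the read */
static uint64_t read_target(void)
{
	return atomic_load_explicit(&published_target, memory_order_acquire);
}

The release store on the writer side ensures that a reader which observes the new target also observes writes made before it was published; the acquire load on the reader side corresponds loosely to issuing a read barrier before sampling the target.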