author	Dave Chinner <dchinner@redhat.com>	2014-02-27 00:40:42 -0500
committer	Dave Chinner <david@fromorbit.com>	2014-02-27 00:40:42 -0500
commit	f876e44603ad091c840a5fae5b0753bbb421c037 (patch)
tree	db8d96b382301a8cfa4dd3bd0325f9b108eb7827 /fs/xfs
parent	38dbfb59d1175ef458d006556061adeaa8751b72 (diff)
xfs: always do log forces via the workqueue
Log forces can occur deep in the call chain when we have relatively
little stack free. Log forces can also happen close to the call chain
leaves (e.g. xfs_buf_lock()) and hence we can trigger IO from places
where we really don't want to add more stack overhead.

This stack overhead occurs because log forces do foreground CIL pushes
(xlog_cil_push_foreground()) rather than waking the background push wq
and waiting for the push to complete. This foreground push was done to
avoid confusing the CFQ IO scheduler when fsync()s were issued, as it
has trouble dealing with dependent IOs being issued from different
process contexts.

Avoiding blowing the stack is much more critical than performance
optimisations for CFQ, especially as we've been recommending against
the use of CFQ for XFS since 3.2 kernels were released because of its
problems with multi-threaded IO workloads.

Hence convert xlog_cil_push_foreground() to move the push work to the
CIL workqueue. We already do the waiting for the push to complete in
xlog_cil_force_lsn(), so there's nothing else we need to modify to
make this work.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
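The shape of this change is easy to model outside the kernel. The
userspace C sketch below is hypothetical code, not from XFS: the thread,
the names push_worker/force_to_seq, and the pthread plumbing are all
illustrative stand-ins. It shows a force path that records a target
sequence and wakes a background worker instead of doing the push on its
own stack, then sleeps until the worker completes it - the same split
the patch makes between xlog_cil_push_now() and the waiting done in
xlog_cil_force_lsn().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t push_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  push_wake = PTHREAD_COND_INITIALIZER;
static unsigned long   push_seq;       /* highest sequence a caller asked for */
static unsigned long   completed_seq;  /* highest sequence the worker pushed */
static bool            stop;

/* Worker thread: stands in for the CIL workqueue running the push. */
static void *push_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&push_lock);
	while (!stop) {
		while (completed_seq < push_seq) {
			unsigned long seq = push_seq;

			/* The real push would write the CIL to the log here,
			 * on the worker's stack, not the caller's. */
			pthread_mutex_unlock(&push_lock);
			printf("worker: pushing to sequence %lu\n", seq);
			pthread_mutex_lock(&push_lock);

			completed_seq = seq;
			pthread_cond_broadcast(&push_wake);
		}
		if (!stop)
			pthread_cond_wait(&push_wake, &push_lock);
	}
	pthread_mutex_unlock(&push_lock);
	return NULL;
}

/*
 * Force path: shallow stack only. Record the target and wake the worker
 * (the queue_work() analogue), then wait for completion.
 */
static void force_to_seq(unsigned long seq)
{
	pthread_mutex_lock(&push_lock);
	if (seq > push_seq) {
		push_seq = seq;
		pthread_cond_broadcast(&push_wake);
	}
	while (completed_seq < seq)
		pthread_cond_wait(&push_wake, &push_lock);
	pthread_mutex_unlock(&push_lock);
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, push_worker, NULL);
	force_to_seq(1);
	force_to_seq(3);

	pthread_mutex_lock(&push_lock);
	stop = true;
	pthread_cond_broadcast(&push_wake);
	pthread_mutex_unlock(&push_lock);
	pthread_join(worker, NULL);
	return 0;
}

Build with cc -pthread. The point of the pattern is that force_to_seq()
consumes only a few words of stack regardless of how much work the push
itself does.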
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/xfs_log_cil.c | 52
1 file changed, 39 insertions(+), 13 deletions(-)
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index cdebd832c3db..1270ded7610d 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -488,13 +488,6 @@ xlog_cil_push(
 	cil->xc_ctx = new_ctx;
 
 	/*
-	 * mirror the new sequence into the cil structure so that we can do
-	 * unlocked checks against the current sequence in log forces without
-	 * risking deferencing a freed context pointer.
-	 */
-	cil->xc_current_sequence = new_ctx->sequence;
-
-	/*
 	 * The switch is now done, so we can drop the context lock and move out
 	 * of a shared context. We can't just go straight to the commit record,
 	 * though - we need to synchronise with previous and future commits so
@@ -512,8 +505,15 @@ xlog_cil_push(
 	 * Hence we need to add this context to the committing context list so
 	 * that higher sequences will wait for us to write out a commit record
 	 * before they do.
+	 *
+	 * xfs_log_force_lsn requires us to mirror the new sequence into the cil
+	 * structure atomically with the addition of this sequence to the
+	 * committing list. This also ensures that we can do unlocked checks
+	 * against the current sequence in log forces without risking
+	 * deferencing a freed context pointer.
 	 */
 	spin_lock(&cil->xc_push_lock);
+	cil->xc_current_sequence = new_ctx->sequence;
 	list_add(&ctx->committing, &cil->xc_committing);
 	spin_unlock(&cil->xc_push_lock);
 	up_write(&cil->xc_ctx_lock);
@@ -651,8 +651,14 @@ xlog_cil_push_background(
 
 }
 
+/*
+ * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
+ * number that is passed. When it returns, the work will be queued for
+ * @push_seq, but it won't be completed. The caller is expected to do any
+ * waiting for push_seq to complete if it is required.
+ */
 static void
-xlog_cil_push_foreground(
+xlog_cil_push_now(
 	struct xlog *log,
 	xfs_lsn_t push_seq)
 {
@@ -677,10 +683,8 @@ xlog_cil_push_foreground(
 	}
 
 	cil->xc_push_seq = push_seq;
+	queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
 	spin_unlock(&cil->xc_push_lock);
-
-	/* do the push now */
-	xlog_cil_push(log);
 }
 
 bool
@@ -785,7 +789,8 @@ xlog_cil_force_lsn(
 	 * xlog_cil_push() handles racing pushes for the same sequence,
 	 * so no need to deal with it here.
 	 */
-	xlog_cil_push_foreground(log, sequence);
+restart:
+	xlog_cil_push_now(log, sequence);
 
 	/*
 	 * See if we can find a previous sequence still committing.
@@ -793,7 +798,6 @@ xlog_cil_force_lsn(
 	 * before allowing the force of push_seq to go ahead. Hence block
 	 * on commits for those as well.
 	 */
-restart:
 	spin_lock(&cil->xc_push_lock);
 	list_for_each_entry(ctx, &cil->xc_committing, committing) {
 		if (ctx->sequence > sequence)
@@ -811,6 +815,28 @@ restart:
 		/* found it! */
 		commit_lsn = ctx->commit_lsn;
 	}
+
+	/*
+	 * The call to xlog_cil_push_now() executes the push in the background.
+	 * Hence by the time we have got here it our sequence may not have been
+	 * pushed yet. This is true if the current sequence still matches the
+	 * push sequence after the above wait loop and the CIL still contains
+	 * dirty objects.
+	 *
+	 * When the push occurs, it will empty the CIL and
+	 * atomically increment the currect sequence past the push sequence and
+	 * move it into the committing list. Of course, if the CIL is clean at
+	 * the time of the push, it won't have pushed the CIL at all, so in that
+	 * case we should try the push for this sequence again from the start
+	 * just in case.
+	 */
+
+	if (sequence == cil->xc_current_sequence &&
+	    !list_empty(&cil->xc_cil)) {
+		spin_unlock(&cil->xc_push_lock);
+		goto restart;
+	}
+
 	spin_unlock(&cil->xc_push_lock);
 	return commit_lsn;
 }
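Because xlog_cil_push_now() only queues work, the re-check added at the
tail of xlog_cil_force_lsn() above is what prevents the function from
returning before its sequence has even been looked at. A minimal
stand-alone model of just that condition is sketched below; struct
cil_state and must_restart() are hypothetical stand-ins for
cil->xc_current_sequence and !list_empty(&cil->xc_cil), not real XFS
code.

#include <stdbool.h>
#include <stdio.h>

/* Stub of the CIL state the re-check examines; not the kernel's types. */
struct cil_state {
	unsigned long current_sequence;  /* cil->xc_current_sequence */
	bool          cil_has_items;     /* !list_empty(&cil->xc_cil) */
};

/*
 * The condition the patch adds before dropping xc_push_lock: if our
 * sequence is still the current one and the CIL is still dirty, the
 * queued work has not processed it yet, so the force must restart.
 * An empty committing list no longer proves the push happened - it
 * may simply not have started.
 */
static bool must_restart(const struct cil_state *cil, unsigned long sequence)
{
	return sequence == cil->current_sequence && cil->cil_has_items;
}

int main(void)
{
	struct cil_state before = { .current_sequence = 5, .cil_has_items = true };
	struct cil_state after  = { .current_sequence = 6, .cil_has_items = true };

	/* Worker hasn't run: sequence 5 is still current and dirty -> retry. */
	printf("before worker ran: restart=%d\n", must_restart(&before, 5));

	/* Worker ran: the push moved the CIL on to sequence 6 -> done. */
	printf("after worker ran:  restart=%d\n", must_restart(&after, 5));
	return 0;
}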