author		Christoph Hellwig <hch@infradead.org>	2012-02-19 21:31:20 -0500
committer	Ben Myers <bpm@sgi.com>			2012-02-22 23:17:00 -0500
commit		09a423a3d6c70905f1090f01aadb8e6abff527ce (patch)
tree		7bbe94301c348add4bb9e11b253b3f9617ef3573 /fs/xfs/xfs_trans_ail.c
parent		70b5437653d9c6c8de287affd38836cce98ebde5 (diff)
xfs: split tail_lsn assignments from log space wakeups
Currently xfs_log_move_tail has a tail_lsn argument that is horribly overloaded: it may contain either an actual LSN to assign to the log tail, 0 as a special case meaning "use the last sync LSN", or 1 to indicate that no tail LSN assignment should be performed and we should opportunistically wake up tasks waiting for log space even if we did not move the LSN.

Remove the tail LSN assignment from xfs_log_move_tail and make the two callers use xlog_assign_tail_lsn instead of the current variant of partially using the code in xfs_log_move_tail and partially open-coding it. Note that this means we grow an additional lock roundtrip on the AIL lock for each bulk update or delete, which is still far less than what we had before introducing the bulk operations. If this proves to be a problem we can still add a variant of xlog_assign_tail_lsn that expects the lock to be held already.

Also rename the remainder of xfs_log_move_tail to xfs_log_space_wake, as that name describes its functionality much better.

Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
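A condensed sketch of the calling convention before and after this patch. The call sites are taken from the hunks below and the commit text; the meaning of the boolean passed to xfs_log_space_wake() is inferred from its two call sites in this patch, and the full signatures live in xfs_log.c/xfs_log.h, which this diff does not show:

	/* Before: one overloaded entry point, steered entirely by tail_lsn. */
	xfs_log_move_tail(mp, lsn);	/* assign lsn as the new log tail */
	xfs_log_move_tail(mp, 0);	/* special case: use the last sync LSN */
	xfs_log_move_tail(mp, 1);	/* don't move the tail, just wake waiters */

	/* After: tail assignment and space wakeup are separate calls. */
	xlog_assign_tail_lsn(mp);	/* recompute the tail LSN (takes the AIL lock) */
	xfs_log_space_wake(mp, false);	/* wake tasks waiting for log space */
	xfs_log_space_wake(mp, true);	/* opportunistic wakeup without a tail move */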
Diffstat (limited to 'fs/xfs/xfs_trans_ail.c')
-rw-r--r--	fs/xfs/xfs_trans_ail.c	45
1 file changed, 11 insertions(+), 34 deletions(-)
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index ed9252bcdac9..c9234956bcb2 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -643,15 +643,15 @@ xfs_trans_unlocked_item(
 	 * at the tail, it doesn't matter what result we get back. This
 	 * is slightly racy because since we were just unlocked, we could
 	 * go to sleep between the call to xfs_ail_min and the call to
-	 * xfs_log_move_tail, have someone else lock us, commit to us disk,
+	 * xfs_log_space_wake, have someone else lock us, commit to us disk,
 	 * move us out of the tail of the AIL, and then we wake up. However,
-	 * the call to xfs_log_move_tail() doesn't do anything if there's
+	 * the call to xfs_log_space_wake() doesn't do anything if there's
 	 * not enough free space to wake people up so we're safe calling it.
 	 */
 	min_lip = xfs_ail_min(ailp);
 
 	if (min_lip == lip)
-		xfs_log_move_tail(ailp->xa_mount, 1);
+		xfs_log_space_wake(ailp->xa_mount, true);
 }	/* xfs_trans_unlocked_item */
 
 /*
@@ -685,7 +685,6 @@ xfs_trans_ail_update_bulk(
 	xfs_lsn_t	lsn) __releases(ailp->xa_lock)
 {
 	xfs_log_item_t	*mlip;
-	xfs_lsn_t	tail_lsn;
 	int	mlip_changed = 0;
 	int	i;
 	LIST_HEAD(tmp);
@@ -712,22 +711,12 @@ xfs_trans_ail_update_bulk(
 
 	if (!list_empty(&tmp))
 		xfs_ail_splice(ailp, cur, &tmp, lsn);
+	spin_unlock(&ailp->xa_lock);
 
-	if (!mlip_changed) {
-		spin_unlock(&ailp->xa_lock);
-		return;
+	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
+		xlog_assign_tail_lsn(ailp->xa_mount);
+		xfs_log_space_wake(ailp->xa_mount, false);
 	}
-
-	/*
-	 * It is not safe to access mlip after the AIL lock is dropped, so we
-	 * must get a copy of li_lsn before we do so. This is especially
-	 * important on 32-bit platforms where accessing and updating 64-bit
-	 * values like li_lsn is not atomic.
-	 */
-	mlip = xfs_ail_min(ailp);
-	tail_lsn = mlip->li_lsn;
-	spin_unlock(&ailp->xa_lock);
-	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
 }
 
 /*
@@ -758,7 +747,6 @@ xfs_trans_ail_delete_bulk(
 	int	nr_items) __releases(ailp->xa_lock)
 {
 	xfs_log_item_t	*mlip;
-	xfs_lsn_t	tail_lsn;
 	int	mlip_changed = 0;
 	int	i;
 
@@ -785,23 +773,12 @@ xfs_trans_ail_delete_bulk(
 		if (mlip == lip)
 			mlip_changed = 1;
 	}
+	spin_unlock(&ailp->xa_lock);
 
-	if (!mlip_changed) {
-		spin_unlock(&ailp->xa_lock);
-		return;
+	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
+		xlog_assign_tail_lsn(ailp->xa_mount);
+		xfs_log_space_wake(ailp->xa_mount, false);
 	}
-
-	/*
-	 * It is not safe to access mlip after the AIL lock is dropped, so we
-	 * must get a copy of li_lsn before we do so. This is especially
-	 * important on 32-bit platforms where accessing and updating 64-bit
-	 * values like li_lsn is not atomic. It is possible we've emptied the
-	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
-	 */
-	mlip = xfs_ail_min(ailp);
-	tail_lsn = mlip ? mlip->li_lsn : 0;
-	spin_unlock(&ailp->xa_lock);
-	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
 }
 
 /*
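The last paragraph of the commit message leaves room for avoiding the extra AIL lock roundtrip by adding a variant of xlog_assign_tail_lsn() that expects the lock to be held. A rough sketch of how the tail of the bulk paths above could use such a helper; the helper name is an assumption, not something this patch adds:

	/*
	 * Hypothetical follow-up, not part of this commit: assign the tail
	 * LSN while ailp->xa_lock is still held, then drop the lock and do
	 * the wakeup, saving the second lock roundtrip.
	 */
	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount))
		xlog_assign_tail_lsn_locked(ailp->xa_mount);	/* assumed name */
	spin_unlock(&ailp->xa_lock);
	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount))
		xfs_log_space_wake(ailp->xa_mount, false);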