author	Christoph Hellwig <hch@infradead.org>	2012-02-19 21:31:20 -0500
committer	Ben Myers <bpm@sgi.com>	2012-02-22 23:17:00 -0500
commit	09a423a3d6c70905f1090f01aadb8e6abff527ce (patch)
tree	7bbe94301c348add4bb9e11b253b3f9617ef3573 /fs
parent	70b5437653d9c6c8de287affd38836cce98ebde5 (diff)
xfs: split tail_lsn assignments from log space wakeups
Currently xfs_log_move_tail has a tail_lsn argument that is horribly overloaded: it may contain either an actual lsn to assign to the log tail, 0 as a special case to use the last sync LSN, or 1 to indicate that no tail LSN assignment should be performed, and that we should opportunistically wake up at least one task waiting for log space even if we did not move the LSN.

Remove the tail lsn assignment from xfs_log_move_tail and make the two callers use xlog_assign_tail_lsn instead of the current variant of partially using the code in xfs_log_move_tail and partially opencoding it. Note that this means we grow an additional lock roundtrip on the AIL lock for each bulk update or delete, which is still far less than what we had before introducing the bulk operations. If this proves to be a problem we can still add a variant of xlog_assign_tail_lsn that expects the lock to be held already.

Also rename the remainder of xfs_log_move_tail to xfs_log_space_wake, as that name describes its functionality much better.

Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
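For reference, a minimal C sketch of the calling-convention change described above (the mount pointer variable mp is assumed for illustration; the real call sites and their locking are in the diff below):

	/* Before: one overloaded call, with tail_lsn doubling as data and flag. */
	xfs_log_move_tail(mp, lsn);	/* assign lsn as the new log tail, then wake waiters */
	xfs_log_move_tail(mp, 0);	/* special case: use the last sync LSN as the tail */
	xfs_log_move_tail(mp, 1);	/* special case: don't move the tail, wake opportunistically */

	/* After: tail assignment and space wakeup are two explicit steps. */
	xlog_assign_tail_lsn(mp);	/* recompute and publish the log tail LSN */
	xfs_log_space_wake(mp, false);	/* wake waiters only within strict space accounting */
	xfs_log_space_wake(mp, true);	/* opportunistic: wake at least one waiter regardless */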
Diffstat (limited to 'fs')
-rw-r--r--	fs/xfs/xfs_log.c	74
-rw-r--r--	fs/xfs/xfs_log.h	5
-rw-r--r--	fs/xfs/xfs_log_priv.h	1
-rw-r--r--	fs/xfs/xfs_trans_ail.c	45
4 files changed, 45 insertions, 80 deletions
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e2cc3568c299..372642d39872 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -760,37 +760,35 @@ xfs_log_item_init(
 	INIT_LIST_HEAD(&item->li_cil);
 }
 
+/*
+ * Wake up processes waiting for log space after we have moved the log tail.
+ *
+ * If opportunistic is set wake up one waiter even if we do not have enough
+ * free space by our strict accounting.
+ */
 void
-xfs_log_move_tail(xfs_mount_t	*mp,
-		  xfs_lsn_t	tail_lsn)
+xfs_log_space_wake(
+	struct xfs_mount	*mp,
+	bool			opportunistic)
 {
-	xlog_ticket_t	*tic;
-	xlog_t		*log = mp->m_log;
-	int		need_bytes, free_bytes;
+	struct xlog_ticket	*tic;
+	struct log		*log = mp->m_log;
+	int			need_bytes, free_bytes;
 
 	if (XLOG_FORCED_SHUTDOWN(log))
 		return;
 
-	if (tail_lsn == 0)
-		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
-
-	/* tail_lsn == 1 implies that we weren't passed a valid value. */
-	if (tail_lsn != 1)
-		atomic64_set(&log->l_tail_lsn, tail_lsn);
-
 	if (!list_empty_careful(&log->l_writeq)) {
-#ifdef DEBUG
-		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-			panic("Recovery problem");
-#endif
+		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
+
 		spin_lock(&log->l_grant_write_lock);
 		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
 		list_for_each_entry(tic, &log->l_writeq, t_queue) {
 			ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
 
-			if (free_bytes < tic->t_unit_res && tail_lsn != 1)
+			if (free_bytes < tic->t_unit_res && !opportunistic)
 				break;
-			tail_lsn = 0;
+			opportunistic = false;
 			free_bytes -= tic->t_unit_res;
 			trace_xfs_log_regrant_write_wake_up(log, tic);
 			wake_up(&tic->t_wait);
@@ -799,10 +797,8 @@ xfs_log_move_tail(xfs_mount_t *mp,
 	}
 
 	if (!list_empty_careful(&log->l_reserveq)) {
-#ifdef DEBUG
-		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-			panic("Recovery problem");
-#endif
+		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
+
 		spin_lock(&log->l_grant_reserve_lock);
 		free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
 		list_for_each_entry(tic, &log->l_reserveq, t_queue) {
@@ -810,9 +806,9 @@ xfs_log_move_tail(xfs_mount_t *mp,
 				need_bytes = tic->t_unit_res*tic->t_cnt;
 			else
 				need_bytes = tic->t_unit_res;
-			if (free_bytes < need_bytes && tail_lsn != 1)
+			if (free_bytes < need_bytes && !opportunistic)
 				break;
-			tail_lsn = 0;
+			opportunistic = false;
 			free_bytes -= need_bytes;
 			trace_xfs_log_grant_wake_up(log, tic);
 			wake_up(&tic->t_wait);
@@ -867,21 +863,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
 	return needed;
 }
 
-/******************************************************************************
- *
- *	local routines
- *
- ******************************************************************************
- */
-
-/* xfs_trans_tail_ail returns 0 when there is nothing in the list.
- * The log manager must keep track of the last LR which was committed
- * to disk. The lsn of this LR will become the new tail_lsn whenever
- * xfs_trans_tail_ail returns 0. If we don't do this, we run into
- * the situation where stuff could be written into the log but nothing
- * was ever in the AIL when asked. Eventually, we panic since the
- * tail hits the head.
- *
+/*
  * We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
@@ -891,10 +873,17 @@ xlog_assign_tail_lsn(
 	xfs_lsn_t		tail_lsn;
 	struct log		*log = mp->m_log;
 
+	/*
+	 * To make sure we always have a valid LSN for the log tail we keep
+	 * track of the last LSN which was committed in log->l_last_sync_lsn,
+	 * and use that when the AIL was empty and xfs_ail_min_lsn returns 0.
+	 *
+	 * If the AIL has been emptied we also need to wake any process
+	 * waiting for this condition.
+	 */
 	tail_lsn = xfs_ail_min_lsn(mp->m_ail);
 	if (!tail_lsn)
 		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
-
 	atomic64_set(&log->l_tail_lsn, tail_lsn);
 	return tail_lsn;
 }
@@ -2759,9 +2748,8 @@ xlog_ungrant_log_space(xlog_t *log,
 
 	trace_xfs_log_ungrant_exit(log, ticket);
 
-	xfs_log_move_tail(log->l_mp, 1);
-}	/* xlog_ungrant_log_space */
-
+	xfs_log_space_wake(log->l_mp, true);
+}
 
 /*
  * Flush iclog to disk if this is the last reference to the given iclog and
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 2aee3b22d29c..58d858074e6b 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -160,8 +160,9 @@ int xfs_log_mount(struct xfs_mount *mp,
 			xfs_daddr_t		start_block,
 			int			num_bblocks);
 int	  xfs_log_mount_finish(struct xfs_mount *mp);
-void	  xfs_log_move_tail(struct xfs_mount	*mp,
-			    xfs_lsn_t		tail_lsn);
+xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
+void	  xfs_log_space_wake(struct xfs_mount	*mp,
+			     bool		opportunistic);
 int	  xfs_log_notify(struct xfs_mount	*mp,
 			 struct xlog_in_core	*iclog,
 			 xfs_log_callback_t	*callback_entry);
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 2d3b6a498d63..785905e3cf03 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -545,7 +545,6 @@ typedef struct log {
 #define XLOG_FORCED_SHUTDOWN(log)	((log)->l_flags & XLOG_IO_ERROR)
 
 /* common routines */
-extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
 extern int	 xlog_recover(xlog_t *log);
 extern int	 xlog_recover_finish(xlog_t *log);
 extern void	 xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index ed9252bcdac9..c9234956bcb2 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -643,15 +643,15 @@ xfs_trans_unlocked_item(
 	 * at the tail, it doesn't matter what result we get back. This
 	 * is slightly racy because since we were just unlocked, we could
 	 * go to sleep between the call to xfs_ail_min and the call to
-	 * xfs_log_move_tail, have someone else lock us, commit to us disk,
+	 * xfs_log_space_wake, have someone else lock us, commit to us disk,
 	 * move us out of the tail of the AIL, and then we wake up. However,
-	 * the call to xfs_log_move_tail() doesn't do anything if there's
+	 * the call to xfs_log_space_wake() doesn't do anything if there's
 	 * not enough free space to wake people up so we're safe calling it.
 	 */
 	min_lip = xfs_ail_min(ailp);
 
 	if (min_lip == lip)
-		xfs_log_move_tail(ailp->xa_mount, 1);
+		xfs_log_space_wake(ailp->xa_mount, true);
 }	/* xfs_trans_unlocked_item */
 
 /*
@@ -685,7 +685,6 @@ xfs_trans_ail_update_bulk(
 	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
 {
 	xfs_log_item_t		*mlip;
-	xfs_lsn_t		tail_lsn;
 	int			mlip_changed = 0;
 	int			i;
 	LIST_HEAD(tmp);
@@ -712,22 +711,12 @@ xfs_trans_ail_update_bulk(
 
 	if (!list_empty(&tmp))
 		xfs_ail_splice(ailp, cur, &tmp, lsn);
+	spin_unlock(&ailp->xa_lock);
 
-	if (!mlip_changed) {
-		spin_unlock(&ailp->xa_lock);
-		return;
+	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
+		xlog_assign_tail_lsn(ailp->xa_mount);
+		xfs_log_space_wake(ailp->xa_mount, false);
 	}
-
-	/*
-	 * It is not safe to access mlip after the AIL lock is dropped, so we
-	 * must get a copy of li_lsn before we do so. This is especially
-	 * important on 32-bit platforms where accessing and updating 64-bit
-	 * values like li_lsn is not atomic.
-	 */
-	mlip = xfs_ail_min(ailp);
-	tail_lsn = mlip->li_lsn;
-	spin_unlock(&ailp->xa_lock);
-	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
 }
 
 /*
@@ -758,7 +747,6 @@ xfs_trans_ail_delete_bulk(
 	int			nr_items) __releases(ailp->xa_lock)
 {
 	xfs_log_item_t		*mlip;
-	xfs_lsn_t		tail_lsn;
 	int			mlip_changed = 0;
 	int			i;
 
@@ -785,23 +773,12 @@ xfs_trans_ail_delete_bulk(
 		if (mlip == lip)
 			mlip_changed = 1;
 	}
+	spin_unlock(&ailp->xa_lock);
 
-	if (!mlip_changed) {
-		spin_unlock(&ailp->xa_lock);
-		return;
+	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
+		xlog_assign_tail_lsn(ailp->xa_mount);
+		xfs_log_space_wake(ailp->xa_mount, false);
 	}
-
-	/*
-	 * It is not safe to access mlip after the AIL lock is dropped, so we
-	 * must get a copy of li_lsn before we do so. This is especially
-	 * important on 32-bit platforms where accessing and updating 64-bit
-	 * values like li_lsn is not atomic. It is possible we've emptied the
-	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
-	 */
-	mlip = xfs_ail_min(ailp);
-	tail_lsn = mlip ? mlip->li_lsn : 0;
-	spin_unlock(&ailp->xa_lock);
-	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
 }
 
 /*