path: root/fs/xfs/xfs_log.c
author		Christoph Hellwig <hch@infradead.org>	2012-02-19 21:31:20 -0500
committer	Ben Myers <bpm@sgi.com>	2012-02-22 23:17:00 -0500
commit		09a423a3d6c70905f1090f01aadb8e6abff527ce (patch)
tree		7bbe94301c348add4bb9e11b253b3f9617ef3573 /fs/xfs/xfs_log.c
parent		70b5437653d9c6c8de287affd38836cce98ebde5 (diff)
xfs: split tail_lsn assignments from log space wakeups
Currently xfs_log_move_tail has a tail_lsn argument that is horribly overloaded: it may contain either an actual LSN to assign to the log tail, 0 as a special case to use the last sync LSN, or 1 to indicate that no tail LSN assignment should be performed, and that we should opportunistically wake up one task waiting for log space even if we did not move the LSN.

Remove the tail LSN assignment from xfs_log_move_tail and make the two callers use xlog_assign_tail_lsn instead of the current variant of partially using the code in xfs_log_move_tail and partially opencoding it. Note that this means we grow an additional lock roundtrip on the AIL lock for each bulk update or delete, which is still far less than what we had before introducing the bulk operations. If this proves to be a problem we can still add a variant of xlog_assign_tail_lsn that expects the lock to be held already.

Also rename the remainder of xfs_log_move_tail to xfs_log_space_wake, as that name describes its functionality much better.

Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
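For context, a minimal before/after sketch of what this split means for a caller. This is a hypothetical excerpt, not part of the patch: the mp variable and the surrounding call site are assumed, only the function names and signatures come from the diff below.

	/* Before: one overloaded call, where tail_lsn could be a real LSN, 0, or 1. */
	xfs_log_move_tail(mp, tail_lsn);

	/*
	 * After: recompute and publish the tail LSN (this is the additional AIL
	 * lock roundtrip mentioned above), then wake log space waiters separately.
	 */
	xlog_assign_tail_lsn(mp);
	xfs_log_space_wake(mp, false);	/* pass true for the old "wake anyway" (tail_lsn == 1) case */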
Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r--	fs/xfs/xfs_log.c	74
1 file changed, 31 insertions(+), 43 deletions(-)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e2cc3568c299..372642d39872 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -760,37 +760,35 @@ xfs_log_item_init(
 	INIT_LIST_HEAD(&item->li_cil);
 }
 
+/*
+ * Wake up processes waiting for log space after we have moved the log tail.
+ *
+ * If opportunistic is set wake up one waiter even if we do not have enough
+ * free space by our strict accounting.
+ */
 void
-xfs_log_move_tail(xfs_mount_t	*mp,
-		  xfs_lsn_t	tail_lsn)
+xfs_log_space_wake(
+	struct xfs_mount	*mp,
+	bool			opportunistic)
 {
-	xlog_ticket_t	*tic;
-	xlog_t		*log = mp->m_log;
-	int		need_bytes, free_bytes;
+	struct xlog_ticket	*tic;
+	struct log		*log = mp->m_log;
+	int			need_bytes, free_bytes;
 
 	if (XLOG_FORCED_SHUTDOWN(log))
 		return;
 
-	if (tail_lsn == 0)
-		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
-
-	/* tail_lsn == 1 implies that we weren't passed a valid value.	*/
-	if (tail_lsn != 1)
-		atomic64_set(&log->l_tail_lsn, tail_lsn);
-
 	if (!list_empty_careful(&log->l_writeq)) {
-#ifdef DEBUG
-		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-			panic("Recovery problem");
-#endif
+		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
+
 		spin_lock(&log->l_grant_write_lock);
 		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
 		list_for_each_entry(tic, &log->l_writeq, t_queue) {
 			ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
 
-			if (free_bytes < tic->t_unit_res && tail_lsn != 1)
+			if (free_bytes < tic->t_unit_res && !opportunistic)
 				break;
-			tail_lsn = 0;
+			opportunistic = false;
 			free_bytes -= tic->t_unit_res;
 			trace_xfs_log_regrant_write_wake_up(log, tic);
 			wake_up(&tic->t_wait);
@@ -799,10 +797,8 @@ xfs_log_move_tail(xfs_mount_t *mp,
 	}
 
 	if (!list_empty_careful(&log->l_reserveq)) {
-#ifdef DEBUG
-		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-			panic("Recovery problem");
-#endif
+		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
+
 		spin_lock(&log->l_grant_reserve_lock);
 		free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
 		list_for_each_entry(tic, &log->l_reserveq, t_queue) {
@@ -810,9 +806,9 @@ xfs_log_move_tail(xfs_mount_t *mp,
 				need_bytes = tic->t_unit_res*tic->t_cnt;
 			else
 				need_bytes = tic->t_unit_res;
-			if (free_bytes < need_bytes && tail_lsn != 1)
+			if (free_bytes < need_bytes && !opportunistic)
 				break;
-			tail_lsn = 0;
+			opportunistic = false;
 			free_bytes -= need_bytes;
 			trace_xfs_log_grant_wake_up(log, tic);
 			wake_up(&tic->t_wait);
@@ -867,21 +863,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
 	return needed;
 }
 
-/******************************************************************************
- *
- *	local routines
- *
- ******************************************************************************
- */
-
-/* xfs_trans_tail_ail returns 0 when there is nothing in the list.
- * The log manager must keep track of the last LR which was committed
- * to disk.  The lsn of this LR will become the new tail_lsn whenever
- * xfs_trans_tail_ail returns 0.  If we don't do this, we run into
- * the situation where stuff could be written into the log but nothing
- * was ever in the AIL when asked.  Eventually, we panic since the
- * tail hits the head.
- *
+/*
  * We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
@@ -891,10 +873,17 @@ xlog_assign_tail_lsn(
 	xfs_lsn_t		tail_lsn;
 	struct log		*log = mp->m_log;
 
+	/*
+	 * To make sure we always have a valid LSN for the log tail we keep
+	 * track of the last LSN which was committed in log->l_last_sync_lsn,
+	 * and use that when the AIL was empty and xfs_ail_min_lsn returns 0.
+	 *
+	 * If the AIL has been emptied we also need to wake any process
+	 * waiting for this condition.
+	 */
 	tail_lsn = xfs_ail_min_lsn(mp->m_ail);
 	if (!tail_lsn)
 		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
-
 	atomic64_set(&log->l_tail_lsn, tail_lsn);
 	return tail_lsn;
 }
@@ -2759,9 +2748,8 @@ xlog_ungrant_log_space(xlog_t *log,
 
 	trace_xfs_log_ungrant_exit(log, ticket);
 
-	xfs_log_move_tail(log->l_mp, 1);
-}	/* xlog_ungrant_log_space */
-
+	xfs_log_space_wake(log->l_mp, true);
+}
 
 /*
  * Flush iclog to disk if this is the last reference to the given iclog and