path: root/fs/xfs
author	Christoph Hellwig <hch@lst.de>	2008-12-03 06:20:28 -0500
committer	Niv Sardi <xaiki@sgi.com>	2008-12-03 23:39:21 -0500
commit	39e2defe73106ca2e1c85e5286038a0a13f49513 (patch)
tree	2a2f5c12d92ff8170f8e7f89bda56b1311b51a8d /fs/xfs
parent	d9424b3c4a1e96f87c6cfd4d8dd2f8d9bbb4dcc5 (diff)
reduce l_icloglock roundtrips
All but one caller of xlog_state_want_sync drop and re-acquire l_icloglock around the call to it, just so that xlog_state_want_sync can acquire and drop it. Move all lock operations out of xlog_state_want_sync and assert that the lock is held when it is called.

Note that it would make sense to extend this scheme to xlog_state_release_iclog, but the locking in there is more complicated and we'd like to keep the atomic_dec_and_lock optimization for those callers not holding l_icloglock yet.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Niv Sardi <xaiki@sgi.com>
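For context, a minimal sketch of the calling convention before and after the patch (the helper names want_sync_before/want_sync_after are placeholders, not the actual xfs_log.c functions; bodies are simplified): previously xlog_state_want_sync took and dropped l_icloglock itself, forcing callers that already held the lock to release it just for the call; afterwards the caller keeps the lock across the call and the helper only asserts that it is held.

	/*
	 * Simplified sketch of the locking change -- illustrative helper
	 * names, not the real xfs_log.c code.
	 */

	/* Before: the helper did its own lock/unlock round trip. */
	static void want_sync_before(xlog_t *log, xlog_in_core_t *iclog)
	{
		spin_lock(&log->l_icloglock);
		if (iclog->ic_state == XLOG_STATE_ACTIVE)
			xlog_state_switch_iclogs(log, iclog, 0);
		spin_unlock(&log->l_icloglock);
	}

	/* A caller already holding l_icloglock had to drop and re-take it:
	 *
	 *	spin_lock(&log->l_icloglock);
	 *	iclog = log->l_iclog;
	 *	atomic_inc(&iclog->ic_refcnt);
	 *	spin_unlock(&log->l_icloglock);	// dropped only for the call
	 *	want_sync_before(log, iclog);	// re-acquired and dropped inside
	 */

	/* After: the caller keeps the lock held; the helper merely asserts it. */
	static void want_sync_after(xlog_t *log, xlog_in_core_t *iclog)
	{
		ASSERT(spin_is_locked(&log->l_icloglock));
		if (iclog->ic_state == XLOG_STATE_ACTIVE)
			xlog_state_switch_iclogs(log, iclog, 0);
	}

	/* Caller-side pattern after the patch -- one lock/unlock pair total:
	 *
	 *	spin_lock(&log->l_icloglock);
	 *	iclog = log->l_iclog;
	 *	atomic_inc(&iclog->ic_refcnt);
	 *	want_sync_after(log, iclog);
	 *	spin_unlock(&log->l_icloglock);
	 */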
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/xfs_log.c	13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index aadaa1472f69..f4726f702a9e 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -729,8 +729,8 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 		spin_lock(&log->l_icloglock);
 		iclog = log->l_iclog;
 		atomic_inc(&iclog->ic_refcnt);
-		spin_unlock(&log->l_icloglock);
 		xlog_state_want_sync(log, iclog);
+		spin_unlock(&log->l_icloglock);
 		error = xlog_state_release_iclog(log, iclog);
 
 		spin_lock(&log->l_icloglock);
@@ -767,9 +767,9 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 		spin_lock(&log->l_icloglock);
 		iclog = log->l_iclog;
 		atomic_inc(&iclog->ic_refcnt);
-		spin_unlock(&log->l_icloglock);
 
 		xlog_state_want_sync(log, iclog);
+		spin_unlock(&log->l_icloglock);
 		error = xlog_state_release_iclog(log, iclog);
 
 		spin_lock(&log->l_icloglock);
@@ -1984,7 +1984,9 @@ xlog_write(xfs_mount_t * mp,
 		if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
 			xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
 			record_cnt = data_cnt = 0;
+			spin_lock(&log->l_icloglock);
 			xlog_state_want_sync(log, iclog);
+			spin_unlock(&log->l_icloglock);
 			if (commit_iclog) {
 				ASSERT(flags & XLOG_COMMIT_TRANS);
 				*commit_iclog = iclog;
@@ -3193,7 +3195,7 @@ try_again:
 STATIC void
 xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
 {
-	spin_lock(&log->l_icloglock);
+	ASSERT(spin_is_locked(&log->l_icloglock));
 
 	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
 		xlog_state_switch_iclogs(log, iclog, 0);
@@ -3201,10 +3203,7 @@ xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
 		ASSERT(iclog->ic_state &
 			(XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
 	}
-
-	spin_unlock(&log->l_icloglock);
-}	/* xlog_state_want_sync */
-
+}
 
 
 /*****************************************************************************