author     Christoph Hellwig <hch@infradead.org>    2010-01-19 04:56:46 -0500
committer  Alex Elder <aelder@sgi.com>              2010-01-21 14:44:49 -0500
commit     a14a348bff2f99471a28e5928eb6801224c053d8 (patch)
tree       9a34d7e988cd78bc9b19369cbad3ad009b9a2bfd /fs/xfs
parent     4139b3b337cffd106744386c842b89dc86e31d4b (diff)
xfs: cleanup up xfs_log_force calling conventions
Remove the XFS_LOG_FORCE argument, which was always set, and the XFS_LOG_URGE define, which was never used.

Split xfs_log_force into two helpers - xfs_log_force, which forces the whole log, and xfs_log_force_lsn, which forces up to the specified LSN. The underlying implementations already were entirely separate, as were the users.

Also re-indent the new _xfs_log_force/_xfs_log_force_lsn, which previously had a weird coding style.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
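The change is easiest to see at a call site. A minimal before/after sketch, derived from the hunks below (illustrative only, not itself part of the patch):

	/* Before: one entry point; XFS_LOG_FORCE had to be passed on every call. */
	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);		/* async, whole log */
	xfs_log_force(mp, lsn, XFS_LOG_FORCE | XFS_LOG_SYNC);	/* sync, up to lsn */

	/* After: two helpers; XFS_LOG_SYNC is the only remaining flag. */
	xfs_log_force(mp, 0);					/* async, whole log */
	xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC);		/* sync, up to lsn */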
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c     |  17
-rw-r--r--  fs/xfs/quota/xfs_dquot.c        |  10
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c   |   9
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c  |   4
-rw-r--r--  fs/xfs/xfs_alloc.c              |   2
-rw-r--r--  fs/xfs/xfs_inode.c              |   9
-rw-r--r--  fs/xfs/xfs_inode_item.c         |   7
-rw-r--r--  fs/xfs/xfs_log.c                | 312
-rw-r--r--  fs/xfs/xfs_log.h                |  15
-rw-r--r--  fs/xfs/xfs_log_recover.c        |   3
-rw-r--r--  fs/xfs/xfs_mount.c              |   4
-rw-r--r--  fs/xfs/xfs_trans.c              |   5
-rw-r--r--  fs/xfs/xfs_trans_ail.c          |   2
-rw-r--r--  fs/xfs/xfs_vnodeops.c           |   5
14 files changed, 193 insertions, 211 deletions
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 58c24be72c65..c9b863eacab7 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -296,10 +296,7 @@ xfs_sync_data(
 	if (error)
 		return XFS_ERROR(error);
 
-	xfs_log_force(mp, 0,
-			(flags & SYNC_WAIT) ?
-			XFS_LOG_FORCE | XFS_LOG_SYNC :
-			XFS_LOG_FORCE);
+	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
 	return 0;
 }
 
@@ -325,10 +322,6 @@ xfs_commit_dummy_trans(
 	struct xfs_inode	*ip = mp->m_rootip;
 	struct xfs_trans	*tp;
 	int			error;
-	int			log_flags = XFS_LOG_FORCE;
-
-	if (flags & SYNC_WAIT)
-		log_flags |= XFS_LOG_SYNC;
 
 	/*
 	 * Put a dummy transaction in the log to tell recovery
@@ -350,7 +343,7 @@ xfs_commit_dummy_trans(
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 
 	/* the log force ensures this transaction is pushed to disk */
-	xfs_log_force(mp, 0, log_flags);
+	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
 	return error;
 }
 
@@ -390,7 +383,7 @@ xfs_sync_fsdata(
 	 * become pinned in between there and here.
 	 */
 	if (XFS_BUF_ISPINNED(bp))
-		xfs_log_force(mp, 0, XFS_LOG_FORCE);
+		xfs_log_force(mp, 0);
 	}
 
 
@@ -575,7 +568,7 @@ xfs_flush_inodes(
 	igrab(inode);
 	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
 	wait_for_completion(&completion);
-	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
+	xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
 }
 
 /*
@@ -591,7 +584,7 @@ xfs_sync_worker(
 	int		error;
 
 	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
-		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+		xfs_log_force(mp, 0);
 		xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
 		/* dgc: errors ignored here */
 		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 5756392ffdee..f9baeedbfdfe 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -1248,7 +1248,7 @@ xfs_qm_dqflush(
 	 */
 	if (XFS_BUF_ISPINNED(bp)) {
 		trace_xfs_dqflush_force(dqp);
-		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+		xfs_log_force(mp, 0);
 	}
 
 	if (flags & XFS_QMOPT_DELWRI) {
@@ -1531,11 +1531,9 @@ xfs_qm_dqflock_pushbuf_wait(
 	if (bp != NULL) {
 		if (XFS_BUF_ISDELAYWRITE(bp)) {
 			int	error;
-			if (XFS_BUF_ISPINNED(bp)) {
-				xfs_log_force(dqp->q_mount,
-					      (xfs_lsn_t)0,
-					      XFS_LOG_FORCE);
-			}
+
+			if (XFS_BUF_ISPINNED(bp))
+				xfs_log_force(dqp->q_mount, 0);
 			error = xfs_bawrite(dqp->q_mount, bp);
 			if (error)
 				xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 116580d52fae..1b564376d50c 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -190,7 +190,7 @@ xfs_qm_dqunpin_wait(
 	/*
 	 * Give the log a push so we don't wait here too long.
 	 */
-	xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE);
+	xfs_log_force(dqp->q_mount, 0);
 	wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
 }
 
@@ -245,10 +245,9 @@ xfs_qm_dquot_logitem_pushbuf(
 	qip->qli_pushbuf_flag = 0;
 	xfs_dqunlock(dqp);
 
-	if (XFS_BUF_ISPINNED(bp)) {
-		xfs_log_force(mp, (xfs_lsn_t)0,
-			      XFS_LOG_FORCE);
-	}
+	if (XFS_BUF_ISPINNED(bp))
+		xfs_log_force(mp, 0);
+
 	if (dopush) {
 		int	error;
 #ifdef XFSRACEDEBUG
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 873e07e29074..5d0ee8d492db 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -1192,9 +1192,9 @@ xfs_qm_internalqcheck(
 	if (! XFS_IS_QUOTA_ON(mp))
 		return XFS_ERROR(ESRCH);
 
-	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
+	xfs_log_force(mp, XFS_LOG_SYNC);
 	XFS_bflush(mp->m_ddev_targp);
-	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
+	xfs_log_force(mp, XFS_LOG_SYNC);
 	XFS_bflush(mp->m_ddev_targp);
 
 	mutex_lock(&qcheck_lock);
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index a27aeb7d9e74..94cddbfb2560 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -2601,5 +2601,5 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
 	 * transaction that freed the block
 	 */
 	if (lsn)
-		xfs_log_force(tp->t_mountp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
+		xfs_log_force_lsn(tp->t_mountp, lsn, XFS_LOG_SYNC);
 }
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index bbb3bee8e936..d0d1b5a05183 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2484,8 +2484,11 @@ __xfs_iunpin_wait(
 		return;
 
 	/* Give the log a push to start the unpinning I/O */
-	xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ?
-				iip->ili_last_lsn : 0, XFS_LOG_FORCE);
+	if (iip && iip->ili_last_lsn)
+		xfs_log_force_lsn(ip->i_mount, iip->ili_last_lsn, 0);
+	else
+		xfs_log_force(ip->i_mount, 0);
+
 	if (wait)
 		wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
 }
@@ -2970,7 +2973,7 @@ xfs_iflush(
 	 * get stuck waiting in the write for too long.
 	 */
 	if (XFS_BUF_ISPINNED(bp))
-		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+		xfs_log_force(mp, 0);
 
 	/*
 	 * inode clustering:
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index da4cac67bdae..48ec1c0b23ce 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -804,10 +804,9 @@ xfs_inode_item_pushbuf(
 
 	trace_xfs_inode_item_push(bp, _RET_IP_);
 
-	if (XFS_BUF_ISPINNED(bp)) {
-		xfs_log_force(mp, (xfs_lsn_t)0,
-			      XFS_LOG_FORCE);
-	}
+	if (XFS_BUF_ISPINNED(bp))
+		xfs_log_force(mp, 0);
+
 	if (dopush) {
 		int	error;
 		error = xfs_bawrite(mp, bp);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 20118ddadef6..4f16be4b6ee5 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -79,11 +79,6 @@ STATIC int xlog_state_release_iclog(xlog_t *log,
 STATIC void xlog_state_switch_iclogs(xlog_t	*log,
 				     xlog_in_core_t *iclog,
 				     int	eventual_size);
-STATIC int  xlog_state_sync(xlog_t	*log,
-			    xfs_lsn_t	lsn,
-			    uint	flags,
-			    int		*log_flushed);
-STATIC int  xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed);
 STATIC void xlog_state_want_sync(xlog_t	*log, xlog_in_core_t *iclog);
 
 /* local functions to manipulate grant head */
@@ -296,65 +291,6 @@ xfs_log_done(xfs_mount_t *mp,
 	return lsn;
 }	/* xfs_log_done */
 
-
-/*
- * Force the in-core log to disk.  If flags == XFS_LOG_SYNC,
- * the force is done synchronously.
- *
- * Asynchronous forces are implemented by setting the WANT_SYNC
- * bit in the appropriate in-core log and then returning.
- *
- * Synchronous forces are implemented with a signal variable. All callers
- * to force a given lsn to disk will wait on a the sv attached to the
- * specific in-core log.  When given in-core log finally completes its
- * write to disk, that thread will wake up all threads waiting on the
- * sv.
- */
-int
-_xfs_log_force(
-	xfs_mount_t	*mp,
-	xfs_lsn_t	lsn,
-	uint		flags,
-	int		*log_flushed)
-{
-	xlog_t		*log = mp->m_log;
-	int		dummy;
-
-	if (!log_flushed)
-		log_flushed = &dummy;
-
-	ASSERT(flags & XFS_LOG_FORCE);
-
-	XFS_STATS_INC(xs_log_force);
-
-	if (log->l_flags & XLOG_IO_ERROR)
-		return XFS_ERROR(EIO);
-	if (lsn == 0)
-		return xlog_state_sync_all(log, flags, log_flushed);
-	else
-		return xlog_state_sync(log, lsn, flags, log_flushed);
-}	/* _xfs_log_force */
-
-/*
- * Wrapper for _xfs_log_force(), to be used when caller doesn't care
- * about errors or whether the log was flushed or not. This is the normal
- * interface to use when trying to unpin items or move the log forward.
- */
-void
-xfs_log_force(
-	xfs_mount_t	*mp,
-	xfs_lsn_t	lsn,
-	uint		flags)
-{
-	int	error;
-	error = _xfs_log_force(mp, lsn, flags, NULL);
-	if (error) {
-		xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: "
-			"error %d returned.", error);
-	}
-}
-
-
 /*
  * Attaches a new iclog I/O completion callback routine during
  * transaction commit.  If the log is in error state, a non-zero
@@ -601,7 +537,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 	if (mp->m_flags & XFS_MOUNT_RDONLY)
 		return 0;
 
-	error = _xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC, NULL);
+	error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
 	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
 
 #ifdef DEBUG
@@ -2853,7 +2789,6 @@ xlog_state_switch_iclogs(xlog_t *log,
 	log->l_iclog = iclog->ic_next;
 }	/* xlog_state_switch_iclogs */
 
-
 /*
  * Write out all data in the in-core log as of this exact moment in time.
  *
@@ -2881,11 +2816,17 @@ xlog_state_switch_iclogs(xlog_t *log,
  * b) when we return from flushing out this iclog, it is still
  *	not in the active nor dirty state.
  */
-STATIC int
-xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
+int
+_xfs_log_force(
+	struct xfs_mount	*mp,
+	uint			flags,
+	int			*log_flushed)
 {
-	xlog_in_core_t	*iclog;
-	xfs_lsn_t	lsn;
+	struct log		*log = mp->m_log;
+	struct xlog_in_core	*iclog;
+	xfs_lsn_t		lsn;
+
+	XFS_STATS_INC(xs_log_force);
 
 	spin_lock(&log->l_icloglock);
 
@@ -2931,7 +2872,9 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
 
 		if (xlog_state_release_iclog(log, iclog))
 			return XFS_ERROR(EIO);
-		*log_flushed = 1;
+
+		if (log_flushed)
+			*log_flushed = 1;
 		spin_lock(&log->l_icloglock);
 		if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
 		    iclog->ic_state != XLOG_STATE_DIRTY)
@@ -2975,19 +2918,37 @@ maybe_sleep:
 		 */
 		if (iclog->ic_state & XLOG_STATE_IOERROR)
 			return XFS_ERROR(EIO);
-		*log_flushed = 1;
-
+		if (log_flushed)
+			*log_flushed = 1;
 	} else {
 
 no_sleep:
 		spin_unlock(&log->l_icloglock);
 	}
 	return 0;
-}	/* xlog_state_sync_all */
+}
 
+/*
+ * Wrapper for _xfs_log_force(), to be used when caller doesn't care
+ * about errors or whether the log was flushed or not. This is the normal
+ * interface to use when trying to unpin items or move the log forward.
+ */
+void
+xfs_log_force(
+	xfs_mount_t	*mp,
+	uint		flags)
+{
+	int	error;
+
+	error = _xfs_log_force(mp, flags, NULL);
+	if (error) {
+		xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: "
+			"error %d returned.", error);
+	}
+}
 
 /*
- * Used by code which implements synchronous log forces.
+ * Force the in-core log to disk for a specific LSN.
  *
  * Find in-core log with lsn.
  * If it is in the DIRTY state, just return.
@@ -2995,109 +2956,142 @@ no_sleep:
  * state and go to sleep or return.
  * If it is in any other state, go to sleep or return.
  *
- * If filesystem activity goes to zero, the iclog will get flushed only by
- * bdflush().
- */
-STATIC int
-xlog_state_sync(xlog_t	  *log,
-	        xfs_lsn_t lsn,
-		uint	  flags,
-		int	  *log_flushed)
-{
-    xlog_in_core_t	*iclog;
-    int			already_slept = 0;
-
-try_again:
-    spin_lock(&log->l_icloglock);
-    iclog = log->l_iclog;
-
-    if (iclog->ic_state & XLOG_STATE_IOERROR) {
-	    spin_unlock(&log->l_icloglock);
-	    return XFS_ERROR(EIO);
-    }
-
-    do {
-	if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
-	    iclog = iclog->ic_next;
-	    continue;
-	}
-
-	if (iclog->ic_state == XLOG_STATE_DIRTY) {
-		spin_unlock(&log->l_icloglock);
-		return 0;
-	}
-
-	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
-		/*
-		 * We sleep here if we haven't already slept (e.g.
-		 * this is the first time we've looked at the correct
-		 * iclog buf) and the buffer before us is going to
-		 * be sync'ed. The reason for this is that if we
-		 * are doing sync transactions here, by waiting for
-		 * the previous I/O to complete, we can allow a few
-		 * more transactions into this iclog before we close
-		 * it down.
-		 *
-		 * Otherwise, we mark the buffer WANT_SYNC, and bump
-		 * up the refcnt so we can release the log (which drops
-		 * the ref count). The state switch keeps new transaction
-		 * commits from using this buffer. When the current commits
-		 * finish writing into the buffer, the refcount will drop to
-		 * zero and the buffer will go out then.
-		 */
-		if (!already_slept &&
-		    (iclog->ic_prev->ic_state & (XLOG_STATE_WANT_SYNC |
-						 XLOG_STATE_SYNCING))) {
-			ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
-			XFS_STATS_INC(xs_log_force_sleep);
-			sv_wait(&iclog->ic_prev->ic_write_wait, PSWP,
-				&log->l_icloglock, s);
-			*log_flushed = 1;
-			already_slept = 1;
-			goto try_again;
-		} else {
-			atomic_inc(&iclog->ic_refcnt);
-			xlog_state_switch_iclogs(log, iclog, 0);
-			spin_unlock(&log->l_icloglock);
-			if (xlog_state_release_iclog(log, iclog))
-				return XFS_ERROR(EIO);
-			*log_flushed = 1;
-			spin_lock(&log->l_icloglock);
-		}
-	}
-
-	if ((flags & XFS_LOG_SYNC) && /* sleep */
-	    !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
-
-		/*
-		 * Don't wait on completion if we know that we've
-		 * gotten a log write error.
-		 */
-		if (iclog->ic_state & XLOG_STATE_IOERROR) {
-			spin_unlock(&log->l_icloglock);
-			return XFS_ERROR(EIO);
-		}
-		XFS_STATS_INC(xs_log_force_sleep);
-		sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
-		/*
-		 * No need to grab the log lock here since we're
-		 * only deciding whether or not to return EIO
-		 * and the memory read should be atomic.
-		 */
-		if (iclog->ic_state & XLOG_STATE_IOERROR)
-			return XFS_ERROR(EIO);
-		*log_flushed = 1;
-	} else {		/* just return */
-		spin_unlock(&log->l_icloglock);
-	}
-	return 0;
-
-    } while (iclog != log->l_iclog);
-
-    spin_unlock(&log->l_icloglock);
-    return 0;
-} /* xlog_state_sync */
-
+ * Synchronous forces are implemented with a signal variable. All callers
+ * to force a given lsn to disk will wait on a the sv attached to the
+ * specific in-core log.  When given in-core log finally completes its
+ * write to disk, that thread will wake up all threads waiting on the
+ * sv.
+ */
+int
+_xfs_log_force_lsn(
+	struct xfs_mount	*mp,
+	xfs_lsn_t		lsn,
+	uint			flags,
+	int			*log_flushed)
+{
+	struct log		*log = mp->m_log;
+	struct xlog_in_core	*iclog;
+	int			already_slept = 0;
+
+	ASSERT(lsn != 0);
+
+	XFS_STATS_INC(xs_log_force);
+
+try_again:
+	spin_lock(&log->l_icloglock);
+	iclog = log->l_iclog;
+	if (iclog->ic_state & XLOG_STATE_IOERROR) {
+		spin_unlock(&log->l_icloglock);
+		return XFS_ERROR(EIO);
+	}
+
+	do {
+		if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
+			iclog = iclog->ic_next;
+			continue;
+		}
+
+		if (iclog->ic_state == XLOG_STATE_DIRTY) {
+			spin_unlock(&log->l_icloglock);
+			return 0;
+		}
+
+		if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+			/*
+			 * We sleep here if we haven't already slept (e.g.
+			 * this is the first time we've looked at the correct
+			 * iclog buf) and the buffer before us is going to
+			 * be sync'ed. The reason for this is that if we
+			 * are doing sync transactions here, by waiting for
+			 * the previous I/O to complete, we can allow a few
+			 * more transactions into this iclog before we close
+			 * it down.
+			 *
+			 * Otherwise, we mark the buffer WANT_SYNC, and bump
+			 * up the refcnt so we can release the log (which
+			 * drops the ref count). The state switch keeps new
+			 * transaction commits from using this buffer. When
+			 * the current commits finish writing into the buffer,
+			 * the refcount will drop to zero and the buffer will
+			 * go out then.
+			 */
+			if (!already_slept &&
+			    (iclog->ic_prev->ic_state &
+			     (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
+				ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
+
+				XFS_STATS_INC(xs_log_force_sleep);
+
+				sv_wait(&iclog->ic_prev->ic_write_wait,
+					PSWP, &log->l_icloglock, s);
+				if (log_flushed)
+					*log_flushed = 1;
+				already_slept = 1;
+				goto try_again;
+			}
+			atomic_inc(&iclog->ic_refcnt);
+			xlog_state_switch_iclogs(log, iclog, 0);
+			spin_unlock(&log->l_icloglock);
+			if (xlog_state_release_iclog(log, iclog))
+				return XFS_ERROR(EIO);
+			if (log_flushed)
+				*log_flushed = 1;
+			spin_lock(&log->l_icloglock);
+		}
+
+		if ((flags & XFS_LOG_SYNC) && /* sleep */
+		    !(iclog->ic_state &
+		      (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
+			/*
+			 * Don't wait on completion if we know that we've
+			 * gotten a log write error.
+			 */
+			if (iclog->ic_state & XLOG_STATE_IOERROR) {
+				spin_unlock(&log->l_icloglock);
+				return XFS_ERROR(EIO);
+			}
+			XFS_STATS_INC(xs_log_force_sleep);
+			sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
+			/*
+			 * No need to grab the log lock here since we're
+			 * only deciding whether or not to return EIO
+			 * and the memory read should be atomic.
+			 */
+			if (iclog->ic_state & XLOG_STATE_IOERROR)
+				return XFS_ERROR(EIO);
+
+			if (log_flushed)
+				*log_flushed = 1;
+		} else {		/* just return */
+			spin_unlock(&log->l_icloglock);
+		}
+
+		return 0;
+	} while (iclog != log->l_iclog);
+
+	spin_unlock(&log->l_icloglock);
+	return 0;
+}
+
+/*
+ * Wrapper for _xfs_log_force_lsn(), to be used when caller doesn't care
+ * about errors or whether the log was flushed or not. This is the normal
+ * interface to use when trying to unpin items or move the log forward.
+ */
+void
+xfs_log_force_lsn(
+	xfs_mount_t	*mp,
+	xfs_lsn_t	lsn,
+	uint		flags)
+{
+	int	error;
+
+	error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
+	if (error) {
+		xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: "
+			"error %d returned.", error);
+	}
+}
 
 /*
  * Called when we want to mark the current iclog as being ready to sync to
@@ -3462,7 +3456,6 @@ xfs_log_force_umount(
 	xlog_ticket_t	*tic;
 	xlog_t		*log;
 	int		retval;
-	int		dummy;
 
 	log = mp->m_log;
 
@@ -3536,13 +3529,14 @@ xfs_log_force_umount(
 	}
 	spin_unlock(&log->l_grant_lock);
 
-	if (! (log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
+	if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
 		ASSERT(!logerror);
 		/*
 		 * Force the incore logs to disk before shutting the
 		 * log down completely.
 		 */
-		xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC, &dummy);
+		_xfs_log_force(mp, XFS_LOG_SYNC, NULL);
+
 		spin_lock(&log->l_icloglock);
 		retval = xlog_state_ioerror(log);
 		spin_unlock(&log->l_icloglock);
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 811ccf4d8b3e..7074be9d13e9 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -70,14 +70,8 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
  * Flags to xfs_log_force()
  *
  * XFS_LOG_SYNC:	Synchronous force in-core log to disk
- * XFS_LOG_FORCE:	Start in-core log write now.
- * XFS_LOG_URGE:	Start write within some window of time.
- *
- * Note: Either XFS_LOG_FORCE or XFS_LOG_URGE must be set.
  */
 #define XFS_LOG_SYNC	0x1
-#define XFS_LOG_FORCE	0x2
-#define XFS_LOG_URGE	0x4
 
 #endif /* __KERNEL__ */
 
@@ -138,12 +132,17 @@ xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
 			void		**iclog,
 			uint		flags);
 int	  _xfs_log_force(struct xfs_mount *mp,
-			 xfs_lsn_t	lsn,
 			 uint		flags,
 			 int		*log_forced);
 void	  xfs_log_force(struct xfs_mount	*mp,
-			xfs_lsn_t	lsn,
 			uint		flags);
+int	  _xfs_log_force_lsn(struct xfs_mount *mp,
+			     xfs_lsn_t		lsn,
+			     uint		flags,
+			     int		*log_forced);
+void	  xfs_log_force_lsn(struct xfs_mount	*mp,
+			    xfs_lsn_t		lsn,
+			    uint		flags);
 int	  xfs_log_mount(struct xfs_mount	*mp,
 			struct xfs_buftarg	*log_target,
 			xfs_daddr_t		start_block,
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 97148f0c4bdd..22e6efdc17ea 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3913,8 +3913,7 @@ xlog_recover_finish(
 		 * case the unlink transactions would have problems
 		 * pushing the EFIs out of the way.
 		 */
-		xfs_log_force(log->l_mp, (xfs_lsn_t)0,
-			      (XFS_LOG_FORCE | XFS_LOG_SYNC));
+		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
 
 		xlog_recover_process_iunlinks(log);
 
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index bb0154047e85..7f81ed72c875 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1455,7 +1455,7 @@ xfs_unmountfs(
 	 * push out the iclog we will never get that unlocked. hence we
 	 * need to force the log first.
 	 */
-	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
+	xfs_log_force(mp, XFS_LOG_SYNC);
 	xfs_reclaim_inodes(mp, XFS_IFLUSH_ASYNC);
 
 	xfs_qm_unmount(mp);
@@ -1465,7 +1465,7 @@ xfs_unmountfs(
 	 * that nothing is pinned.  This is important because bflush()
 	 * will skip pinned buffers.
 	 */
-	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
+	xfs_log_force(mp, XFS_LOG_SYNC);
 
 	xfs_binval(mp->m_ddev_targp);
 	if (mp->m_rtdev_targp) {
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 7dbe3c3051db..be942d4e3324 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -981,9 +981,8 @@ shut_us_down:
 	 */
 	if (sync) {
 		if (!error) {
-			error = _xfs_log_force(mp, commit_lsn,
-					XFS_LOG_FORCE | XFS_LOG_SYNC,
-					log_flushed);
+			error = _xfs_log_force_lsn(mp, commit_lsn,
+					XFS_LOG_SYNC, log_flushed);
 		}
 		XFS_STATS_INC(xs_trans_sync);
 	} else {
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 063dfbdca94b..d7b1af8a832d 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -371,7 +371,7 @@ xfsaild_push(
 		 * move forward in the AIL.
 		 */
 		XFS_STATS_INC(xs_push_ail_flush);
-		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+		xfs_log_force(mp, 0);
 	}
 
 	if (!count) {
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 4da96cdffb76..fd108b738559 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -631,9 +631,8 @@ xfs_fsync(
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
 	if (xfs_ipincount(ip)) {
-		error = _xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
-				       XFS_LOG_FORCE | XFS_LOG_SYNC,
-				       &log_flushed);
+		error = _xfs_log_force(ip->i_mount, XFS_LOG_SYNC,
+				       &log_flushed);
 	} else {
 		/*
 		 * If the inode is not pinned and nothing has changed