author     Christoph Hellwig <hch@sgi.com>    2005-11-01 18:26:59 -0500
committer  Nathan Scott <nathans@sgi.com>     2005-11-01 18:26:59 -0500
commit     f538d4da8d521746ca5ebf8c1a8105eb49bfb45e
tree       5516e1d2df01e412709284e379085b348122c501  /fs/xfs/xfs_log.c
parent     739cafd316235fc55463849e80710f2ca308b9ae
[XFS] write barrier support

Issue all log sync operations as ordered writes.  In addition, flush the disk
write cache on fsync if the sync operation did not actually write the log to
disk (this requires some additional bookkeeping in the transaction and log
code).

If the device doesn't claim to support barriers, the filesystem has an
external log volume, or the trial superblock write with barriers enabled
failed, we disable barriers and print a warning.  We should probably fail the
mount completely, but that could lead to nasty boot failures for the root
filesystem.

Not enabled by default yet; needs more destructive testing first.

SGI-PV: 912426
SGI-Modid: xfs-linux:xfs-kern:198723a

Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>
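As a reading aid, here is a small user-space model of the new log_flushed
bookkeeping; it is not part of the patch.  _xfs_log_force() in the diff below
sets *log_flushed whenever it actually pushed or waited on an in-core log
buffer, and the fsync path (changed elsewhere in this patch, not in this file)
can then issue an explicit disk cache flush only when the flag stayed clear.
All names in the sketch (model_log, log_force, fsync_file, issue_cache_flush)
are illustrative stand-ins, not XFS code.

/*
 * Minimal user-space model of the log_flushed bookkeeping added by this
 * patch.  All names here are illustrative stand-ins, not XFS code.
 */
#include <stdio.h>

struct model_log {
	int already_on_disk;	/* nonzero: the requested LSN is already stable */
};

/* Stand-in for an explicit block-device cache flush. */
static void issue_cache_flush(void)
{
	printf("explicit disk cache flush issued\n");
}

/*
 * Modeled after _xfs_log_force(): callers that do not care about the
 * bookkeeping pass NULL, which is redirected to a local dummy.
 */
static int log_force(struct model_log *log, int *log_flushed)
{
	int dummy;

	if (!log_flushed)
		log_flushed = &dummy;

	if (log->already_on_disk)
		return 0;	/* nothing was written; *log_flushed stays 0 */

	*log_flushed = 1;	/* an ordered log write went out */
	return 0;
}

/* Modeled fsync: flush the disk cache only if the log force did not. */
static void fsync_file(struct model_log *log)
{
	int log_flushed = 0;

	log_force(log, &log_flushed);
	if (!log_flushed)
		issue_cache_flush();
}

int main(void)
{
	struct model_log idle = { .already_on_disk = 1 };
	struct model_log busy = { .already_on_disk = 0 };

	fsync_file(&idle);	/* log already stable: explicit flush is issued */
	fsync_file(&busy);	/* ordered log write covers it: no extra flush */
	return 0;
}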
Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r--  fs/xfs/xfs_log.c | 65
1 file changed, 37 insertions(+), 28 deletions(-)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 51814c32eddf..b9d3ad35240e 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -93,8 +93,11 @@ STATIC int xlog_state_release_iclog(xlog_t *log,
 STATIC void xlog_state_switch_iclogs(xlog_t *log,
 				     xlog_in_core_t *iclog,
 				     int eventual_size);
-STATIC int  xlog_state_sync(xlog_t *log, xfs_lsn_t lsn, uint flags);
-STATIC int  xlog_state_sync_all(xlog_t *log, uint flags);
+STATIC int  xlog_state_sync(xlog_t *log,
+			    xfs_lsn_t lsn,
+			    uint flags,
+			    int *log_flushed);
+STATIC int  xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed);
 STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog);
 
 /* local functions to manipulate grant head */
@@ -312,12 +315,17 @@ xfs_log_done(xfs_mount_t *mp,
  * semaphore.
  */
 int
-xfs_log_force(xfs_mount_t *mp,
-	      xfs_lsn_t	  lsn,
-	      uint	  flags)
+_xfs_log_force(
+	xfs_mount_t	*mp,
+	xfs_lsn_t	lsn,
+	uint		flags,
+	int		*log_flushed)
 {
-	int	rval;
-	xlog_t	*log = mp->m_log;
+	xlog_t	*log = mp->m_log;
+	int	dummy;
+
+	if (!log_flushed)
+		log_flushed = &dummy;
 
 #if defined(DEBUG) || defined(XLOG_NOLOG)
 	if (!xlog_debug && xlog_target == log->l_targ)
@@ -328,17 +336,12 @@ xfs_log_force(xfs_mount_t *mp,
 
 	XFS_STATS_INC(xs_log_force);
 
-	if ((log->l_flags & XLOG_IO_ERROR) == 0) {
-		if (lsn == 0)
-			rval = xlog_state_sync_all(log, flags);
-		else
-			rval = xlog_state_sync(log, lsn, flags);
-	} else {
-		rval = XFS_ERROR(EIO);
-	}
-
-	return rval;
-
+	if (log->l_flags & XLOG_IO_ERROR)
+		return XFS_ERROR(EIO);
+	if (lsn == 0)
+		return xlog_state_sync_all(log, flags, log_flushed);
+	else
+		return xlog_state_sync(log, lsn, flags, log_flushed);
 }	/* xfs_log_force */
 
 /*
@@ -1467,14 +1470,13 @@ xlog_sync(xlog_t *log,
 	XFS_BUF_BUSY(bp);
 	XFS_BUF_ASYNC(bp);
 	/*
-	 * Do a disk write cache flush for the log block.
-	 * This is a bit of a sledgehammer, it would be better
-	 * to use a tag barrier here that just prevents reordering.
+	 * Do an ordered write for the log block.
+	 *
 	 * It may not be needed to flush the first split block in the log wrap
 	 * case, but do it anyways to be safe -AK
 	 */
-	if (!(log->l_mp->m_flags & XFS_MOUNT_NOLOGFLUSH))
-		XFS_BUF_FLUSH(bp);
+	if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
+		XFS_BUF_ORDERED(bp);
 
 	ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
 	ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
@@ -1505,8 +1507,8 @@ xlog_sync(xlog_t *log,
 	XFS_BUF_SET_FSPRIVATE(bp, iclog);
 	XFS_BUF_BUSY(bp);
 	XFS_BUF_ASYNC(bp);
-	if (!(log->l_mp->m_flags & XFS_MOUNT_NOLOGFLUSH))
-		XFS_BUF_FLUSH(bp);
+	if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
+		XFS_BUF_ORDERED(bp);
 	dptr = XFS_BUF_PTR(bp);
 	/*
 	 * Bump the cycle numbers at the start of each block
@@ -2951,7 +2953,7 @@ xlog_state_switch_iclogs(xlog_t *log,
  * not in the active nor dirty state.
  */
 STATIC int
-xlog_state_sync_all(xlog_t *log, uint flags)
+xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
 {
 	xlog_in_core_t	*iclog;
 	xfs_lsn_t	lsn;
@@ -3000,6 +3002,7 @@ xlog_state_sync_all(xlog_t *log, uint flags)
 
 			if (xlog_state_release_iclog(log, iclog))
 				return XFS_ERROR(EIO);
+			*log_flushed = 1;
 			s = LOG_LOCK(log);
 			if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) == lsn &&
 			    iclog->ic_state != XLOG_STATE_DIRTY)
@@ -3043,6 +3046,7 @@ maybe_sleep:
 		 */
 		if (iclog->ic_state & XLOG_STATE_IOERROR)
 			return XFS_ERROR(EIO);
+		*log_flushed = 1;
 
 	} else {
 
@@ -3068,7 +3072,8 @@ no_sleep:
 int
 xlog_state_sync(xlog_t	  *log,
 		xfs_lsn_t lsn,
-		uint	  flags)
+		uint	  flags,
+		int	  *log_flushed)
 {
 	xlog_in_core_t	*iclog;
 	int		already_slept = 0;
@@ -3120,6 +3125,7 @@ try_again:
 			XFS_STATS_INC(xs_log_force_sleep);
 			sv_wait(&iclog->ic_prev->ic_writesema, PSWP,
 				&log->l_icloglock, s);
+			*log_flushed = 1;
 			already_slept = 1;
 			goto try_again;
 		} else {
@@ -3128,6 +3134,7 @@ try_again:
 			LOG_UNLOCK(log, s);
 			if (xlog_state_release_iclog(log, iclog))
 				return XFS_ERROR(EIO);
+			*log_flushed = 1;
 			s = LOG_LOCK(log);
 		}
 	}
@@ -3152,6 +3159,7 @@ try_again:
 		 */
 		if (iclog->ic_state & XLOG_STATE_IOERROR)
 			return XFS_ERROR(EIO);
+		*log_flushed = 1;
 	} else {		/* just return */
 		LOG_UNLOCK(log, s);
 	}
@@ -3606,6 +3614,7 @@ xfs_log_force_umount(
 	xlog_ticket_t	*tic;
 	xlog_t		*log;
 	int		retval;
+	int		dummy;
 	SPLDECL(s);
 	SPLDECL(s2);
 
@@ -3684,7 +3693,7 @@ xfs_log_force_umount(
 		 * Force the incore logs to disk before shutting the
 		 * log down completely.
 		 */
-		xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC);
+		xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC, &dummy);
 		s2 = LOG_LOCK(log);
 		retval = xlog_state_ioerror(log);
 		LOG_UNLOCK(log, s2);
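Only _xfs_log_force() is defined in this file, yet its closing comment still
reads "xfs_log_force".  Existing xfs_log_force(mp, lsn, flags) callers
presumably keep working through a thin compatibility wrapper in xfs_log.h
(not part of this diff) that passes a NULL log_flushed pointer, which
_xfs_log_force() redirects to its local dummy.  A hypothetical form of such a
wrapper:

/* Hypothetical wrapper; the real declaration lives in xfs_log.h, outside this diff. */
#define xfs_log_force(mp, lsn, flags) \
	_xfs_log_force((mp), (lsn), (flags), NULL)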