diff options
Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r-- | fs/xfs/xfs_log.c | 63 |
1 file changed, 50 insertions, 13 deletions
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 8497a00e399d..a5f8bd9899d3 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -616,11 +616,13 @@ xfs_log_mount( | |||
616 | int error = 0; | 616 | int error = 0; |
617 | int min_logfsbs; | 617 | int min_logfsbs; |
618 | 618 | ||
619 | if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) | 619 | if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) { |
620 | xfs_notice(mp, "Mounting Filesystem"); | 620 | xfs_notice(mp, "Mounting V%d Filesystem", |
621 | else { | 621 | XFS_SB_VERSION_NUM(&mp->m_sb)); |
622 | } else { | ||
622 | xfs_notice(mp, | 623 | xfs_notice(mp, |
623 | "Mounting filesystem in no-recovery mode. Filesystem will be inconsistent."); | 624 | "Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.", |
625 | XFS_SB_VERSION_NUM(&mp->m_sb)); | ||
624 | ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); | 626 | ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); |
625 | } | 627 | } |
626 | 628 | ||
@@ -1181,11 +1183,14 @@ xlog_iodone(xfs_buf_t *bp) | |||
1181 | /* log I/O is always issued ASYNC */ | 1183 | /* log I/O is always issued ASYNC */ |
1182 | ASSERT(XFS_BUF_ISASYNC(bp)); | 1184 | ASSERT(XFS_BUF_ISASYNC(bp)); |
1183 | xlog_state_done_syncing(iclog, aborted); | 1185 | xlog_state_done_syncing(iclog, aborted); |
1186 | |||
1184 | /* | 1187 | /* |
1185 | * do not reference the buffer (bp) here as we could race | 1188 | * drop the buffer lock now that we are done. Nothing references |
1186 | * with it being freed after writing the unmount record to the | 1189 | * the buffer after this, so an unmount waiting on this lock can now |
1187 | * log. | 1190 | * tear it down safely. As such, it is unsafe to reference the buffer |
1191 | * (bp) after the unlock as we could race with it being freed. | ||
1188 | */ | 1192 | */ |
1193 | xfs_buf_unlock(bp); | ||
1189 | } | 1194 | } |
1190 | 1195 | ||
1191 | /* | 1196 | /* |
@@ -1368,8 +1373,16 @@ xlog_alloc_log( | |||
1368 | bp = xfs_buf_alloc(mp->m_logdev_targp, 0, BTOBB(log->l_iclog_size), 0); | 1373 | bp = xfs_buf_alloc(mp->m_logdev_targp, 0, BTOBB(log->l_iclog_size), 0); |
1369 | if (!bp) | 1374 | if (!bp) |
1370 | goto out_free_log; | 1375 | goto out_free_log; |
1371 | bp->b_iodone = xlog_iodone; | 1376 | |
1377 | /* | ||
1378 | * The iclogbuf buffer locks are held over IO but we are not going to do | ||
1379 | * IO yet. Hence unlock the buffer so that the log IO path can grab it | ||
1380 | when appropriate. | ||
1381 | */ | ||
1372 | ASSERT(xfs_buf_islocked(bp)); | 1382 | ASSERT(xfs_buf_islocked(bp)); |
1383 | xfs_buf_unlock(bp); | ||
1384 | |||
1385 | bp->b_iodone = xlog_iodone; | ||
1373 | log->l_xbuf = bp; | 1386 | log->l_xbuf = bp; |
1374 | 1387 | ||
1375 | spin_lock_init(&log->l_icloglock); | 1388 | spin_lock_init(&log->l_icloglock); |
@@ -1398,6 +1411,9 @@ xlog_alloc_log( | |||
1398 | if (!bp) | 1411 | if (!bp) |
1399 | goto out_free_iclog; | 1412 | goto out_free_iclog; |
1400 | 1413 | ||
1414 | ASSERT(xfs_buf_islocked(bp)); | ||
1415 | xfs_buf_unlock(bp); | ||
1416 | |||
1401 | bp->b_iodone = xlog_iodone; | 1417 | bp->b_iodone = xlog_iodone; |
1402 | iclog->ic_bp = bp; | 1418 | iclog->ic_bp = bp; |
1403 | iclog->ic_data = bp->b_addr; | 1419 | iclog->ic_data = bp->b_addr; |
@@ -1422,7 +1438,6 @@ xlog_alloc_log( | |||
1422 | iclog->ic_callback_tail = &(iclog->ic_callback); | 1438 | iclog->ic_callback_tail = &(iclog->ic_callback); |
1423 | iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; | 1439 | iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; |
1424 | 1440 | ||
1425 | ASSERT(xfs_buf_islocked(iclog->ic_bp)); | ||
1426 | init_waitqueue_head(&iclog->ic_force_wait); | 1441 | init_waitqueue_head(&iclog->ic_force_wait); |
1427 | init_waitqueue_head(&iclog->ic_write_wait); | 1442 | init_waitqueue_head(&iclog->ic_write_wait); |
1428 | 1443 | ||
@@ -1631,6 +1646,12 @@ xlog_cksum( | |||
1631 | * we transition the iclogs to IOERROR state *after* flushing all existing | 1646 | * we transition the iclogs to IOERROR state *after* flushing all existing |
1632 | * iclogs to disk. This is because we don't want any more new transactions to be | 1647 | iclogs to disk. This is because we don't want any more new transactions to be |
1633 | * started or completed afterwards. | 1648 | * started or completed afterwards. |
1649 | * | ||
1650 | * We lock the iclogbufs here so that we can serialise against IO completion | ||
1651 | * during unmount. We might be processing a shutdown triggered during unmount, | ||
1652 | * and that can occur asynchronously to the unmount thread, and hence we need to | ||
1653 | * ensure that completes before tearing down the iclogbufs. Hence we need to | ||
1654 | * hold the buffer lock across the log IO to achieve that. | ||
1634 | */ | 1655 | */ |
1635 | STATIC int | 1656 | STATIC int |
1636 | xlog_bdstrat( | 1657 | xlog_bdstrat( |
@@ -1638,6 +1659,7 @@ xlog_bdstrat( | |||
1638 | { | 1659 | { |
1639 | struct xlog_in_core *iclog = bp->b_fspriv; | 1660 | struct xlog_in_core *iclog = bp->b_fspriv; |
1640 | 1661 | ||
1662 | xfs_buf_lock(bp); | ||
1641 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 1663 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
1642 | xfs_buf_ioerror(bp, EIO); | 1664 | xfs_buf_ioerror(bp, EIO); |
1643 | xfs_buf_stale(bp); | 1665 | xfs_buf_stale(bp); |
@@ -1645,7 +1667,8 @@ xlog_bdstrat( | |||
1645 | /* | 1667 | /* |
1646 | * It would seem logical to return EIO here, but we rely on | 1668 | * It would seem logical to return EIO here, but we rely on |
1647 | * the log state machine to propagate I/O errors instead of | 1669 | * the log state machine to propagate I/O errors instead of |
1648 | * doing it here. | 1670 | * doing it here. Similarly, IO completion will unlock the |
1671 | * buffer, so we don't do it here. | ||
1649 | */ | 1672 | */ |
1650 | return 0; | 1673 | return 0; |
1651 | } | 1674 | } |
@@ -1847,14 +1870,28 @@ xlog_dealloc_log( | |||
1847 | xlog_cil_destroy(log); | 1870 | xlog_cil_destroy(log); |
1848 | 1871 | ||
1849 | /* | 1872 | /* |
1850 | * always need to ensure that the extra buffer does not point to memory | 1873 | * Cycle all the iclogbuf locks to make sure all log IO completion |
1851 | * owned by another log buffer before we free it. | 1874 | * is done before we tear down these buffers. |
1852 | */ | 1875 | */ |
1876 | iclog = log->l_iclog; | ||
1877 | for (i = 0; i < log->l_iclog_bufs; i++) { | ||
1878 | xfs_buf_lock(iclog->ic_bp); | ||
1879 | xfs_buf_unlock(iclog->ic_bp); | ||
1880 | iclog = iclog->ic_next; | ||
1881 | } | ||
1882 | |||
1883 | /* | ||
1884 | * Always need to ensure that the extra buffer does not point to memory | ||
1885 | * owned by another log buffer before we free it. Also, cycle the lock | ||
1886 | * first to ensure we've completed IO on it. | ||
1887 | */ | ||
1888 | xfs_buf_lock(log->l_xbuf); | ||
1889 | xfs_buf_unlock(log->l_xbuf); | ||
1853 | xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size)); | 1890 | xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size)); |
1854 | xfs_buf_free(log->l_xbuf); | 1891 | xfs_buf_free(log->l_xbuf); |
1855 | 1892 | ||
1856 | iclog = log->l_iclog; | 1893 | iclog = log->l_iclog; |
1857 | for (i=0; i<log->l_iclog_bufs; i++) { | 1894 | for (i = 0; i < log->l_iclog_bufs; i++) { |
1858 | xfs_buf_free(iclog->ic_bp); | 1895 | xfs_buf_free(iclog->ic_bp); |
1859 | next_iclog = iclog->ic_next; | 1896 | next_iclog = iclog->ic_next; |
1860 | kmem_free(iclog); | 1897 | kmem_free(iclog); |