Diffstat (limited to 'fs/xfs/xfs_mount.c')
-rw-r--r--	fs/xfs/xfs_mount.c	57
1 file changed, 27 insertions, 30 deletions
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 9a80b5581844..6bc92c85733f 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -696,7 +696,6 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
 	uint64_t	bfreelst = 0;
 	uint64_t	btree = 0;
 	int		error;
-	int		s;
 
 	for (index = 0; index < agcount; index++) {
 		/*
@@ -721,11 +720,11 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
 	/*
 	 * Overwrite incore superblock counters with just-read data
 	 */
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	sbp->sb_ifree = ifree;
 	sbp->sb_icount = ialloc;
 	sbp->sb_fdblocks = bfree + bfreelst + btree;
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);
 
 	/* Fixup the per-cpu counters as well. */
 	xfs_icsb_reinit_counters(mp);
@@ -1440,7 +1439,7 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
  * Fields are not allowed to dip below zero, so if the delta would
  * do this do not apply it and return EINVAL.
  *
- * The SB_LOCK must be held when this routine is called.
+ * The m_sb_lock must be held when this routine is called.
  */
 int
 xfs_mod_incore_sb_unlocked(
@@ -1605,7 +1604,7 @@ xfs_mod_incore_sb_unlocked(
 /*
  * xfs_mod_incore_sb() is used to change a field in the in-core
  * superblock structure by the specified delta.  This modification
- * is protected by the SB_LOCK.  Just use the xfs_mod_incore_sb_unlocked()
+ * is protected by the m_sb_lock.  Just use the xfs_mod_incore_sb_unlocked()
  * routine to do the work.
  */
 int
@@ -1615,7 +1614,6 @@ xfs_mod_incore_sb(
 	int64_t		delta,
 	int		rsvd)
 {
-	unsigned long	s;
 	int		status;
 
 	/* check for per-cpu counters */
@@ -1632,9 +1630,9 @@ xfs_mod_incore_sb(
 		/* FALLTHROUGH */
 #endif
 	default:
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 		status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
-		XFS_SB_UNLOCK(mp, s);
+		spin_unlock(&mp->m_sb_lock);
 		break;
 	}
 
@@ -1655,7 +1653,6 @@ xfs_mod_incore_sb(
 int
 xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 {
-	unsigned long	s;
 	int		status=0;
 	xfs_mod_sb_t	*msbp;
 
@@ -1663,10 +1660,10 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 	 * Loop through the array of mod structures and apply each
 	 * individually.  If any fail, then back out all those
 	 * which have already been applied.  Do all of this within
-	 * the scope of the SB_LOCK so that all of the changes will
+	 * the scope of the m_sb_lock so that all of the changes will
 	 * be atomic.
 	 */
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	msbp = &msb[0];
 	for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
 		/*
@@ -1680,11 +1677,11 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 		case XFS_SBS_IFREE:
 		case XFS_SBS_FDBLOCKS:
 			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
-				XFS_SB_UNLOCK(mp, s);
+				spin_unlock(&mp->m_sb_lock);
 				status = xfs_icsb_modify_counters(mp,
 						msbp->msb_field,
 						msbp->msb_delta, rsvd);
-				s = XFS_SB_LOCK(mp);
+				spin_lock(&mp->m_sb_lock);
 				break;
 			}
 			/* FALLTHROUGH */
@@ -1718,12 +1715,12 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 		case XFS_SBS_IFREE:
 		case XFS_SBS_FDBLOCKS:
 			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
-				XFS_SB_UNLOCK(mp, s);
+				spin_unlock(&mp->m_sb_lock);
 				status = xfs_icsb_modify_counters(mp,
 						msbp->msb_field,
 						-(msbp->msb_delta),
 						rsvd);
-				s = XFS_SB_LOCK(mp);
+				spin_lock(&mp->m_sb_lock);
 				break;
 			}
 			/* FALLTHROUGH */
@@ -1739,7 +1736,7 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 			msbp--;
 		}
 	}
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);
 	return status;
 }
 
@@ -1887,12 +1884,12 @@ xfs_mount_log_sbunit(
  *
  * Locking rules:
  *
- * 1. XFS_SB_LOCK() before picking up per-cpu locks
+ * 1. m_sb_lock before picking up per-cpu locks
  * 2. per-cpu locks always picked up via for_each_online_cpu() order
- * 3. accurate counter sync requires XFS_SB_LOCK + per cpu locks
+ * 3. accurate counter sync requires m_sb_lock + per cpu locks
  * 4. modifying per-cpu counters requires holding per-cpu lock
- * 5. modifying global counters requires holding XFS_SB_LOCK
- * 6. enabling or disabling a counter requires holding the XFS_SB_LOCK
+ * 5. modifying global counters requires holding m_sb_lock
+ * 6. enabling or disabling a counter requires holding the m_sb_lock
  *    and _none_ of the per-cpu locks.
  *
  * Disabled counters are only ever re-enabled by a balance operation
@@ -1945,7 +1942,7 @@ xfs_icsb_cpu_notify(
 		 * count into the total on the global superblock and
 		 * re-enable the counters. */
 		xfs_icsb_lock(mp);
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
 		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
 		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
@@ -1962,7 +1959,7 @@ xfs_icsb_cpu_notify(
 					XFS_ICSB_SB_LOCKED, 0);
 		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
 					XFS_ICSB_SB_LOCKED, 0);
-		XFS_SB_UNLOCK(mp, s);
+		spin_unlock(&mp->m_sb_lock);
 		xfs_icsb_unlock(mp);
 		break;
 	}
@@ -2197,7 +2194,7 @@ xfs_icsb_sync_counters_flags(
 
 	/* Pass 1: lock all counters */
 	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 
 	xfs_icsb_count(mp, &cnt, flags);
 
@@ -2210,7 +2207,7 @@ xfs_icsb_sync_counters_flags(
 	mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
 
 	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-		XFS_SB_UNLOCK(mp, s);
+		spin_unlock(&mp->m_sb_lock);
 }
 
 /*
@@ -2255,7 +2252,7 @@ xfs_icsb_balance_counter(
 	uint64_t	min = (uint64_t)min_per_cpu;
 
 	if (!(flags & XFS_ICSB_SB_LOCKED))
-		s = XFS_SB_LOCK(mp);
+		spin_lock(&mp->m_sb_lock);
 
 	/* disable counter and sync counter */
 	xfs_icsb_disable_counter(mp, field);
@@ -2289,7 +2286,7 @@ xfs_icsb_balance_counter(
 	xfs_icsb_enable_counter(mp, field, count, resid);
 out:
 	if (!(flags & XFS_ICSB_SB_LOCKED))
-		XFS_SB_UNLOCK(mp, s);
+		spin_unlock(&mp->m_sb_lock);
 }
 
 int
@@ -2379,15 +2376,15 @@ slow_path:
 	 * running atomically here, we know a rebalance cannot
 	 * be in progress.  Hence we can go straight to operating
 	 * on the global superblock. We do not call xfs_mod_incore_sb()
-	 * here even though we need to get the SB_LOCK. Doing so
+	 * here even though we need to get the m_sb_lock. Doing so
 	 * will cause us to re-enter this function and deadlock.
-	 * Hence we get the SB_LOCK ourselves and then call
+	 * Hence we get the m_sb_lock ourselves and then call
 	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
 	 * directly on the global counters.
 	 */
-	s = XFS_SB_LOCK(mp);
+	spin_lock(&mp->m_sb_lock);
 	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
-	XFS_SB_UNLOCK(mp, s);
+	spin_unlock(&mp->m_sb_lock);
 
 	/*
 	 * Now that we've modified the global superblock, we
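
Note: every hunk above is the same mechanical conversion. The old XFS_SB_LOCK()/XFS_SB_UNLOCK() wrappers produced and consumed a cookie value `s` that the Linux spinlock API never actually needed, so each call site can take `&mp->m_sb_lock` directly and the local `s` declarations become dead code. Below is a minimal userspace sketch of that conversion, assuming the old macro behaved roughly like `({ lock(...); 0; })` (a GNU C statement expression); the `mock_mount` structure, `MOCK_SB_*` macros, and pthread mutexes are illustrative stand-ins, not XFS or kernel code.

/*
 * Sketch of the refactor: a cookie-returning lock wrapper vs. calling the
 * lock primitives directly. Compile with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct mock_mount {
	pthread_mutex_t	m_sb_lock;	/* stands in for spinlock_t m_sb_lock */
	uint64_t	sb_fdblocks;	/* an in-core superblock counter */
};

/* Old style: the lock macro returns a cookie; the unlock discards it. */
#define MOCK_SB_LOCK(mp)	({ pthread_mutex_lock(&(mp)->m_sb_lock); 0; })
#define MOCK_SB_UNLOCK(mp, s)	((void)(s), pthread_mutex_unlock(&(mp)->m_sb_lock))

static void update_old_style(struct mock_mount *mp, uint64_t delta)
{
	unsigned long	s;	/* dead weight: carried around, never needed */

	s = MOCK_SB_LOCK(mp);
	mp->sb_fdblocks += delta;
	MOCK_SB_UNLOCK(mp, s);
}

/* New style: call the primitives directly; no cookie variable at all. */
static void update_new_style(struct mock_mount *mp, uint64_t delta)
{
	pthread_mutex_lock(&mp->m_sb_lock);
	mp->sb_fdblocks += delta;
	pthread_mutex_unlock(&mp->m_sb_lock);
}

int main(void)
{
	struct mock_mount mp = {
		.m_sb_lock = PTHREAD_MUTEX_INITIALIZER,
		.sb_fdblocks = 0,
	};

	update_old_style(&mp, 10);
	update_new_style(&mp, 32);
	printf("sb_fdblocks = %llu\n", (unsigned long long)mp.sb_fdblocks);
	return 0;
}

Both variants serialize the counter update identically; the new style simply drops the vestigial cookie, which is why each hunk deletes a `s` declaration and rewrites the lock/unlock pairs in place.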