path: root/fs/xfs/xfs_mount.c
author	David Chinner <dgc@sgi.com>	2007-02-10 02:35:09 -0500
committer	Tim Shimmin <tes@sgi.com>	2007-02-10 02:35:09 -0500
commit	20b642858b6bb413976ff13ae6a35cc596967bab (patch)
tree	363a370fc2e0097603a0cc4494f291bdc7433397 /fs/xfs/xfs_mount.c
parent	804195b63a6dcb767f5fae43b435067079b52903 (diff)
[XFS] Reduce global superblock lock contention near ENOSPC.
The existing per-cpu superblock counter code uses the global superblock
spin lock when we approach ENOSPC for global synchronisation. On larger
machines than this code was originally tested on, this can still cause
catastrophic spinlock contention due to the increasing rebalance
frequency near ENOSPC.

By introducing a sleeping lock that is used to serialise balances and
modifications near ENOSPC, we prevent contention from needlessly
wasting the CPU time of potentially hundreds of CPUs.

To reduce the number of balances occurring, we separate the
need-rebalance case from the slow-allocate case. Now, a counter running
dry will trigger a rebalance, during which counters are disabled. Any
thread that sees a disabled counter enters a different path where it
waits on the new mutex. When it gets the new mutex, it checks if the
counter is disabled. If the counter is disabled, then we _know_ that we
have to use the global counter and lock, and it is safe to do so
immediately. Otherwise, we drop the mutex and go back to trying the
per-cpu counters, which we know have been re-enabled.

SGI-PV: 952227
SGI-Modid: xfs-linux-melb:xfs-kern:27612a

Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
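To make the protocol concrete before reading the diff, here is a minimal
userspace sketch of the same pattern, using pthreads in place of the kernel
primitives. Everything in it (counter_mod, disable_and_drain, NCPU, the
spin/mutex layout) is an illustrative assumption, not the kernel code; XFS's
real implementation is the patch below.

/*
 * Sketch: per-cpu counter with a mutex-serialised slow path.
 * Build with: cc -c -pthread sketch.c (add a main() to exercise it;
 * call counters_init() first).
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdint.h>

#define NCPU 4

static struct {
	pthread_spinlock_t	lock;
	int64_t			count;	/* this cpu's share of the total */
} pcpu[NCPU];

static bool counter_disabled;		/* set/cleared only under slow_mutex */
static int64_t global_count;		/* authoritative total */
static pthread_spinlock_t global_lock;	/* stands in for XFS_SB_LOCK */
static pthread_mutex_t slow_mutex = PTHREAD_MUTEX_INITIALIZER;

static void counters_init(void)
{
	pthread_spin_init(&global_lock, PTHREAD_PROCESS_PRIVATE);
	for (int i = 0; i < NCPU; i++)
		pthread_spin_init(&pcpu[i].lock, PTHREAD_PROCESS_PRIVATE);
}

/* Fold all per-cpu counts into the global counter and turn the fast
 * path off. Caller holds slow_mutex. */
static void disable_and_drain(void)
{
	counter_disabled = true;
	for (int i = 0; i < NCPU; i++) {
		pthread_spin_lock(&pcpu[i].lock);
		global_count += pcpu[i].count;
		pcpu[i].count = 0;
		pthread_spin_unlock(&pcpu[i].lock);
	}
}

/* Apply delta; returns 0 on success, -1 on "ENOSPC". */
static int counter_mod(int64_t delta)
{
	int cpu, ret;
again:
	cpu = sched_getcpu() % NCPU;
	if (counter_disabled)		/* racy peek, re-checked below */
		goto slow_path;
	pthread_spin_lock(&pcpu[cpu].lock);
	if (counter_disabled) {		/* closes race with a disable */
		pthread_spin_unlock(&pcpu[cpu].lock);
		goto slow_path;
	}
	if (pcpu[cpu].count + delta < 0) {
		pthread_spin_unlock(&pcpu[cpu].lock);
		goto balance_counter;	/* this cpu's share ran dry */
	}
	pcpu[cpu].count += delta;	/* common, contention-free case */
	pthread_spin_unlock(&pcpu[cpu].lock);
	return 0;

slow_path:
	/* Sleep on the mutex instead of spinning on the global lock. */
	pthread_mutex_lock(&slow_mutex);
	if (!counter_disabled) {
		/* Someone rebalanced and re-enabled the counter while
		 * we waited; retry the fast path. */
		pthread_mutex_unlock(&slow_mutex);
		goto again;
	}
	/* Still disabled: no rebalance can be in flight, so it is safe
	 * to modify the global counter directly. */
	pthread_spin_lock(&global_lock);
	if (global_count + delta >= 0) {
		global_count += delta;
		ret = 0;
	} else
		ret = -1;		/* ENOSPC */
	pthread_spin_unlock(&global_lock);
	pthread_mutex_unlock(&slow_mutex);
	return ret;

balance_counter:
	/* Drain everything into the global count and retry; a fuller
	 * version would redistribute and re-enable when enough remains. */
	pthread_mutex_lock(&slow_mutex);
	disable_and_drain();
	pthread_mutex_unlock(&slow_mutex);
	goto again;
}

The double check of counter_disabled mirrors the patch: an unlocked peek keeps
the common case cheap, while the re-checks under the per-cpu lock and under the
mutex close the race with a concurrent disable or rebalance.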
Diffstat (limited to 'fs/xfs/xfs_mount.c')
-rw-r--r--	fs/xfs/xfs_mount.c	232
1 file changed, 139 insertions(+), 93 deletions(-)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 397730f570b9..37c612ce3d05 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -52,21 +52,19 @@ STATIC void xfs_unmountfs_wait(xfs_mount_t *);
 
 #ifdef HAVE_PERCPU_SB
 STATIC void	xfs_icsb_destroy_counters(xfs_mount_t *);
-STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, int);
+STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, int,
+						int);
 STATIC void	xfs_icsb_sync_counters(xfs_mount_t *);
 STATIC int	xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
 						int, int);
-STATIC int	xfs_icsb_modify_counters_locked(xfs_mount_t *, xfs_sb_field_t,
-						int, int);
 STATIC int	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
 
 #else
 
 #define xfs_icsb_destroy_counters(mp)			do { } while (0)
-#define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
+#define xfs_icsb_balance_counter(mp, a, b, c)		do { } while (0)
 #define xfs_icsb_sync_counters(mp)			do { } while (0)
 #define xfs_icsb_modify_counters(mp, a, b, c)		do { } while (0)
-#define xfs_icsb_modify_counters_locked(mp, a, b, c)	do { } while (0)
 
 #endif
 
@@ -545,9 +543,11 @@ xfs_readsb(xfs_mount_t *mp, int flags)
 		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
 	}
 
-	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
-	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
-	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
+	mutex_lock(&mp->m_icsb_mutex);
+	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
+	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
+	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
+	mutex_unlock(&mp->m_icsb_mutex);
 
 	mp->m_sb_bp = bp;
 	xfs_buf_relse(bp);
@@ -1485,9 +1485,11 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 		case XFS_SBS_IFREE:
 		case XFS_SBS_FDBLOCKS:
 			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
-				status = xfs_icsb_modify_counters_locked(mp,
+				XFS_SB_UNLOCK(mp, s);
+				status = xfs_icsb_modify_counters(mp,
 						msbp->msb_field,
 						msbp->msb_delta, rsvd);
+				s = XFS_SB_LOCK(mp);
 				break;
 			}
 			/* FALLTHROUGH */
@@ -1521,11 +1523,12 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 		case XFS_SBS_IFREE:
 		case XFS_SBS_FDBLOCKS:
 			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
-				status =
-					xfs_icsb_modify_counters_locked(mp,
+				XFS_SB_UNLOCK(mp, s);
+				status = xfs_icsb_modify_counters(mp,
 						msbp->msb_field,
 						-(msbp->msb_delta),
 						rsvd);
+				s = XFS_SB_LOCK(mp);
 				break;
 			}
 			/* FALLTHROUGH */
@@ -1733,14 +1736,17 @@ xfs_icsb_cpu_notify(
 		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
 		break;
 	case CPU_ONLINE:
-		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
-		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
-		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
+		mutex_lock(&mp->m_icsb_mutex);
+		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
+		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
+		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
+		mutex_unlock(&mp->m_icsb_mutex);
 		break;
 	case CPU_DEAD:
 		/* Disable all the counters, then fold the dead cpu's
 		 * count into the total on the global superblock and
 		 * re-enable the counters. */
+		mutex_lock(&mp->m_icsb_mutex);
 		s = XFS_SB_LOCK(mp);
 		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
 		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
@@ -1752,10 +1758,14 @@ xfs_icsb_cpu_notify(
 
 		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
 
-		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, XFS_ICSB_SB_LOCKED);
-		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, XFS_ICSB_SB_LOCKED);
-		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, XFS_ICSB_SB_LOCKED);
+		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT,
+					 XFS_ICSB_SB_LOCKED, 0);
+		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE,
+					 XFS_ICSB_SB_LOCKED, 0);
+		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
+					 XFS_ICSB_SB_LOCKED, 0);
 		XFS_SB_UNLOCK(mp, s);
+		mutex_unlock(&mp->m_icsb_mutex);
 		break;
 	}
 
@@ -1784,6 +1794,9 @@ xfs_icsb_init_counters(
 		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
 		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
 	}
+
+	mutex_init(&mp->m_icsb_mutex);
+
 	/*
 	 * start with all counters disabled so that the
 	 * initial balance kicks us off correctly
@@ -1888,6 +1901,17 @@ xfs_icsb_disable_counter(
 
 	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
 
+	/*
+	 * If we are already disabled, then there is nothing to do
+	 * here. We check before locking all the counters to avoid
+	 * the expensive lock operation when being called in the
+	 * slow path and the counter is already disabled. This is
+	 * safe because the only time we set or clear this state is under
+	 * the m_icsb_mutex.
+	 */
+	if (xfs_icsb_counter_disabled(mp, field))
+		return 0;
+
 	xfs_icsb_lock_all_counters(mp);
 	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
 		/* drain back to superblock */
@@ -1997,24 +2021,33 @@ xfs_icsb_sync_counters_lazy(
 /*
  * Balance and enable/disable counters as necessary.
  *
- * Thresholds for re-enabling counters are somewhat magic.
- * inode counts are chosen to be the same number as single
- * on disk allocation chunk per CPU, and free blocks is
- * something far enough zero that we aren't going thrash
- * when we get near ENOSPC.
+ * Thresholds for re-enabling counters are somewhat magic.  inode counts are
+ * chosen to be the same number as single on disk allocation chunk per CPU, and
+ * free blocks is something far enough zero that we aren't going thrash when we
+ * get near ENOSPC. We also need to supply a minimum we require per cpu to
+ * prevent looping endlessly when xfs_alloc_space asks for more than will
+ * be distributed to a single CPU but each CPU has enough blocks to be
+ * reenabled.
+ *
+ * Note that we can be called when counters are already disabled.
+ * xfs_icsb_disable_counter() optimises the counter locking in this case to
+ * prevent locking every per-cpu counter needlessly.
  */
-#define XFS_ICSB_INO_CNTR_REENABLE	64
+
+#define XFS_ICSB_INO_CNTR_REENABLE	(uint64_t)64
 #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
-		(512 + XFS_ALLOC_SET_ASIDE(mp))
+		(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
 STATIC void
 xfs_icsb_balance_counter(
 	xfs_mount_t	*mp,
 	xfs_sb_field_t	field,
-	int		flags)
+	int		flags,
+	int		min_per_cpu)
 {
 	uint64_t	count, resid;
 	int		weight = num_online_cpus();
 	int		s;
+	uint64_t	min = (uint64_t)min_per_cpu;
 
 	if (!(flags & XFS_ICSB_SB_LOCKED))
 		s = XFS_SB_LOCK(mp);
@@ -2027,19 +2060,19 @@ xfs_icsb_balance_counter(
 	case XFS_SBS_ICOUNT:
 		count = mp->m_sb.sb_icount;
 		resid = do_div(count, weight);
-		if (count < XFS_ICSB_INO_CNTR_REENABLE)
+		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
 			goto out;
 		break;
 	case XFS_SBS_IFREE:
 		count = mp->m_sb.sb_ifree;
 		resid = do_div(count, weight);
-		if (count < XFS_ICSB_INO_CNTR_REENABLE)
+		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
 			goto out;
 		break;
 	case XFS_SBS_FDBLOCKS:
 		count = mp->m_sb.sb_fdblocks;
 		resid = do_div(count, weight);
-		if (count < XFS_ICSB_FDBLK_CNTR_REENABLE(mp))
+		if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
 			goto out;
 		break;
 	default:
@@ -2054,32 +2087,39 @@ out:
 	XFS_SB_UNLOCK(mp, s);
 }
 
-STATIC int
-xfs_icsb_modify_counters_int(
+int
+xfs_icsb_modify_counters(
 	xfs_mount_t	*mp,
 	xfs_sb_field_t	field,
 	int		delta,
-	int		rsvd,
-	int		flags)
+	int		rsvd)
 {
 	xfs_icsb_cnts_t	*icsbp;
 	long long	lcounter;	/* long counter for 64 bit fields */
-	int		cpu, s, locked = 0;
-	int		ret = 0, balance_done = 0;
+	int		cpu, ret = 0, s;
 
+	might_sleep();
 again:
 	cpu = get_cpu();
-	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu),
-	xfs_icsb_lock_cntr(icsbp);
+	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);
+
+	/*
+	 * if the counter is disabled, go to slow path
+	 */
 	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
 		goto slow_path;
+	xfs_icsb_lock_cntr(icsbp);
+	if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
+		xfs_icsb_unlock_cntr(icsbp);
+		goto slow_path;
+	}
 
 	switch (field) {
 	case XFS_SBS_ICOUNT:
 		lcounter = icsbp->icsb_icount;
 		lcounter += delta;
 		if (unlikely(lcounter < 0))
-			goto slow_path;
+			goto balance_counter;
 		icsbp->icsb_icount = lcounter;
 		break;
 
@@ -2087,7 +2127,7 @@ again:
 		lcounter = icsbp->icsb_ifree;
 		lcounter += delta;
 		if (unlikely(lcounter < 0))
-			goto slow_path;
+			goto balance_counter;
 		icsbp->icsb_ifree = lcounter;
 		break;
 
@@ -2097,7 +2137,7 @@ again:
 		lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
 		lcounter += delta;
 		if (unlikely(lcounter < 0))
-			goto slow_path;
+			goto balance_counter;
 		icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
 		break;
 	default:
@@ -2106,72 +2146,78 @@ again:
 	}
 	xfs_icsb_unlock_cntr(icsbp);
 	put_cpu();
-	if (locked)
-		XFS_SB_UNLOCK(mp, s);
 	return 0;
 
-	/*
-	 * The slow path needs to be run with the SBLOCK
-	 * held so that we prevent other threads from
-	 * attempting to run this path at the same time.
-	 * this provides exclusion for the balancing code,
-	 * and exclusive fallback if the balance does not
-	 * provide enough resources to continue in an unlocked
-	 * manner.
-	 */
 slow_path:
-	xfs_icsb_unlock_cntr(icsbp);
 	put_cpu();
 
-	/* need to hold superblock incase we need
-	 * to disable a counter */
-	if (!(flags & XFS_ICSB_SB_LOCKED)) {
-		s = XFS_SB_LOCK(mp);
-		locked = 1;
-		flags |= XFS_ICSB_SB_LOCKED;
-	}
-	if (!balance_done) {
-		xfs_icsb_balance_counter(mp, field, flags);
-		balance_done = 1;
+	/*
+	 * serialise with a mutex so we don't burn lots of cpu on
+	 * the superblock lock. We still need to hold the superblock
+	 * lock, however, when we modify the global structures.
+	 */
+	mutex_lock(&mp->m_icsb_mutex);
+
+	/*
+	 * Now running atomically.
+	 *
+	 * If the counter is enabled, someone has beaten us to rebalancing.
+	 * Drop the lock and try again in the fast path....
+	 */
+	if (!(xfs_icsb_counter_disabled(mp, field))) {
+		mutex_unlock(&mp->m_icsb_mutex);
 		goto again;
-	} else {
-		/*
-		 * we might not have enough on this local
-		 * cpu to allocate for a bulk request.
-		 * We need to drain this field from all CPUs
-		 * and disable the counter fastpath
-		 */
-		xfs_icsb_disable_counter(mp, field);
 	}
 
+	/*
+	 * The counter is currently disabled. Because we are
+	 * running atomically here, we know a rebalance cannot
+	 * be in progress. Hence we can go straight to operating
+	 * on the global superblock. We do not call xfs_mod_incore_sb()
+	 * here even though we need to get the SB_LOCK. Doing so
+	 * will cause us to re-enter this function and deadlock.
+	 * Hence we get the SB_LOCK ourselves and then call
+	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
+	 * directly on the global counters.
+	 */
+	s = XFS_SB_LOCK(mp);
 	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
+	XFS_SB_UNLOCK(mp, s);
 
-	if (locked)
-		XFS_SB_UNLOCK(mp, s);
+	/*
+	 * Now that we've modified the global superblock, we
+	 * may be able to re-enable the distributed counters
+	 * (e.g. lots of space just got freed). After that
+	 * we are done.
+	 */
+	if (ret != ENOSPC)
+		xfs_icsb_balance_counter(mp, field, 0, 0);
+	mutex_unlock(&mp->m_icsb_mutex);
 	return ret;
-}
 
-STATIC int
-xfs_icsb_modify_counters(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t	field,
-	int		delta,
-	int		rsvd)
-{
-	return xfs_icsb_modify_counters_int(mp, field, delta, rsvd, 0);
-}
-
-/*
- * Called when superblock is already locked
- */
-STATIC int
-xfs_icsb_modify_counters_locked(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t	field,
-	int		delta,
-	int		rsvd)
-{
-	return xfs_icsb_modify_counters_int(mp, field, delta,
-			rsvd, XFS_ICSB_SB_LOCKED);
-}
+balance_counter:
+	xfs_icsb_unlock_cntr(icsbp);
+	put_cpu();
+
+	/*
+	 * We may have multiple threads here if multiple per-cpu
+	 * counters run dry at the same time. This will mean we can
+	 * do more balances than strictly necessary but it is not
+	 * the common slowpath case.
+	 */
+	mutex_lock(&mp->m_icsb_mutex);
+
+	/*
+	 * running atomically.
+	 *
+	 * This will leave the counter in the correct state for future
+	 * accesses. After the rebalance, we simply try again and our retry
+	 * will either succeed through the fast path or slow path without
+	 * another balance operation being required.
+	 */
+	xfs_icsb_balance_counter(mp, field, 0, delta);
+	mutex_unlock(&mp->m_icsb_mutex);
+	goto again;
 }
+
 #endif
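As an aside, the re-enable predicate that xfs_icsb_balance_counter now
implements is easy to state on its own: split the global count across the
online CPUs and refuse to re-enable the counter if each CPU's share would fall
below both the re-enable constant and the caller's new min_per_cpu floor. A
standalone sketch under those assumptions (the helper name can_reenable is
hypothetical; the 64-inode constant is XFS_ICSB_INO_CNTR_REENABLE from the
patch above):

#include <stdint.h>

#define INO_CNTR_REENABLE ((uint64_t)64)	/* XFS_ICSB_INO_CNTR_REENABLE */

/* Nonzero if a counter totalling 'total', split across 'weight' online
 * cpus, may be re-enabled, where the caller needs at least 'min_per_cpu'
 * units on a single cpu (the new fourth argument in the patch). */
static int can_reenable(uint64_t total, unsigned int weight,
			uint64_t min_per_cpu)
{
	uint64_t per_cpu = total / weight;
	uint64_t floor = min_per_cpu > INO_CNTR_REENABLE
				? min_per_cpu : INO_CNTR_REENABLE;
	return per_cpu >= floor;
}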