path: root/fs/xfs/xfs_mount.c
Diffstat (limited to 'fs/xfs/xfs_mount.c')
-rw-r--r--	fs/xfs/xfs_mount.c	288
1 file changed, 172 insertions(+), 116 deletions(-)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 9dfae18d995f..3bed0cf0d8af 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -52,21 +52,19 @@ STATIC void	xfs_unmountfs_wait(xfs_mount_t *);
 
 #ifdef HAVE_PERCPU_SB
 STATIC void	xfs_icsb_destroy_counters(xfs_mount_t *);
-STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, int);
+STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
+						int, int);
 STATIC void	xfs_icsb_sync_counters(xfs_mount_t *);
 STATIC int	xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
-						int, int);
-STATIC int	xfs_icsb_modify_counters_locked(xfs_mount_t *, xfs_sb_field_t,
-						int, int);
+						int64_t, int);
 STATIC int	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
 
 #else
 
 #define xfs_icsb_destroy_counters(mp)			do { } while (0)
-#define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
+#define xfs_icsb_balance_counter(mp, a, b, c)		do { } while (0)
 #define xfs_icsb_sync_counters(mp)			do { } while (0)
 #define xfs_icsb_modify_counters(mp, a, b, c)		do { } while (0)
-#define xfs_icsb_modify_counters_locked(mp, a, b, c)	do { } while (0)
 
 #endif
 
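The declaration changes above summarise the new interface: counter deltas widen from int to int64_t, xfs_icsb_balance_counter() gains a minimum-per-cpu argument, and the _locked variant is removed. A standalone sketch (not part of the patch) of why the widening matters; block-count deltas can exceed what a 32-bit int holds, while the superblock counters themselves are 64-bit:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t delta = 3LL << 30;          /* ~3.2 billion blocks */
        int old_delta = (int)delta;         /* the old 'int delta' prototype */

        printf("as int64_t: %lld\n", (long long)delta);
        printf("as int:     %d\n", old_delta);  /* truncates, goes negative */
        return 0;
    }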
@@ -545,9 +543,8 @@ xfs_readsb(xfs_mount_t *mp, int flags)
 		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
 	}
 
-	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
-	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
-	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
+	/* Initialize per-cpu counters */
+	xfs_icsb_reinit_counters(mp);
 
 	mp->m_sb_bp = bp;
 	xfs_buf_relse(bp);
@@ -1254,8 +1251,11 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
  * The SB_LOCK must be held when this routine is called.
  */
 int
-xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
-	int delta, int rsvd)
+xfs_mod_incore_sb_unlocked(
+	xfs_mount_t	*mp,
+	xfs_sb_field_t	field,
+	int64_t		delta,
+	int		rsvd)
 {
 	int		scounter;	/* short counter for 32 bit fields */
 	long long	lcounter;	/* long counter for 64 bit fields */
@@ -1287,7 +1287,6 @@ xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
 		mp->m_sb.sb_ifree = lcounter;
 		return 0;
 	case XFS_SBS_FDBLOCKS:
-
 		lcounter = (long long)
 			mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
 		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
@@ -1418,7 +1417,11 @@ xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
  * routine to do the work.
  */
 int
-xfs_mod_incore_sb(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd)
+xfs_mod_incore_sb(
+	xfs_mount_t	*mp,
+	xfs_sb_field_t	field,
+	int64_t		delta,
+	int		rsvd)
 {
 	unsigned long	s;
 	int	status;
@@ -1485,9 +1488,11 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 		case XFS_SBS_IFREE:
 		case XFS_SBS_FDBLOCKS:
 			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
-				status = xfs_icsb_modify_counters_locked(mp,
+				XFS_SB_UNLOCK(mp, s);
+				status = xfs_icsb_modify_counters(mp,
 						msbp->msb_field,
 						msbp->msb_delta, rsvd);
+				s = XFS_SB_LOCK(mp);
 				break;
 			}
 			/* FALLTHROUGH */
@@ -1521,11 +1526,12 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 		case XFS_SBS_IFREE:
 		case XFS_SBS_FDBLOCKS:
 			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
-				status =
-					xfs_icsb_modify_counters_locked(mp,
+				XFS_SB_UNLOCK(mp, s);
+				status = xfs_icsb_modify_counters(mp,
 						msbp->msb_field,
 						-(msbp->msb_delta),
 						rsvd);
+				s = XFS_SB_LOCK(mp);
 				break;
 			}
 			/* FALLTHROUGH */
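Both batch-path hunks apply the same pattern: xfs_icsb_modify_counters() can now sleep on the new m_icsb_mutex, so the SB_LOCK spinlock is dropped around the call and retaken to continue the batch. A rough pthread-based illustration of that pattern, with stand-in names rather than the kernel locking API:

    #include <pthread.h>

    struct mount_sketch {
        pthread_spinlock_t sb_lock;     /* stands in for SB_LOCK */
        pthread_mutex_t    icsb_mutex;  /* stands in for m_icsb_mutex */
        long long          fdblocks;
    };

    /* May sleep on the mutex: must never be entered with sb_lock held. */
    static int modify_counters(struct mount_sketch *mp, long long delta)
    {
        pthread_mutex_lock(&mp->icsb_mutex);
        mp->fdblocks += delta;
        pthread_mutex_unlock(&mp->icsb_mutex);
        return 0;
    }

    static int batch_apply(struct mount_sketch *mp, long long delta)
    {
        int status;

        pthread_spin_lock(&mp->sb_lock);
        /* ... walk batch entries under the spinlock ... */
        pthread_spin_unlock(&mp->sb_lock);   /* drop before sleeping */
        status = modify_counters(mp, delta);
        pthread_spin_lock(&mp->sb_lock);     /* retake to continue the walk */
        pthread_spin_unlock(&mp->sb_lock);
        return status;
    }

    int main(void)
    {
        struct mount_sketch mp;

        pthread_spin_init(&mp.sb_lock, PTHREAD_PROCESS_PRIVATE);
        pthread_mutex_init(&mp.icsb_mutex, NULL);
        mp.fdblocks = 0;
        return batch_apply(&mp, 512);
    }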
@@ -1733,14 +1739,17 @@ xfs_icsb_cpu_notify(
 		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
 		break;
 	case CPU_ONLINE:
-		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
-		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
-		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
+		xfs_icsb_lock(mp);
+		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
+		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
+		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
+		xfs_icsb_unlock(mp);
 		break;
 	case CPU_DEAD:
 		/* Disable all the counters, then fold the dead cpu's
 		 * count into the total on the global superblock and
 		 * re-enable the counters. */
+		xfs_icsb_lock(mp);
 		s = XFS_SB_LOCK(mp);
 		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
 		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
@@ -1752,10 +1761,14 @@ xfs_icsb_cpu_notify(
 
 		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
 
-		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, XFS_ICSB_SB_LOCKED);
-		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, XFS_ICSB_SB_LOCKED);
-		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, XFS_ICSB_SB_LOCKED);
+		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT,
+					 XFS_ICSB_SB_LOCKED, 0);
+		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE,
+					 XFS_ICSB_SB_LOCKED, 0);
+		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
+					 XFS_ICSB_SB_LOCKED, 0);
 		XFS_SB_UNLOCK(mp, s);
+		xfs_icsb_unlock(mp);
 		break;
 	}
 
@@ -1784,6 +1797,9 @@ xfs_icsb_init_counters(
 		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
 		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
 	}
+
+	mutex_init(&mp->m_icsb_mutex);
+
 	/*
 	 * start with all counters disabled so that the
 	 * initial balance kicks us off correctly
@@ -1792,6 +1808,22 @@ xfs_icsb_init_counters(
 	return 0;
 }
 
+void
+xfs_icsb_reinit_counters(
+	xfs_mount_t	*mp)
+{
+	xfs_icsb_lock(mp);
+	/*
+	 * start with all counters disabled so that the
+	 * initial balance kicks us off correctly
+	 */
+	mp->m_icsb_counters = -1;
+	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
+	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
+	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
+	xfs_icsb_unlock(mp);
+}
+
 STATIC void
 xfs_icsb_destroy_counters(
 	xfs_mount_t	*mp)
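xfs_icsb_reinit_counters() packages the disable-then-balance idiom that xfs_readsb() previously open-coded (see the earlier hunk): m_icsb_counters is a bitmask of disabled counters, so -1 disables them all and the balance passes re-enable whichever have enough resources. A tiny standalone illustration of the bitmask idiom, with made-up field numbering:

    #include <stdio.h>

    enum { ICOUNT, IFREE, FDBLOCKS };   /* illustrative field numbers */

    int main(void)
    {
        unsigned long disabled = -1UL;  /* all counters start disabled */

        /* a successful balance re-enables a counter by clearing its bit */
        disabled &= ~(1UL << FDBLOCKS);

        printf("fdblocks disabled? %s\n",
               (disabled & (1UL << FDBLOCKS)) ? "yes" : "no");
        return 0;
    }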
@@ -1800,9 +1832,10 @@ xfs_icsb_destroy_counters(
 		unregister_hotcpu_notifier(&mp->m_icsb_notifier);
 		free_percpu(mp->m_sb_cnts);
 	}
+	mutex_destroy(&mp->m_icsb_mutex);
 }
 
-STATIC inline void
+STATIC_INLINE void
 xfs_icsb_lock_cntr(
 	xfs_icsb_cnts_t	*icsbp)
 {
@@ -1811,7 +1844,7 @@ xfs_icsb_lock_cntr(
 	}
 }
 
-STATIC inline void
+STATIC_INLINE void
 xfs_icsb_unlock_cntr(
 	xfs_icsb_cnts_t	*icsbp)
 {
@@ -1819,7 +1852,7 @@ xfs_icsb_unlock_cntr(
 }
 
 
-STATIC inline void
+STATIC_INLINE void
 xfs_icsb_lock_all_counters(
 	xfs_mount_t	*mp)
 {
@@ -1832,7 +1865,7 @@ xfs_icsb_lock_all_counters(
 	}
 }
 
-STATIC inline void
+STATIC_INLINE void
 xfs_icsb_unlock_all_counters(
 	xfs_mount_t	*mp)
 {
@@ -1888,6 +1921,17 @@ xfs_icsb_disable_counter(
 
 	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
 
+	/*
+	 * If we are already disabled, then there is nothing to do
+	 * here. We check before locking all the counters to avoid
+	 * the expensive lock operation when being called in the
+	 * slow path and the counter is already disabled. This is
+	 * safe because the only time we set or clear this state is under
+	 * the m_icsb_mutex.
+	 */
+	if (xfs_icsb_counter_disabled(mp, field))
+		return 0;
+
 	xfs_icsb_lock_all_counters(mp);
 	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
 		/* drain back to superblock */
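The early return added here is a cheap pre-check: locking every per-cpu counter is expensive, and the new slow path often calls this with the counter already disabled. The unlocked test is safe only because the disabled state changes exclusively under m_icsb_mutex. A hedged pthread sketch of the same pattern, names illustrative:

    #include <pthread.h>
    #include <stdatomic.h>

    #define NCPUS 4

    static atomic_int disabled;         /* stands in for the m_icsb_counters bit */
    static pthread_mutex_t cpu_lock[NCPUS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    static int disable_counter(void)
    {
        int i;

        if (atomic_load(&disabled))     /* already disabled: skip the */
            return 0;                   /* expensive lock-all step    */

        for (i = 0; i < NCPUS; i++)     /* lock all per-cpu counters */
            pthread_mutex_lock(&cpu_lock[i]);
        atomic_store(&disabled, 1);     /* drain + mark disabled */
        for (i = NCPUS - 1; i >= 0; i--)
            pthread_mutex_unlock(&cpu_lock[i]);
        return 0;
    }

    int main(void)
    {
        return disable_counter();
    }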
@@ -1948,8 +1992,8 @@ xfs_icsb_enable_counter(
 	xfs_icsb_unlock_all_counters(mp);
 }
 
-STATIC void
-xfs_icsb_sync_counters_int(
+void
+xfs_icsb_sync_counters_flags(
 	xfs_mount_t	*mp,
 	int		flags)
 {
@@ -1981,40 +2025,39 @@ STATIC void
 xfs_icsb_sync_counters(
 	xfs_mount_t	*mp)
 {
-	xfs_icsb_sync_counters_int(mp, 0);
-}
-
-/*
- * lazy addition used for things like df, background sb syncs, etc
- */
-void
-xfs_icsb_sync_counters_lazy(
-	xfs_mount_t	*mp)
-{
-	xfs_icsb_sync_counters_int(mp, XFS_ICSB_LAZY_COUNT);
+	xfs_icsb_sync_counters_flags(mp, 0);
 }
 
 /*
  * Balance and enable/disable counters as necessary.
  *
- * Thresholds for re-enabling counters are somewhat magic.
- * inode counts are chosen to be the same number as single
- * on disk allocation chunk per CPU, and free blocks is
- * something far enough zero that we aren't going thrash
- * when we get near ENOSPC.
+ * Thresholds for re-enabling counters are somewhat magic.  inode counts are
+ * chosen to be the same number as single on disk allocation chunk per CPU, and
+ * free blocks is something far enough zero that we aren't going thrash when we
+ * get near ENOSPC. We also need to supply a minimum we require per cpu to
+ * prevent looping endlessly when xfs_alloc_space asks for more than will
+ * be distributed to a single CPU but each CPU has enough blocks to be
+ * reenabled.
+ *
+ * Note that we can be called when counters are already disabled.
+ * xfs_icsb_disable_counter() optimises the counter locking in this case to
+ * prevent locking every per-cpu counter needlessly.
  */
-#define XFS_ICSB_INO_CNTR_REENABLE	64
+
+#define XFS_ICSB_INO_CNTR_REENABLE	(uint64_t)64
 #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
-		(512 + XFS_ALLOC_SET_ASIDE(mp))
+		(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
 STATIC void
 xfs_icsb_balance_counter(
 	xfs_mount_t	*mp,
 	xfs_sb_field_t	field,
-	int		flags)
+	int		flags,
+	int		min_per_cpu)
 {
 	uint64_t	count, resid;
 	int		weight = num_online_cpus();
 	int		s;
+	uint64_t	min = (uint64_t)min_per_cpu;
 
 	if (!(flags & XFS_ICSB_SB_LOCKED))
 		s = XFS_SB_LOCK(mp);
@@ -2027,19 +2070,19 @@ xfs_icsb_balance_counter(
 	case XFS_SBS_ICOUNT:
 		count = mp->m_sb.sb_icount;
 		resid = do_div(count, weight);
-		if (count < XFS_ICSB_INO_CNTR_REENABLE)
+		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
 			goto out;
 		break;
 	case XFS_SBS_IFREE:
 		count = mp->m_sb.sb_ifree;
 		resid = do_div(count, weight);
-		if (count < XFS_ICSB_INO_CNTR_REENABLE)
+		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
 			goto out;
 		break;
 	case XFS_SBS_FDBLOCKS:
 		count = mp->m_sb.sb_fdblocks;
 		resid = do_div(count, weight);
-		if (count < XFS_ICSB_FDBLK_CNTR_REENABLE(mp))
+		if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
 			goto out;
 		break;
 	default:
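The balance logic divides the global count evenly across online CPUs and refuses to re-enable the per-cpu counter when each CPU's share falls under max(min, threshold); the new min_per_cpu argument is how a caller requests enough headroom for the allocation that just failed. A standalone demo of that arithmetic with invented values:

    #include <stdio.h>
    #include <stdint.h>

    #define INO_CNTR_REENABLE (uint64_t)64      /* mirrors the #define above */

    static uint64_t max64(uint64_t a, uint64_t b) { return a > b ? a : b; }

    int main(void)
    {
        uint64_t count = 1000;                  /* say, global sb_icount */
        uint64_t min_per_cpu = 256;             /* caller's required headroom */
        int      weight = 8;                    /* online CPUs */
        uint64_t resid = count % weight;        /* do_div() analogue */

        count /= weight;                        /* per-cpu share: 125 here */
        if (count < max64(min_per_cpu, INO_CNTR_REENABLE))
            printf("stay disabled: share %llu is too small\n",
                   (unsigned long long)count);
        else
            printf("re-enable: %llu per cpu, residual %llu\n",
                   (unsigned long long)count, (unsigned long long)resid);
        return 0;
    }

Note that 125 would have passed the old fixed test of 64; only the caller-supplied minimum keeps the counter disabled, which is exactly the endless-loop case the comment describes.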
@@ -2054,32 +2097,39 @@ out:
 		XFS_SB_UNLOCK(mp, s);
 }
 
-STATIC int
-xfs_icsb_modify_counters_int(
+int
+xfs_icsb_modify_counters(
 	xfs_mount_t	*mp,
 	xfs_sb_field_t	field,
-	int		delta,
-	int		rsvd,
-	int		flags)
+	int64_t		delta,
+	int		rsvd)
 {
 	xfs_icsb_cnts_t	*icsbp;
 	long long	lcounter;	/* long counter for 64 bit fields */
-	int		cpu, s, locked = 0;
-	int		ret = 0, balance_done = 0;
+	int		cpu, ret = 0, s;
 
+	might_sleep();
 again:
 	cpu = get_cpu();
-	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu),
-	xfs_icsb_lock_cntr(icsbp);
+	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);
+
+	/*
+	 * if the counter is disabled, go to slow path
+	 */
 	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
 		goto slow_path;
+	xfs_icsb_lock_cntr(icsbp);
+	if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
+		xfs_icsb_unlock_cntr(icsbp);
+		goto slow_path;
+	}
 
 	switch (field) {
 	case XFS_SBS_ICOUNT:
 		lcounter = icsbp->icsb_icount;
 		lcounter += delta;
 		if (unlikely(lcounter < 0))
-			goto slow_path;
+			goto balance_counter;
 		icsbp->icsb_icount = lcounter;
 		break;
 
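The rewritten fast path uses a double-checked test: probe the disabled bit without the per-counter lock, then re-test after taking it, since a disable can slip in between the two. A compact sketch of that race-safe ordering, using stand-in names rather than the kernel primitives:

    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_int counter_disabled; /* stands in for the m_icsb_counters bit */

    struct cntr {
        pthread_mutex_t lock;           /* stands in for xfs_icsb_lock_cntr() */
        long long       value;
    };

    /* Returns 0 on success, -1 meaning "take the slow path". */
    static int modify_fast(struct cntr *c, long long delta)
    {
        if (atomic_load(&counter_disabled))     /* cheap unlocked probe */
            return -1;

        pthread_mutex_lock(&c->lock);
        if (atomic_load(&counter_disabled)) {   /* recheck under the lock */
            pthread_mutex_unlock(&c->lock);
            return -1;                          /* lost the race */
        }
        c->value += delta;                      /* common case: no global lock */
        pthread_mutex_unlock(&c->lock);
        return 0;
    }

    int main(void)
    {
        struct cntr c = { PTHREAD_MUTEX_INITIALIZER, 0 };

        return modify_fast(&c, 1);
    }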
@@ -2087,7 +2137,7 @@ again:
 		lcounter = icsbp->icsb_ifree;
 		lcounter += delta;
 		if (unlikely(lcounter < 0))
-			goto slow_path;
+			goto balance_counter;
 		icsbp->icsb_ifree = lcounter;
 		break;
 
@@ -2097,7 +2147,7 @@ again:
 		lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
 		lcounter += delta;
 		if (unlikely(lcounter < 0))
-			goto slow_path;
+			goto balance_counter;
 		icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
 		break;
 	default:
@@ -2106,72 +2156,78 @@ again:
 	}
 	xfs_icsb_unlock_cntr(icsbp);
 	put_cpu();
-	if (locked)
-		XFS_SB_UNLOCK(mp, s);
 	return 0;
 
-	/*
-	 * The slow path needs to be run with the SBLOCK
-	 * held so that we prevent other threads from
-	 * attempting to run this path at the same time.
-	 * this provides exclusion for the balancing code,
-	 * and exclusive fallback if the balance does not
-	 * provide enough resources to continue in an unlocked
-	 * manner.
-	 */
 slow_path:
-	xfs_icsb_unlock_cntr(icsbp);
 	put_cpu();
 
-	/* need to hold superblock incase we need
-	 * to disable a counter */
-	if (!(flags & XFS_ICSB_SB_LOCKED)) {
-		s = XFS_SB_LOCK(mp);
-		locked = 1;
-		flags |= XFS_ICSB_SB_LOCKED;
-	}
-	if (!balance_done) {
-		xfs_icsb_balance_counter(mp, field, flags);
-		balance_done = 1;
+	/*
+	 * serialise with a mutex so we don't burn lots of cpu on
+	 * the superblock lock. We still need to hold the superblock
+	 * lock, however, when we modify the global structures.
+	 */
+	xfs_icsb_lock(mp);
+
+	/*
+	 * Now running atomically.
+	 *
+	 * If the counter is enabled, someone has beaten us to rebalancing.
+	 * Drop the lock and try again in the fast path....
+	 */
+	if (!(xfs_icsb_counter_disabled(mp, field))) {
+		xfs_icsb_unlock(mp);
 		goto again;
-	} else {
-		/*
-		 * we might not have enough on this local
-		 * cpu to allocate for a bulk request.
-		 * We need to drain this field from all CPUs
-		 * and disable the counter fastpath
-		 */
-		xfs_icsb_disable_counter(mp, field);
 	}
 
+	/*
+	 * The counter is currently disabled. Because we are
+	 * running atomically here, we know a rebalance cannot
+	 * be in progress. Hence we can go straight to operating
+	 * on the global superblock. We do not call xfs_mod_incore_sb()
+	 * here even though we need to get the SB_LOCK. Doing so
+	 * will cause us to re-enter this function and deadlock.
+	 * Hence we get the SB_LOCK ourselves and then call
+	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
+	 * directly on the global counters.
+	 */
+	s = XFS_SB_LOCK(mp);
 	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
+	XFS_SB_UNLOCK(mp, s);
 
-	if (locked)
-		XFS_SB_UNLOCK(mp, s);
+	/*
+	 * Now that we've modified the global superblock, we
+	 * may be able to re-enable the distributed counters
+	 * (e.g. lots of space just got freed). After that
+	 * we are done.
+	 */
+	if (ret != ENOSPC)
+		xfs_icsb_balance_counter(mp, field, 0, 0);
+	xfs_icsb_unlock(mp);
 	return ret;
-}
 
-STATIC int
-xfs_icsb_modify_counters(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t	field,
-	int		delta,
-	int		rsvd)
-{
-	return xfs_icsb_modify_counters_int(mp, field, delta, rsvd, 0);
-}
-
-/*
- * Called when superblock is already locked
- */
-STATIC int
-xfs_icsb_modify_counters_locked(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t	field,
-	int		delta,
-	int		rsvd)
-{
-	return xfs_icsb_modify_counters_int(mp, field, delta,
-						rsvd, XFS_ICSB_SB_LOCKED);
+balance_counter:
+	xfs_icsb_unlock_cntr(icsbp);
+	put_cpu();
+
+	/*
+	 * We may have multiple threads here if multiple per-cpu
+	 * counters run dry at the same time. This will mean we can
+	 * do more balances than strictly necessary but it is not
+	 * the common slowpath case.
+	 */
+	xfs_icsb_lock(mp);
+
+	/*
+	 * running atomically.
+	 *
+	 * This will leave the counter in the correct state for future
+	 * accesses. After the rebalance, we simply try again and our retry
+	 * will either succeed through the fast path or slow path without
+	 * another balance operation being required.
+	 */
+	xfs_icsb_balance_counter(mp, field, 0, delta);
+	xfs_icsb_unlock(mp);
+	goto again;
 }
+
 #endif
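Taken together, the rewritten function has three exits from the per-cpu attempt: success, balance_counter (a local counter ran dry, so rebalance with enough headroom for the delta and retry) and slow_path (the counter is disabled, so serialise on the mutex and fall back to the global counter). A condensed, illustrative model of that control flow with stand-in locks, not the kernel code:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <errno.h>

    static pthread_mutex_t icsb_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t sb_lock = PTHREAD_MUTEX_INITIALIZER; /* a spinlock in the kernel */
    static atomic_int disabled = 1;     /* start disabled to force the slow path */
    static long long global_count = 1024;

    static int fast_path(long long delta)
    {
        if (atomic_load(&disabled))
            return -1;                  /* defer to the slow path */
        /* per-cpu counter modification would go here */
        return 0;
    }

    static int modify(long long delta)
    {
        int ret;

    again:
        if (fast_path(delta) == 0)
            return 0;

        pthread_mutex_lock(&icsb_mutex);        /* serialise the slow path */
        if (!atomic_load(&disabled)) {          /* someone re-enabled it */
            pthread_mutex_unlock(&icsb_mutex);
            goto again;                         /* retry the fast path */
        }
        pthread_mutex_lock(&sb_lock);           /* modify the global counter */
        if (global_count + delta < 0)
            ret = ENOSPC;
        else {
            global_count += delta;
            ret = 0;
        }
        pthread_mutex_unlock(&sb_lock);
        /* on success a rebalance may re-enable the per-cpu counters here */
        pthread_mutex_unlock(&icsb_mutex);
        return ret;
    }

    int main(void)
    {
        return modify(-512);
    }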