aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs/xfs_mount.c
diff options
context:
space:
mode:
authorDave Chinner <david@fromorbit.com>2015-02-23 05:19:28 -0500
committerDave Chinner <david@fromorbit.com>2015-02-23 05:19:28 -0500
commit501ab32387533924b211cacff36d19296414ec0b (patch)
tree47f131ce9bc31ae20949706a18d351b68944e581 /fs/xfs/xfs_mount.c
parentc517d838eb7d07bbe9507871fab3931deccff539 (diff)
xfs: use generic percpu counters for inode counter
XFS has hand-rolled per-cpu counters for the superblock since before there was any generic implementation. There are some warts around the use of them for the inode counter as the hand rolled counter is designed to be accurate at zero, but has no specific accuracy at any other value. This design causes problems for the maximum inode count threshold enforcement, as there is no trigger that balances the counters as they get close to the maximum threshold. Instead of designing new triggers for balancing, just replace the hand-rolled per-cpu counter with a generic counter. This enables us to update the counter through the normal superblock modification functions, but rather than do that we add a xfs_mod_icount() helper function (from Christoph Hellwig) and keep the percpu counter outside the superblock in the struct xfs_mount. This means we still need to initialise the per-cpu counter specifically when we read the superblock, and vice versa when we log/write it, but it does mean that we don't need to change any other code. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Brian Foster <bfoster@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
Diffstat (limited to 'fs/xfs/xfs_mount.c')
-rw-r--r--fs/xfs/xfs_mount.c76
1 files changed, 35 insertions, 41 deletions
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 4fa80e63eea2..702ea6a7e648 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1099,6 +1099,21 @@ xfs_log_sbcount(xfs_mount_t *mp)
1099 return xfs_sync_sb(mp, true); 1099 return xfs_sync_sb(mp, true);
1100} 1100}
1101 1101
1102int
1103xfs_mod_icount(
1104 struct xfs_mount *mp,
1105 int64_t delta)
1106{
1107 /* deltas are +/-64, hence the large batch size of 128. */
1108 __percpu_counter_add(&mp->m_icount, delta, 128);
1109 if (percpu_counter_compare(&mp->m_icount, 0) < 0) {
1110 ASSERT(0);
1111 percpu_counter_add(&mp->m_icount, -delta);
1112 return -EINVAL;
1113 }
1114 return 0;
1115}
1116
1102/* 1117/*
1103 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply 1118 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
1104 * a delta to a specified field in the in-core superblock. Simply 1119 * a delta to a specified field in the in-core superblock. Simply
@@ -1127,14 +1142,8 @@ xfs_mod_incore_sb_unlocked(
1127 */ 1142 */
1128 switch (field) { 1143 switch (field) {
1129 case XFS_SBS_ICOUNT: 1144 case XFS_SBS_ICOUNT:
1130 lcounter = (long long)mp->m_sb.sb_icount; 1145 ASSERT(0);
1131 lcounter += delta; 1146 return -ENOSPC;
1132 if (lcounter < 0) {
1133 ASSERT(0);
1134 return -EINVAL;
1135 }
1136 mp->m_sb.sb_icount = lcounter;
1137 return 0;
1138 case XFS_SBS_IFREE: 1147 case XFS_SBS_IFREE:
1139 lcounter = (long long)mp->m_sb.sb_ifree; 1148 lcounter = (long long)mp->m_sb.sb_ifree;
1140 lcounter += delta; 1149 lcounter += delta;
@@ -1288,8 +1297,9 @@ xfs_mod_incore_sb(
1288 int status; 1297 int status;
1289 1298
1290#ifdef HAVE_PERCPU_SB 1299#ifdef HAVE_PERCPU_SB
1291 ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS); 1300 ASSERT(field < XFS_SBS_IFREE || field > XFS_SBS_FDBLOCKS);
1292#endif 1301#endif
1302
1293 spin_lock(&mp->m_sb_lock); 1303 spin_lock(&mp->m_sb_lock);
1294 status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); 1304 status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
1295 spin_unlock(&mp->m_sb_lock); 1305 spin_unlock(&mp->m_sb_lock);
@@ -1492,7 +1502,6 @@ xfs_icsb_cpu_notify(
1492 case CPU_ONLINE: 1502 case CPU_ONLINE:
1493 case CPU_ONLINE_FROZEN: 1503 case CPU_ONLINE_FROZEN:
1494 xfs_icsb_lock(mp); 1504 xfs_icsb_lock(mp);
1495 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
1496 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); 1505 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
1497 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); 1506 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
1498 xfs_icsb_unlock(mp); 1507 xfs_icsb_unlock(mp);
@@ -1504,17 +1513,14 @@ xfs_icsb_cpu_notify(
1504 * re-enable the counters. */ 1513 * re-enable the counters. */
1505 xfs_icsb_lock(mp); 1514 xfs_icsb_lock(mp);
1506 spin_lock(&mp->m_sb_lock); 1515 spin_lock(&mp->m_sb_lock);
1507 xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
1508 xfs_icsb_disable_counter(mp, XFS_SBS_IFREE); 1516 xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
1509 xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS); 1517 xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
1510 1518
1511 mp->m_sb.sb_icount += cntp->icsb_icount;
1512 mp->m_sb.sb_ifree += cntp->icsb_ifree; 1519 mp->m_sb.sb_ifree += cntp->icsb_ifree;
1513 mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks; 1520 mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;
1514 1521
1515 memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); 1522 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1516 1523
1517 xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
1518 xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0); 1524 xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
1519 xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0); 1525 xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
1520 spin_unlock(&mp->m_sb_lock); 1526 spin_unlock(&mp->m_sb_lock);
@@ -1531,11 +1537,18 @@ xfs_icsb_init_counters(
1531 xfs_mount_t *mp) 1537 xfs_mount_t *mp)
1532{ 1538{
1533 xfs_icsb_cnts_t *cntp; 1539 xfs_icsb_cnts_t *cntp;
1540 int error;
1534 int i; 1541 int i;
1535 1542
1543 error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1544 if (error)
1545 return error;
1546
1536 mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t); 1547 mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
1537 if (mp->m_sb_cnts == NULL) 1548 if (!mp->m_sb_cnts) {
1549 percpu_counter_destroy(&mp->m_icount);
1538 return -ENOMEM; 1550 return -ENOMEM;
1551 }
1539 1552
1540 for_each_online_cpu(i) { 1553 for_each_online_cpu(i) {
1541 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); 1554 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
@@ -1563,13 +1576,14 @@ void
1563xfs_icsb_reinit_counters( 1576xfs_icsb_reinit_counters(
1564 xfs_mount_t *mp) 1577 xfs_mount_t *mp)
1565{ 1578{
1579 percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1580
1566 xfs_icsb_lock(mp); 1581 xfs_icsb_lock(mp);
1567 /* 1582 /*
1568 * start with all counters disabled so that the 1583 * start with all counters disabled so that the
1569 * initial balance kicks us off correctly 1584 * initial balance kicks us off correctly
1570 */ 1585 */
1571 mp->m_icsb_counters = -1; 1586 mp->m_icsb_counters = -1;
1572 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
1573 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); 1587 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
1574 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); 1588 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
1575 xfs_icsb_unlock(mp); 1589 xfs_icsb_unlock(mp);
@@ -1583,6 +1597,9 @@ xfs_icsb_destroy_counters(
1583 unregister_hotcpu_notifier(&mp->m_icsb_notifier); 1597 unregister_hotcpu_notifier(&mp->m_icsb_notifier);
1584 free_percpu(mp->m_sb_cnts); 1598 free_percpu(mp->m_sb_cnts);
1585 } 1599 }
1600
1601 percpu_counter_destroy(&mp->m_icount);
1602
1586 mutex_destroy(&mp->m_icsb_mutex); 1603 mutex_destroy(&mp->m_icsb_mutex);
1587} 1604}
1588 1605
@@ -1645,7 +1662,6 @@ xfs_icsb_count(
1645 1662
1646 for_each_online_cpu(i) { 1663 for_each_online_cpu(i) {
1647 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); 1664 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1648 cnt->icsb_icount += cntp->icsb_icount;
1649 cnt->icsb_ifree += cntp->icsb_ifree; 1665 cnt->icsb_ifree += cntp->icsb_ifree;
1650 cnt->icsb_fdblocks += cntp->icsb_fdblocks; 1666 cnt->icsb_fdblocks += cntp->icsb_fdblocks;
1651 } 1667 }
@@ -1659,7 +1675,7 @@ xfs_icsb_counter_disabled(
1659 xfs_mount_t *mp, 1675 xfs_mount_t *mp,
1660 xfs_sb_field_t field) 1676 xfs_sb_field_t field)
1661{ 1677{
1662 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); 1678 ASSERT((field >= XFS_SBS_IFREE) && (field <= XFS_SBS_FDBLOCKS));
1663 return test_bit(field, &mp->m_icsb_counters); 1679 return test_bit(field, &mp->m_icsb_counters);
1664} 1680}
1665 1681
@@ -1670,7 +1686,7 @@ xfs_icsb_disable_counter(
1670{ 1686{
1671 xfs_icsb_cnts_t cnt; 1687 xfs_icsb_cnts_t cnt;
1672 1688
1673 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); 1689 ASSERT((field >= XFS_SBS_IFREE) && (field <= XFS_SBS_FDBLOCKS));
1674 1690
1675 /* 1691 /*
1676 * If we are already disabled, then there is nothing to do 1692 * If we are already disabled, then there is nothing to do
@@ -1689,9 +1705,6 @@ xfs_icsb_disable_counter(
1689 1705
1690 xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT); 1706 xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
1691 switch(field) { 1707 switch(field) {
1692 case XFS_SBS_ICOUNT:
1693 mp->m_sb.sb_icount = cnt.icsb_icount;
1694 break;
1695 case XFS_SBS_IFREE: 1708 case XFS_SBS_IFREE:
1696 mp->m_sb.sb_ifree = cnt.icsb_ifree; 1709 mp->m_sb.sb_ifree = cnt.icsb_ifree;
1697 break; 1710 break;
@@ -1716,15 +1729,12 @@ xfs_icsb_enable_counter(
1716 xfs_icsb_cnts_t *cntp; 1729 xfs_icsb_cnts_t *cntp;
1717 int i; 1730 int i;
1718 1731
1719 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); 1732 ASSERT((field >= XFS_SBS_IFREE) && (field <= XFS_SBS_FDBLOCKS));
1720 1733
1721 xfs_icsb_lock_all_counters(mp); 1734 xfs_icsb_lock_all_counters(mp);
1722 for_each_online_cpu(i) { 1735 for_each_online_cpu(i) {
1723 cntp = per_cpu_ptr(mp->m_sb_cnts, i); 1736 cntp = per_cpu_ptr(mp->m_sb_cnts, i);
1724 switch (field) { 1737 switch (field) {
1725 case XFS_SBS_ICOUNT:
1726 cntp->icsb_icount = count + resid;
1727 break;
1728 case XFS_SBS_IFREE: 1738 case XFS_SBS_IFREE:
1729 cntp->icsb_ifree = count + resid; 1739 cntp->icsb_ifree = count + resid;
1730 break; 1740 break;
@@ -1750,8 +1760,6 @@ xfs_icsb_sync_counters_locked(
1750 1760
1751 xfs_icsb_count(mp, &cnt, flags); 1761 xfs_icsb_count(mp, &cnt, flags);
1752 1762
1753 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
1754 mp->m_sb.sb_icount = cnt.icsb_icount;
1755 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE)) 1763 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
1756 mp->m_sb.sb_ifree = cnt.icsb_ifree; 1764 mp->m_sb.sb_ifree = cnt.icsb_ifree;
1757 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS)) 1765 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
@@ -1805,12 +1813,6 @@ xfs_icsb_balance_counter_locked(
1805 1813
1806 /* update counters - first CPU gets residual*/ 1814 /* update counters - first CPU gets residual*/
1807 switch (field) { 1815 switch (field) {
1808 case XFS_SBS_ICOUNT:
1809 count = mp->m_sb.sb_icount;
1810 resid = do_div(count, weight);
1811 if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
1812 return;
1813 break;
1814 case XFS_SBS_IFREE: 1816 case XFS_SBS_IFREE:
1815 count = mp->m_sb.sb_ifree; 1817 count = mp->m_sb.sb_ifree;
1816 resid = do_div(count, weight); 1818 resid = do_div(count, weight);
@@ -1871,14 +1873,6 @@ again:
1871 } 1873 }
1872 1874
1873 switch (field) { 1875 switch (field) {
1874 case XFS_SBS_ICOUNT:
1875 lcounter = icsbp->icsb_icount;
1876 lcounter += delta;
1877 if (unlikely(lcounter < 0))
1878 goto balance_counter;
1879 icsbp->icsb_icount = lcounter;
1880 break;
1881
1882 case XFS_SBS_IFREE: 1876 case XFS_SBS_IFREE:
1883 lcounter = icsbp->icsb_ifree; 1877 lcounter = icsbp->icsb_ifree;
1884 lcounter += delta; 1878 lcounter += delta;