author		Christoph Hellwig <hch@infradead.org>	2012-02-01 08:57:20 -0500
committer	Ben Myers <bpm@sgi.com>			2012-02-10 13:38:09 -0500
commit		92b2e5b31dd2ad2c9273578c2289d17f417fe32d (patch)
tree		312cdaf3479d943234d2a887a4395aa9626f6442
parent		4177af3a8a6f119484c7903845c6693d7381c13e (diff)
xfs: use a normal shrinker for the dquot freelist
Stop reusing dquots from the freelist when allocating new ones directly, and
implement a shrinker that actually follows the specifications for the
interface. The shrinker implementation is still highly suboptimal at this
point, but we can gradually work on it.

This also fixes a bug in the previous lock ordering, where we would take
the hash and dqlist locks inside of the freelist lock against the normal
lock ordering. This is only solvable by introducing the dispose list, and
thus not when using direct reclaim of unused dquots for new allocations.

As a side-effect the quota upper bound and used-to-free ratio values in
/proc/fs/xfs/xqm are set to 0 as these values don't make any sense in the
new world order.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>
(cherry picked from commit 04da0c8196ac0b12fb6b84f4b7a51ad2fa56d869)
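The dispose-list pattern this patch introduces can be modelled outside the
kernel. Below is a minimal, runnable userspace sketch (not the XFS code):
pthread mutexes stand in for the kernel mutexes, a hand-rolled doubly-linked
list stands in for list_head, and all the names (freelist_lock, hash_lock,
shake, struct dquot) are illustrative. The point it demonstrates is the one
the message describes: victims are moved to a private dispose list while only
the freelist lock is held, and the hash lock is taken only after the freelist
lock has been dropped, so the inner locks are never acquired inside it.

#include <pthread.h>
#include <stdlib.h>

struct dquot {
	struct dquot	*prev, *next;
	int		nrefs;
};

static struct dquot freelist = { &freelist, &freelist, 0 };
static pthread_mutex_t freelist_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

/* unlink @dqp and re-insert it just before @head, i.e. at the tail */
static void list_move_tail(struct dquot *dqp, struct dquot *head)
{
	dqp->prev->next = dqp->next;
	dqp->next->prev = dqp->prev;
	dqp->prev = head->prev;
	dqp->next = head;
	head->prev->next = dqp;
	head->prev = dqp;
}

static void shake(int nr_to_scan)
{
	struct dquot dispose = { &dispose, &dispose, 0 };
	struct dquot *dqp;

	/*
	 * Pass 1: only the freelist lock is held.  Busy entries are
	 * rotated to the tail; victims move to the private dispose
	 * list instead of being torn down here.
	 */
	pthread_mutex_lock(&freelist_lock);
	while (freelist.next != &freelist && nr_to_scan-- > 0) {
		dqp = freelist.next;
		if (dqp->nrefs) {
			list_move_tail(dqp, &freelist);
			continue;
		}
		list_move_tail(dqp, &dispose);
	}
	pthread_mutex_unlock(&freelist_lock);

	/*
	 * Pass 2: the freelist lock has been dropped, so taking the
	 * hash lock here follows the normal lock ordering.
	 */
	while (dispose.next != &dispose) {
		dqp = dispose.next;
		dqp->prev->next = dqp->next;
		dqp->next->prev = dqp->prev;
		pthread_mutex_lock(&hash_lock);
		/* ... unhash dqp here ... */
		pthread_mutex_unlock(&hash_lock);
		free(dqp);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct dquot *dqp = calloc(1, sizeof(*dqp));
		dqp->nrefs = i & 1;	/* every other one busy */
		dqp->prev = dqp->next = dqp;	/* self-linked, as INIT_LIST_HEAD */
		list_move_tail(dqp, &freelist);
	}
	shake(8);
	return 0;
}

Because pass 2 never runs under the freelist lock, the inversion is gone by
construction, which is why the fix required a dispose list rather than the
old reclaim-on-allocation scheme.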
-rw-r--r--	fs/xfs/kmem.h		  6
-rw-r--r--	fs/xfs/xfs_dquot.c	103
-rw-r--r--	fs/xfs/xfs_qm.c		291
-rw-r--r--	fs/xfs/xfs_qm.h		 14
-rw-r--r--	fs/xfs/xfs_qm_stats.c	  4
-rw-r--r--	fs/xfs/xfs_trace.h	  5
6 files changed, 141 insertions(+), 282 deletions(-)
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 292eff198030..ab7c53fe346e 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -110,10 +110,4 @@ kmem_zone_destroy(kmem_zone_t *zone)
 extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
 extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
 
-static inline int
-kmem_shake_allow(gfp_t gfp_mask)
-{
-	return ((gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS));
-}
-
 #endif	/* __XFS_SUPPORT_KMEM_H__ */
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index bf4fe8637f3d..6d7faa87b41c 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -63,82 +63,6 @@ int xfs_dqerror_mod = 33;
 static struct lock_class_key xfs_dquot_other_class;
 
 /*
- * Allocate and initialize a dquot. We don't always allocate fresh memory;
- * we try to reclaim a free dquot if the number of incore dquots are above
- * a threshold.
- * The only field inside the core that gets initialized at this point
- * is the d_id field. The idea is to fill in the entire q_core
- * when we read in the on disk dquot.
- */
-STATIC xfs_dquot_t *
-xfs_qm_dqinit(
-	xfs_mount_t	*mp,
-	xfs_dqid_t	id,
-	uint		type)
-{
-	xfs_dquot_t	*dqp;
-	boolean_t	brandnewdquot;
-
-	brandnewdquot = xfs_qm_dqalloc_incore(&dqp);
-	dqp->dq_flags = type;
-	dqp->q_core.d_id = cpu_to_be32(id);
-	dqp->q_mount = mp;
-
-	/*
-	 * No need to re-initialize these if this is a reclaimed dquot.
-	 */
-	if (brandnewdquot) {
-		INIT_LIST_HEAD(&dqp->q_freelist);
-		mutex_init(&dqp->q_qlock);
-		init_waitqueue_head(&dqp->q_pinwait);
-
-		/*
-		 * Because we want to use a counting completion, complete
-		 * the flush completion once to allow a single access to
-		 * the flush completion without blocking.
-		 */
-		init_completion(&dqp->q_flush);
-		complete(&dqp->q_flush);
-
-		trace_xfs_dqinit(dqp);
-	} else {
-		/*
-		 * Only the q_core portion was zeroed in dqreclaim_one().
-		 * So, we need to reset others.
-		 */
-		dqp->q_nrefs = 0;
-		dqp->q_blkno = 0;
-		INIT_LIST_HEAD(&dqp->q_mplist);
-		INIT_LIST_HEAD(&dqp->q_hashlist);
-		dqp->q_bufoffset = 0;
-		dqp->q_fileoffset = 0;
-		dqp->q_transp = NULL;
-		dqp->q_gdquot = NULL;
-		dqp->q_res_bcount = 0;
-		dqp->q_res_icount = 0;
-		dqp->q_res_rtbcount = 0;
-		atomic_set(&dqp->q_pincount, 0);
-		dqp->q_hash = NULL;
-		ASSERT(list_empty(&dqp->q_freelist));
-
-		trace_xfs_dqreuse(dqp);
-	}
-
-	/*
-	 * In either case we need to make sure group quotas have a different
-	 * lock class than user quotas, to make sure lockdep knows we can
-	 * locks of one of each at the same time.
-	 */
-	if (!(type & XFS_DQ_USER))
-		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);
-
-	/*
-	 * log item gets initialized later
-	 */
-	return (dqp);
-}
-
-/*
  * This is called to free all the memory associated with a dquot
  */
 void
@@ -567,7 +491,32 @@ xfs_qm_dqread(
 	int			error;
 	int			cancelflags = 0;
 
-	dqp = xfs_qm_dqinit(mp, id, type);
+
+	dqp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
+
+	dqp->dq_flags = type;
+	dqp->q_core.d_id = cpu_to_be32(id);
+	dqp->q_mount = mp;
+	INIT_LIST_HEAD(&dqp->q_freelist);
+	mutex_init(&dqp->q_qlock);
+	init_waitqueue_head(&dqp->q_pinwait);
+
+	/*
+	 * Because we want to use a counting completion, complete
+	 * the flush completion once to allow a single access to
+	 * the flush completion without blocking.
+	 */
+	init_completion(&dqp->q_flush);
+	complete(&dqp->q_flush);
+
+	/*
+	 * Make sure group quotas have a different lock class than user
+	 * quotas.
+	 */
+	if (!(type & XFS_DQ_USER))
+		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);
+
+	atomic_inc(&xfs_Gqm->qm_totaldquots);
 
 	trace_xfs_dqread(dqp);
 
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 1b2f5b37eac4..c872feaf3697 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -50,7 +50,6 @@
  */
 struct mutex	xfs_Gqm_lock;
 struct xfs_qm	*xfs_Gqm;
-uint		ndquot;
 
 kmem_zone_t	*qm_dqzone;
 kmem_zone_t	*qm_dqtrxzone;
@@ -93,7 +92,6 @@ xfs_Gqm_init(void)
 		goto out_free_udqhash;
 
 	hsize /= sizeof(xfs_dqhash_t);
-	ndquot = hsize << 8;
 
 	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
 	xqm->qm_dqhashmask = hsize - 1;
@@ -137,7 +135,6 @@ xfs_Gqm_init(void)
 	xqm->qm_dqtrxzone = qm_dqtrxzone;
 
 	atomic_set(&xqm->qm_totaldquots, 0);
-	xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
 	xqm->qm_nrefs = 0;
 	return xqm;
 
@@ -1600,216 +1597,150 @@ xfs_qm_init_quotainos(
 	return 0;
 }
 
+STATIC void
+xfs_qm_dqfree_one(
+	struct xfs_dquot	*dqp)
+{
+	struct xfs_mount	*mp = dqp->q_mount;
+	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 
+	mutex_lock(&dqp->q_hash->qh_lock);
+	list_del_init(&dqp->q_hashlist);
+	dqp->q_hash->qh_version++;
+	mutex_unlock(&dqp->q_hash->qh_lock);
 
-/*
- * Pop the least recently used dquot off the freelist and recycle it.
- */
-STATIC struct xfs_dquot *
-xfs_qm_dqreclaim_one(void)
+	mutex_lock(&qi->qi_dqlist_lock);
+	list_del_init(&dqp->q_mplist);
+	qi->qi_dquots--;
+	qi->qi_dqreclaims++;
+	mutex_unlock(&qi->qi_dqlist_lock);
+
+	xfs_qm_dqdestroy(dqp);
+}
+
+STATIC void
+xfs_qm_dqreclaim_one(
+	struct xfs_dquot	*dqp,
+	struct list_head	*dispose_list)
 {
-	struct xfs_dquot	*dqp;
-	int			restarts = 0;
+	struct xfs_mount	*mp = dqp->q_mount;
+	int			error;
 
-	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
-restart:
-	list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
-		struct xfs_mount *mp = dqp->q_mount;
+	if (!xfs_dqlock_nowait(dqp))
+		goto out_busy;
 
-		if (!xfs_dqlock_nowait(dqp))
-			continue;
+	/*
+	 * This dquot has acquired a reference in the meantime remove it from
+	 * the freelist and try again.
+	 */
+	if (dqp->q_nrefs) {
+		xfs_dqunlock(dqp);
 
-		/*
-		 * This dquot has already been grabbed by dqlookup.
-		 * Remove it from the freelist and try again.
-		 */
-		if (dqp->q_nrefs) {
-			trace_xfs_dqreclaim_want(dqp);
-			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
-
-			list_del_init(&dqp->q_freelist);
-			xfs_Gqm->qm_dqfrlist_cnt--;
-			restarts++;
-			goto dqunlock;
-		}
+		trace_xfs_dqreclaim_want(dqp);
+		XQM_STATS_INC(xqmstats.xs_qm_dqwants);
 
-		ASSERT(dqp->q_hash);
-		ASSERT(!list_empty(&dqp->q_mplist));
+		list_del_init(&dqp->q_freelist);
+		xfs_Gqm->qm_dqfrlist_cnt--;
+		return;
+	}
 
-		/*
-		 * Try to grab the flush lock. If this dquot is in the process
-		 * of getting flushed to disk, we don't want to reclaim it.
-		 */
-		if (!xfs_dqflock_nowait(dqp))
-			goto dqunlock;
+	ASSERT(dqp->q_hash);
+	ASSERT(!list_empty(&dqp->q_mplist));
 
-		/*
-		 * We have the flush lock so we know that this is not in the
-		 * process of being flushed. So, if this is dirty, flush it
-		 * DELWRI so that we don't get a freelist infested with
-		 * dirty dquots.
-		 */
-		if (XFS_DQ_IS_DIRTY(dqp)) {
-			int	error;
+	/*
+	 * Try to grab the flush lock. If this dquot is in the process of
+	 * getting flushed to disk, we don't want to reclaim it.
+	 */
+	if (!xfs_dqflock_nowait(dqp))
+		goto out_busy;
 
-			trace_xfs_dqreclaim_dirty(dqp);
+	/*
+	 * We have the flush lock so we know that this is not in the
+	 * process of being flushed. So, if this is dirty, flush it
+	 * DELWRI so that we don't get a freelist infested with
+	 * dirty dquots.
+	 */
+	if (XFS_DQ_IS_DIRTY(dqp)) {
+		trace_xfs_dqreclaim_dirty(dqp);
 
-			/*
-			 * We flush it delayed write, so don't bother
-			 * releasing the freelist lock.
-			 */
-			error = xfs_qm_dqflush(dqp, SYNC_TRYLOCK);
-			if (error) {
-				xfs_warn(mp, "%s: dquot %p flush failed",
-					__func__, dqp);
-			}
-			goto dqunlock;
+		/*
+		 * We flush it delayed write, so don't bother releasing the
+		 * freelist lock.
+		 */
+		error = xfs_qm_dqflush(dqp, 0);
+		if (error) {
+			xfs_warn(mp, "%s: dquot %p flush failed",
+				 __func__, dqp);
 		}
-		xfs_dqfunlock(dqp);
 
 		/*
-		 * Prevent lookup now that we are going to reclaim the dquot.
-		 * Once XFS_DQ_FREEING is set lookup won't touch the dquot,
-		 * thus we can drop the lock now.
+		 * Give the dquot another try on the freelist, as the
+		 * flushing will take some time.
 		 */
-		dqp->dq_flags |= XFS_DQ_FREEING;
-		xfs_dqunlock(dqp);
-
-		mutex_lock(&dqp->q_hash->qh_lock);
-		list_del_init(&dqp->q_hashlist);
-		dqp->q_hash->qh_version++;
-		mutex_unlock(&dqp->q_hash->qh_lock);
-
-		mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
-		list_del_init(&dqp->q_mplist);
-		mp->m_quotainfo->qi_dquots--;
-		mp->m_quotainfo->qi_dqreclaims++;
-		mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+		goto out_busy;
+	}
+	xfs_dqfunlock(dqp);
 
-		ASSERT(dqp->q_nrefs == 0);
-		list_del_init(&dqp->q_freelist);
-		xfs_Gqm->qm_dqfrlist_cnt--;
+	/*
+	 * Prevent lookups now that we are past the point of no return.
+	 */
+	dqp->dq_flags |= XFS_DQ_FREEING;
+	xfs_dqunlock(dqp);
 
-	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-	return dqp;
-dqunlock:
-		xfs_dqunlock(dqp);
-		if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-			break;
-		goto restart;
-	}
+	ASSERT(dqp->q_nrefs == 0);
+	list_move_tail(&dqp->q_freelist, dispose_list);
+	xfs_Gqm->qm_dqfrlist_cnt--;
 
-	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-	return NULL;
-}
+	trace_xfs_dqreclaim_done(dqp);
+	XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
+	return;
 
-/*
- * Traverse the freelist of dquots and attempt to reclaim a maximum of
- * 'howmany' dquots. This operation races with dqlookup(), and attempts to
- * favor the lookup function ...
- */
-STATIC int
-xfs_qm_shake_freelist(
-	int	howmany)
-{
-	int		nreclaimed = 0;
-	xfs_dquot_t	*dqp;
+out_busy:
+	xfs_dqunlock(dqp);
 
-	if (howmany <= 0)
-		return 0;
+	/*
+	 * Move the dquot to the tail of the list so that we don't spin on it.
+	 */
+	list_move_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
 
-	while (nreclaimed < howmany) {
-		dqp = xfs_qm_dqreclaim_one();
-		if (!dqp)
-			return nreclaimed;
-		xfs_qm_dqdestroy(dqp);
-		nreclaimed++;
-	}
-	return nreclaimed;
+	trace_xfs_dqreclaim_busy(dqp);
+	XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
 }
 
-/*
- * The kmem_shake interface is invoked when memory is running low.
- */
-/* ARGSUSED */
 STATIC int
 xfs_qm_shake(
 	struct shrinker		*shrink,
 	struct shrink_control	*sc)
 {
-	int	ndqused, nfree, n;
-	gfp_t	gfp_mask = sc->gfp_mask;
-
-	if (!kmem_shake_allow(gfp_mask))
-		return 0;
-	if (!xfs_Gqm)
-		return 0;
-
-	nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */
-	/* incore dquots in all f/s's */
-	ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;
-
-	ASSERT(ndqused >= 0);
+	int			nr_to_scan = sc->nr_to_scan;
+	LIST_HEAD		(dispose_list);
+	struct xfs_dquot	*dqp;
 
-	if (nfree <= ndqused && nfree < ndquot)
+	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
 		return 0;
+	if (!nr_to_scan)
+		goto out;
 
-	ndqused *= xfs_Gqm->qm_dqfree_ratio;	/* target # of free dquots */
-	n = nfree - ndqused - ndquot;		/* # over target */
-
-	return xfs_qm_shake_freelist(MAX(nfree, n));
-}
-
-
-/*------------------------------------------------------------------*/
-
-/*
- * Return a new incore dquot. Depending on the number of
- * dquots in the system, we either allocate a new one on the kernel heap,
- * or reclaim a free one.
- * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed
- * to reclaim an existing one from the freelist.
- */
-boolean_t
-xfs_qm_dqalloc_incore(
-	xfs_dquot_t **O_dqpp)
-{
-	xfs_dquot_t	*dqp;
-
-	/*
-	 * Check against high water mark to see if we want to pop
-	 * a nincompoop dquot off the freelist.
-	 */
-	if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) {
-		/*
-		 * Try to recycle a dquot from the freelist.
-		 */
-		if ((dqp = xfs_qm_dqreclaim_one())) {
-			XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
-			/*
-			 * Just zero the core here. The rest will get
-			 * reinitialized by caller. XXX we shouldn't even
-			 * do this zero ...
-			 */
-			memset(&dqp->q_core, 0, sizeof(dqp->q_core));
-			*O_dqpp = dqp;
-			return B_FALSE;
-		}
-		XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
+	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
+	while (!list_empty(&xfs_Gqm->qm_dqfrlist)) {
+		if (nr_to_scan-- <= 0)
+			break;
+		dqp = list_first_entry(&xfs_Gqm->qm_dqfrlist, struct xfs_dquot,
+				       q_freelist);
+		xfs_qm_dqreclaim_one(dqp, &dispose_list);
 	}
+	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
 
-	/*
-	 * Allocate a brand new dquot on the kernel heap and return it
-	 * to the caller to initialize.
-	 */
-	ASSERT(xfs_Gqm->qm_dqzone != NULL);
-	*O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
-	atomic_inc(&xfs_Gqm->qm_totaldquots);
-
-	return B_TRUE;
+	while (!list_empty(&dispose_list)) {
+		dqp = list_first_entry(&dispose_list, struct xfs_dquot,
+				       q_freelist);
+		list_del_init(&dqp->q_freelist);
+		xfs_qm_dqfree_one(dqp);
+	}
+out:
+	return (xfs_Gqm->qm_dqfrlist_cnt / 100) * sysctl_vfs_cache_pressure;
 }
-
 
 /*
  * Start a transaction and write the incore superblock changes to
  * disk. flags parameter indicates which fields have changed.
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 9b4f3adefbc5..9a9b997e1a0a 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -26,24 +26,12 @@
 struct xfs_qm;
 struct xfs_inode;
 
-extern uint		ndquot;
 extern struct mutex	xfs_Gqm_lock;
 extern struct xfs_qm	*xfs_Gqm;
 extern kmem_zone_t	*qm_dqzone;
 extern kmem_zone_t	*qm_dqtrxzone;
 
 /*
- * Ditto, for xfs_qm_dqreclaim_one.
- */
-#define XFS_QM_RECLAIM_MAX_RESTARTS	4
-
-/*
- * Ideal ratio of free to in use dquots. Quota manager makes an attempt
- * to keep this balance.
- */
-#define XFS_QM_DQFREE_RATIO		2
-
-/*
  * Dquot hashtable constants/threshold values.
  */
 #define XFS_QM_HASHSIZE_LOW		(PAGE_SIZE / sizeof(xfs_dqhash_t))
@@ -74,7 +62,6 @@ typedef struct xfs_qm {
 	int		qm_dqfrlist_cnt;
 	atomic_t	qm_totaldquots;	/* total incore dquots */
 	uint		qm_nrefs;	/* file systems with quota on */
-	int		qm_dqfree_ratio;/* ratio of free to inuse dquots */
 	kmem_zone_t	*qm_dqzone;	/* dquot mem-alloc zone */
 	kmem_zone_t	*qm_dqtrxzone;	/* t_dqinfo of transactions */
 } xfs_qm_t;
@@ -143,7 +130,6 @@ extern int xfs_qm_quotacheck(xfs_mount_t *);
 extern int		xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t);
 
 /* dquot stuff */
-extern boolean_t	xfs_qm_dqalloc_incore(xfs_dquot_t **);
 extern int		xfs_qm_dqpurge_all(xfs_mount_t *, uint);
 extern void		xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint);
 
diff --git a/fs/xfs/xfs_qm_stats.c b/fs/xfs/xfs_qm_stats.c
index 8671a0b32644..5729ba570877 100644
--- a/fs/xfs/xfs_qm_stats.c
+++ b/fs/xfs/xfs_qm_stats.c
@@ -42,9 +42,9 @@ static int xqm_proc_show(struct seq_file *m, void *v)
 {
 	/* maximum; incore; ratio free to inuse; freelist */
 	seq_printf(m, "%d\t%d\t%d\t%u\n",
-			ndquot,
+			0,
 			xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
-			xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0,
+			0,
 			xfs_Gqm? xfs_Gqm->qm_dqfrlist_cnt : 0);
 	return 0;
 }
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 6b6df5802e95..bb134a819930 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -733,11 +733,10 @@ DEFINE_EVENT(xfs_dquot_class, name, \
 DEFINE_DQUOT_EVENT(xfs_dqadjust);
 DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
 DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
-DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);
+DEFINE_DQUOT_EVENT(xfs_dqreclaim_busy);
+DEFINE_DQUOT_EVENT(xfs_dqreclaim_done);
 DEFINE_DQUOT_EVENT(xfs_dqattach_found);
 DEFINE_DQUOT_EVENT(xfs_dqattach_get);
-DEFINE_DQUOT_EVENT(xfs_dqinit);
-DEFINE_DQUOT_EVENT(xfs_dqreuse);
 DEFINE_DQUOT_EVENT(xfs_dqalloc);
 DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
 DEFINE_DQUOT_EVENT(xfs_dqread);