author     Dave Chinner <dchinner@redhat.com>    2013-08-27 20:18:07 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>     2013-09-10 18:56:31 -0400
commit     cd56a39a59868911bbf8832725630c1cf43a7b09 (patch)
tree       1d110f7c0ee68db0dfa1ae37ecdf5c0cc304ceeb /fs/xfs
parent     a408235726aa82c0358c9ec68124b6f4bc0a79df (diff)
xfs: convert dquot cache lru to list_lru
Convert the XFS dquot lru to use the list_lru construct and convert the
shrinker to being node aware.

[glommer@openvz.org: edited for conflicts + warning fixes]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
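For readers who have not seen the list_lru API before, the core of the conversion is mechanical: the open-coded qi_lru_list / qi_lru_lock / qi_lru_count triple becomes a single struct list_lru, which keeps a list, a spinlock and a count per NUMA node. The sketch below is illustrative only and is not part of the patch; the my_cache/my_obj names and the nr_unused counter are invented stand-ins for xfs_quotainfo, xfs_dquot and the XFS_STATS counters, while the list_lru_* calls are the real interface used in the hunks that follow.

#include <linux/list_lru.h>

/* hypothetical cache and object -- stand-ins for xfs_quotainfo and xfs_dquot */
struct my_obj {
	struct list_head	lru;		/* linkage owned by the list_lru */
};

struct my_cache {
	struct list_lru		lru;		/* replaces list head + mutex + count */
};

static unsigned long	nr_unused;		/* stand-in for XFS_STATS accounting */

static void my_cache_init(struct my_cache *c)
{
	list_lru_init(&c->lru);			/* one list/lock/count per NUMA node */
}

/* last reference dropped: park the object on the LRU for later reclaim */
static void my_obj_idle(struct my_cache *c, struct my_obj *o)
{
	/* list_lru_add() returns true only if the item was not already queued */
	if (list_lru_add(&c->lru, &o->lru))
		nr_unused++;
}

/* object is being purged: pull it back off the LRU before freeing it */
static void my_obj_purge(struct my_cache *c, struct my_obj *o)
{
	if (list_lru_del(&c->lru, &o->lru))
		nr_unused--;
}

Reclaim is then driven per node: a shrinker walks one node's list with list_lru_walk_node() and an isolate callback that returns LRU_REMOVED, LRU_SKIP or LRU_RETRY for each item, which is exactly the shape of the new xfs_qm_dquot_isolate() further down.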
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/xfs_dquot.c    7
-rw-r--r--  fs/xfs/xfs_qm.c     277
-rw-r--r--  fs/xfs/xfs_qm.h       4
3 files changed, 144 insertions(+), 144 deletions(-)
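The shrinker half of the change uses the count/scan shrinker interface merged in the same series: count_objects gives a cheap per-node estimate, scan_objects does the actual freeing, and SHRINKER_NUMA_AWARE makes the VM call both once per NUMA node with the node id in sc->nid. A minimal, illustrative registration sketch follows; the my_* names are hypothetical stand-ins for the xfs_qm_shrink_count()/xfs_qm_shrink_scan() callbacks wired up in the last xfs_qm.c hunk.

#include <linux/shrinker.h>

/* hypothetical callbacks -- stand-ins for xfs_qm_shrink_count()/xfs_qm_shrink_scan() */
static unsigned long my_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/* cheap, best-effort count of freeable objects on NUMA node sc->nid */
	return 0;
}

static unsigned long my_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	/* free up to sc->nr_to_scan objects from node sc->nid; return the number freed */
	return 0;
}

static struct shrinker my_shrinker = {
	.count_objects	= my_count,
	.scan_objects	= my_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_NUMA_AWARE,	/* callbacks are invoked per node */
};

static void my_cache_start(void)
{
	register_shrinker(&my_shrinker);
}

The interface also lets scan_objects return SHRINK_STOP to tell the VM to back off; this patch instead simply returns 0 when the allocation context does not allow __GFP_FS/__GFP_WAIT recursion.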
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 251c66632e5e..71520e6e5d65 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -940,13 +940,8 @@ xfs_qm_dqput_final(
 
 	trace_xfs_dqput_free(dqp);
 
-	mutex_lock(&qi->qi_lru_lock);
-	if (list_empty(&dqp->q_lru)) {
-		list_add_tail(&dqp->q_lru, &qi->qi_lru_list);
-		qi->qi_lru_count++;
+	if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
 		XFS_STATS_INC(xs_qm_dquot_unused);
-	}
-	mutex_unlock(&qi->qi_lru_lock);
 
 	/*
 	 * If we just added a udquot to the freelist, then we want to release
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 956da2e1c7af..0fa98753bf67 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -51,8 +51,9 @@
  */
 STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
 STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
-STATIC int	xfs_qm_shake(struct shrinker *, struct shrink_control *);
 
+
+STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
 /*
  * We use the batch lookup interface to iterate over the dquots as it
  * currently is the only interface into the radix tree code that allows
@@ -203,12 +204,9 @@ xfs_qm_dqpurge(
 	 * We move dquots to the freelist as soon as their reference count
 	 * hits zero, so it really should be on the freelist here.
 	 */
-	mutex_lock(&qi->qi_lru_lock);
 	ASSERT(!list_empty(&dqp->q_lru));
-	list_del_init(&dqp->q_lru);
-	qi->qi_lru_count--;
+	list_lru_del(&qi->qi_lru, &dqp->q_lru);
 	XFS_STATS_DEC(xs_qm_dquot_unused);
-	mutex_unlock(&qi->qi_lru_lock);
 
 	xfs_qm_dqdestroy(dqp);
 
@@ -680,6 +678,141 @@ xfs_qm_calc_dquots_per_chunk(
 	return ndquots;
 }
 
+struct xfs_qm_isolate {
+	struct list_head	buffers;
+	struct list_head	dispose;
+};
+
+static enum lru_status
+xfs_qm_dquot_isolate(
+	struct list_head	*item,
+	spinlock_t		*lru_lock,
+	void			*arg)
+{
+	struct xfs_dquot	*dqp = container_of(item,
+						struct xfs_dquot, q_lru);
+	struct xfs_qm_isolate	*isol = arg;
+
+	if (!xfs_dqlock_nowait(dqp))
+		goto out_miss_busy;
+
+	/*
+	 * This dquot has acquired a reference in the meantime remove it from
+	 * the freelist and try again.
+	 */
+	if (dqp->q_nrefs) {
+		xfs_dqunlock(dqp);
+		XFS_STATS_INC(xs_qm_dqwants);
+
+		trace_xfs_dqreclaim_want(dqp);
+		list_del_init(&dqp->q_lru);
+		XFS_STATS_DEC(xs_qm_dquot_unused);
+		return LRU_REMOVED;
+	}
+
+	/*
+	 * If the dquot is dirty, flush it. If it's already being flushed, just
+	 * skip it so there is time for the IO to complete before we try to
+	 * reclaim it again on the next LRU pass.
+	 */
+	if (!xfs_dqflock_nowait(dqp)) {
+		xfs_dqunlock(dqp);
+		goto out_miss_busy;
+	}
+
+	if (XFS_DQ_IS_DIRTY(dqp)) {
+		struct xfs_buf	*bp = NULL;
+		int		error;
+
+		trace_xfs_dqreclaim_dirty(dqp);
+
+		/* we have to drop the LRU lock to flush the dquot */
+		spin_unlock(lru_lock);
+
+		error = xfs_qm_dqflush(dqp, &bp);
+		if (error) {
+			xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
+				 __func__, dqp);
+			goto out_unlock_dirty;
+		}
+
+		xfs_buf_delwri_queue(bp, &isol->buffers);
+		xfs_buf_relse(bp);
+		goto out_unlock_dirty;
+	}
+	xfs_dqfunlock(dqp);
+
+	/*
+	 * Prevent lookups now that we are past the point of no return.
+	 */
+	dqp->dq_flags |= XFS_DQ_FREEING;
+	xfs_dqunlock(dqp);
+
+	ASSERT(dqp->q_nrefs == 0);
+	list_move_tail(&dqp->q_lru, &isol->dispose);
+	XFS_STATS_DEC(xs_qm_dquot_unused);
+	trace_xfs_dqreclaim_done(dqp);
+	XFS_STATS_INC(xs_qm_dqreclaims);
+	return LRU_REMOVED;
+
+out_miss_busy:
+	trace_xfs_dqreclaim_busy(dqp);
+	XFS_STATS_INC(xs_qm_dqreclaim_misses);
+	return LRU_SKIP;
+
+out_unlock_dirty:
+	trace_xfs_dqreclaim_busy(dqp);
+	XFS_STATS_INC(xs_qm_dqreclaim_misses);
+	return LRU_RETRY;
+}
+
+static long
+xfs_qm_shrink_scan(
+	struct shrinker		*shrink,
+	struct shrink_control	*sc)
+{
+	struct xfs_quotainfo	*qi = container_of(shrink,
+					struct xfs_quotainfo, qi_shrinker);
+	struct xfs_qm_isolate	isol;
+	long			freed;
+	int			error;
+	unsigned long		nr_to_scan = sc->nr_to_scan;
+
+	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
+		return 0;
+
+	INIT_LIST_HEAD(&isol.buffers);
+	INIT_LIST_HEAD(&isol.dispose);
+
+	freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate, &isol,
+					&nr_to_scan);
+
+	error = xfs_buf_delwri_submit(&isol.buffers);
+	if (error)
+		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
+
+	while (!list_empty(&isol.dispose)) {
+		struct xfs_dquot	*dqp;
+
+		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
+		list_del_init(&dqp->q_lru);
+		xfs_qm_dqfree_one(dqp);
+	}
+
+	return freed;
+}
+
+static long
+xfs_qm_shrink_count(
+	struct shrinker		*shrink,
+	struct shrink_control	*sc)
+{
+	struct xfs_quotainfo	*qi = container_of(shrink,
+					struct xfs_quotainfo, qi_shrinker);
+
+	return list_lru_count_node(&qi->qi_lru, sc->nid);
+}
+
 /*
  * This initializes all the quota information that's kept in the
  * mount structure
@@ -711,9 +844,7 @@ xfs_qm_init_quotainfo(
 	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
 	mutex_init(&qinf->qi_tree_lock);
 
-	INIT_LIST_HEAD(&qinf->qi_lru_list);
-	qinf->qi_lru_count = 0;
-	mutex_init(&qinf->qi_lru_lock);
+	list_lru_init(&qinf->qi_lru);
 
 	/* mutex used to serialize quotaoffs */
 	mutex_init(&qinf->qi_quotaofflock);
@@ -779,8 +910,10 @@ xfs_qm_init_quotainfo(
 		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
 	}
 
-	qinf->qi_shrinker.shrink = xfs_qm_shake;
+	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
+	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
 	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
+	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
 	register_shrinker(&qinf->qi_shrinker);
 	return 0;
 }
@@ -1599,132 +1732,6 @@ xfs_qm_dqfree_one(
 	xfs_qm_dqdestroy(dqp);
 }
 
-STATIC void
-xfs_qm_dqreclaim_one(
-	struct xfs_dquot	*dqp,
-	struct list_head	*buffer_list,
-	struct list_head	*dispose_list)
-{
-	struct xfs_mount	*mp = dqp->q_mount;
-	struct xfs_quotainfo	*qi = mp->m_quotainfo;
-	int			error;
-
-	if (!xfs_dqlock_nowait(dqp))
-		goto out_move_tail;
-
-	/*
-	 * This dquot has acquired a reference in the meantime remove it from
-	 * the freelist and try again.
-	 */
-	if (dqp->q_nrefs) {
-		xfs_dqunlock(dqp);
-
-		trace_xfs_dqreclaim_want(dqp);
-		XFS_STATS_INC(xs_qm_dqwants);
-
-		list_del_init(&dqp->q_lru);
-		qi->qi_lru_count--;
-		XFS_STATS_DEC(xs_qm_dquot_unused);
-		return;
-	}
-
-	/*
-	 * Try to grab the flush lock. If this dquot is in the process of
-	 * getting flushed to disk, we don't want to reclaim it.
-	 */
-	if (!xfs_dqflock_nowait(dqp))
-		goto out_unlock_move_tail;
-
-	if (XFS_DQ_IS_DIRTY(dqp)) {
-		struct xfs_buf	*bp = NULL;
-
-		trace_xfs_dqreclaim_dirty(dqp);
-
-		error = xfs_qm_dqflush(dqp, &bp);
-		if (error) {
-			xfs_warn(mp, "%s: dquot %p flush failed",
-				 __func__, dqp);
-			goto out_unlock_move_tail;
-		}
-
-		xfs_buf_delwri_queue(bp, buffer_list);
-		xfs_buf_relse(bp);
-		/*
-		 * Give the dquot another try on the freelist, as the
-		 * flushing will take some time.
-		 */
-		goto out_unlock_move_tail;
-	}
-	xfs_dqfunlock(dqp);
-
-	/*
-	 * Prevent lookups now that we are past the point of no return.
-	 */
-	dqp->dq_flags |= XFS_DQ_FREEING;
-	xfs_dqunlock(dqp);
-
-	ASSERT(dqp->q_nrefs == 0);
-	list_move_tail(&dqp->q_lru, dispose_list);
-	qi->qi_lru_count--;
-	XFS_STATS_DEC(xs_qm_dquot_unused);
-
-	trace_xfs_dqreclaim_done(dqp);
-	XFS_STATS_INC(xs_qm_dqreclaims);
-	return;
-
-	/*
-	 * Move the dquot to the tail of the list so that we don't spin on it.
-	 */
-out_unlock_move_tail:
-	xfs_dqunlock(dqp);
-out_move_tail:
-	list_move_tail(&dqp->q_lru, &qi->qi_lru_list);
-	trace_xfs_dqreclaim_busy(dqp);
-	XFS_STATS_INC(xs_qm_dqreclaim_misses);
-}
-
-STATIC int
-xfs_qm_shake(
-	struct shrinker		*shrink,
-	struct shrink_control	*sc)
-{
-	struct xfs_quotainfo	*qi =
-		container_of(shrink, struct xfs_quotainfo, qi_shrinker);
-	int			nr_to_scan = sc->nr_to_scan;
-	LIST_HEAD		(buffer_list);
-	LIST_HEAD		(dispose_list);
-	struct xfs_dquot	*dqp;
-	int			error;
-
-	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
-		return 0;
-	if (!nr_to_scan)
-		goto out;
-
-	mutex_lock(&qi->qi_lru_lock);
-	while (!list_empty(&qi->qi_lru_list)) {
-		if (nr_to_scan-- <= 0)
-			break;
-		dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot,
-				       q_lru);
-		xfs_qm_dqreclaim_one(dqp, &buffer_list, &dispose_list);
-	}
-	mutex_unlock(&qi->qi_lru_lock);
-
-	error = xfs_buf_delwri_submit(&buffer_list);
-	if (error)
-		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
-
-	while (!list_empty(&dispose_list)) {
-		dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru);
-		list_del_init(&dqp->q_lru);
-		xfs_qm_dqfree_one(dqp);
-	}
-
-out:
-	return vfs_pressure_ratio(qi->qi_lru_count);
-}
-
 /*
  * Start a transaction and write the incore superblock changes to
  * disk. flags parameter indicates which fields have changed.
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 670cd4464070..2b602df9c242 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -49,9 +49,7 @@ typedef struct xfs_quotainfo {
 	struct xfs_inode	*qi_uquotaip;	/* user quota inode */
 	struct xfs_inode	*qi_gquotaip;	/* group quota inode */
 	struct xfs_inode	*qi_pquotaip;	/* project quota inode */
-	struct list_head qi_lru_list;
-	struct mutex	 qi_lru_lock;
-	int		 qi_lru_count;
+	struct list_lru	 qi_lru;
 	int		 qi_dquots;
 	time_t		 qi_btimelimit;	/* limit for blks timer */
 	time_t		 qi_itimelimit;	/* limit for inodes timer */