author    Linus Torvalds <torvalds@linux-foundation.org>  2010-07-19 23:18:24 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-07-19 23:18:24 -0400
commit    620d0be88188cd3dc79db3f5eab1420d6f97c70c (patch)
tree      99dc63926b972288b176be6ed3663e6390289ce5 /fs
parent    ee1039307a8a64b038f9b8cdc6f9120ecd9dfe9b (diff)
parent    16fd5367370099b59d96e30bb7d9de8d419659f2 (diff)
Merge branch 'shrinker' of git://git.kernel.org/pub/scm/linux/kernel/git/dgc/xfsdev
* 'shrinker' of git://git.kernel.org/pub/scm/linux/kernel/git/dgc/xfsdev:
  xfs: track AGs with reclaimable inodes in per-ag radix tree
  xfs: convert inode shrinker to per-filesystem contexts
  mm: add context argument to shrinker callback
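The common thread in these three commits is the new callback signature: the mm code now passes the registered struct shrinker back into the ->shrink() callback, so a filesystem can embed the shrinker in its own mount structure and recover per-mount state with container_of() instead of keeping a global list of mounts. A minimal sketch of the pattern against this 2010-era API follows; the my_fs_* names are hypothetical, while struct shrinker, register_shrinker(), unregister_shrinker() and DEFAULT_SEEKS are the kernel interfaces actually used in the hunks below.

#include <linux/kernel.h>       /* container_of() */
#include <linux/mm.h>           /* struct shrinker, register_shrinker(), DEFAULT_SEEKS */

/* hypothetical per-mount state embedding its own shrinker */
struct my_fs_mount {
        struct shrinker m_shrink;
        int             m_nr_reclaimable;
};

/* new-style callback: the shrinker argument identifies which mount to scan */
static int my_fs_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
        struct my_fs_mount *mp =
                container_of(shrink, struct my_fs_mount, m_shrink);

        if (nr_to_scan) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;      /* cannot recurse into the filesystem */
                /* ... reclaim up to nr_to_scan objects owned by mp ... */
        }
        return mp->m_nr_reclaimable;    /* objects still reclaimable */
}

/* one shrinker per mount, registered at mount time */
static void my_fs_shrinker_register(struct my_fs_mount *mp)
{
        mp->m_shrink.shrink = my_fs_shrink;
        mp->m_shrink.seeks = DEFAULT_SEEKS;
        register_shrinker(&mp->m_shrink);
}

static void my_fs_shrinker_unregister(struct my_fs_mount *mp)
{
        unregister_shrinker(&mp->m_shrink);
}

This mirrors what the xfs_sync.c hunks below do with m_inode_shrink in struct xfs_mount, replacing the old global xfs_mount_list with a per-mount registration.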
Diffstat (limited to 'fs')
-rw-r--r--  fs/dcache.c  2
-rw-r--r--  fs/gfs2/glock.c  2
-rw-r--r--  fs/gfs2/quota.c  2
-rw-r--r--  fs/gfs2/quota.h  2
-rw-r--r--  fs/inode.c  2
-rw-r--r--  fs/mbcache.c  5
-rw-r--r--  fs/nfs/dir.c  2
-rw-r--r--  fs/nfs/internal.h  3
-rw-r--r--  fs/quota/dquot.c  2
-rw-r--r--  fs/ubifs/shrinker.c  2
-rw-r--r--  fs/ubifs/ubifs.h  2
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c  5
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c  2
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c  130
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h  2
-rw-r--r--  fs/xfs/linux-2.6/xfs_trace.h  3
-rw-r--r--  fs/xfs/quota/xfs_qm.c  7
-rw-r--r--  fs/xfs/xfs_mount.h  2
18 files changed, 103 insertions(+), 74 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index c8c78ba0782..86d4db15473 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -896,7 +896,7 @@ EXPORT_SYMBOL(shrink_dcache_parent);
  *
  * In this case we return -1 to tell the caller that we baled.
  */
-static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
+static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
         if (nr) {
                 if (!(gfp_mask & __GFP_FS))
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index dbab3fdc258..0898f3ec821 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1358,7 +1358,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 }
 
 
-static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
+static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
         struct gfs2_glock *gl;
         int may_demote;
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index b256d6f2428..8f02d3db8f4 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -77,7 +77,7 @@ static LIST_HEAD(qd_lru_list);
 static atomic_t qd_lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(qd_lru_lock);
 
-int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
+int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
         struct gfs2_quota_data *qd;
         struct gfs2_sbd *sdp;
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 195f60c8bd1..e7d236ca48b 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -51,7 +51,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
         return ret;
 }
 
-extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask);
+extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask);
 extern const struct quotactl_ops gfs2_quotactl_ops;
 
 #endif /* __QUOTA_DOT_H__ */
diff --git a/fs/inode.c b/fs/inode.c
index 2bee20ae3d6..722860b323a 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -512,7 +512,7 @@ static void prune_icache(int nr_to_scan)
  * This function is passed the number of inodes to scan, and it returns the
  * total number of remaining possibly-reclaimable inodes.
  */
-static int shrink_icache_memory(int nr, gfp_t gfp_mask)
+static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
         if (nr) {
                 /*
diff --git a/fs/mbcache.c b/fs/mbcache.c
index ec88ff3d04a..e28f21b9534 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -115,7 +115,7 @@ mb_cache_indexes(struct mb_cache *cache)
  * What the mbcache registers as to get shrunk dynamically.
  */
 
-static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
+static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
 
 static struct shrinker mb_cache_shrinker = {
         .shrink = mb_cache_shrink_fn,
@@ -191,13 +191,14 @@ forget:
  * This function is called by the kernel memory management when memory
  * gets low.
  *
+ * @shrink: (ignored)
  * @nr_to_scan: Number of objects to scan
  * @gfp_mask: (ignored)
  *
  * Returns the number of objects which are present in the cache.
  */
 static int
-mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
+mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
         LIST_HEAD(free_list);
         struct list_head *l, *ltmp;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 782b431ef91..e60416d3f81 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1710,7 +1710,7 @@ static void nfs_access_free_list(struct list_head *head)
         }
 }
 
-int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
+int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
         LIST_HEAD(head);
         struct nfs_inode *nfsi;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index d8bd619e386..e70f44b9b3f 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -205,7 +205,8 @@ extern struct rpc_procinfo nfs4_procedures[];
 void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
 
 /* dir.c */
-extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask);
+extern int nfs_access_cache_shrinker(struct shrinker *shrink,
+                                        int nr_to_scan, gfp_t gfp_mask);
 
 /* inode.c */
 extern struct workqueue_struct *nfsiod_workqueue;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 12c233da1b6..437d2ca2de9 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -676,7 +676,7 @@ static void prune_dqcache(int count)
  * This is called from kswapd when we think we need some
  * more memory
  */
-static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
+static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
         if (nr) {
                 spin_lock(&dq_list_lock);
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index 02feb59cefc..0b201114a5a 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -277,7 +277,7 @@ static int kick_a_thread(void)
         return 0;
 }
 
-int ubifs_shrinker(int nr, gfp_t gfp_mask)
+int ubifs_shrinker(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
         int freed, contention = 0;
         long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 2eef553d50c..04310878f44 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1575,7 +1575,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot);
 int ubifs_tnc_end_commit(struct ubifs_info *c);
 
 /* shrinker.c */
-int ubifs_shrinker(int nr_to_scan, gfp_t gfp_mask);
+int ubifs_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
 
 /* commit.c */
 int ubifs_bg_thread(void *info);
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 649ade8ef59..2ee3f7a6016 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -45,7 +45,7 @@
 
 static kmem_zone_t *xfs_buf_zone;
 STATIC int xfsbufd(void *);
-STATIC int xfsbufd_wakeup(int, gfp_t);
+STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
 static struct shrinker xfs_buf_shake = {
         .shrink = xfsbufd_wakeup,
@@ -340,7 +340,7 @@ _xfs_buf_lookup_pages(
                         __func__, gfp_mask);
 
                 XFS_STATS_INC(xb_page_retries);
-                xfsbufd_wakeup(0, gfp_mask);
+                xfsbufd_wakeup(NULL, 0, gfp_mask);
                 congestion_wait(BLK_RW_ASYNC, HZ/50);
                 goto retry;
         }
@@ -1762,6 +1762,7 @@ xfs_buf_runall_queues(
 
 STATIC int
 xfsbufd_wakeup(
+        struct shrinker         *shrink,
         int                     priority,
         gfp_t                   mask)
 {
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index f2d1718c916..80938c736c2 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -1883,7 +1883,6 @@ init_xfs_fs(void)
                 goto out_cleanup_procfs;
 
         vfs_initquota();
-        xfs_inode_shrinker_init();
 
         error = register_filesystem(&xfs_fs_type);
         if (error)
@@ -1911,7 +1910,6 @@ exit_xfs_fs(void)
 {
         vfs_exitquota();
         unregister_filesystem(&xfs_fs_type);
-        xfs_inode_shrinker_destroy();
         xfs_sysctl_unregister();
         xfs_cleanup_procfs();
         xfs_buf_terminate();
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index ef7f0218bcc..a51a07c3a70 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -144,6 +144,41 @@ restart:
         return last_error;
 }
 
+/*
+ * Select the next per-ag structure to iterate during the walk. The reclaim
+ * walk is optimised only to walk AGs with reclaimable inodes in them.
+ */
+static struct xfs_perag *
+xfs_inode_ag_iter_next_pag(
+        struct xfs_mount        *mp,
+        xfs_agnumber_t          *first,
+        int                     tag)
+{
+        struct xfs_perag        *pag = NULL;
+
+        if (tag == XFS_ICI_RECLAIM_TAG) {
+                int found;
+                int ref;
+
+                spin_lock(&mp->m_perag_lock);
+                found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
+                                (void **)&pag, *first, 1, tag);
+                if (found <= 0) {
+                        spin_unlock(&mp->m_perag_lock);
+                        return NULL;
+                }
+                *first = pag->pag_agno + 1;
+                /* open coded pag reference increment */
+                ref = atomic_inc_return(&pag->pag_ref);
+                spin_unlock(&mp->m_perag_lock);
+                trace_xfs_perag_get_reclaim(mp, pag->pag_agno, ref, _RET_IP_);
+        } else {
+                pag = xfs_perag_get(mp, *first);
+                (*first)++;
+        }
+        return pag;
+}
+
 int
 xfs_inode_ag_iterator(
         struct xfs_mount        *mp,
@@ -154,16 +189,15 @@ xfs_inode_ag_iterator(
         int                     exclusive,
         int                     *nr_to_scan)
 {
+        struct xfs_perag        *pag;
         int                     error = 0;
         int                     last_error = 0;
         xfs_agnumber_t          ag;
         int                     nr;
 
         nr = nr_to_scan ? *nr_to_scan : INT_MAX;
-        for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
-                struct xfs_perag        *pag;
-
-                pag = xfs_perag_get(mp, ag);
+        ag = 0;
+        while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, tag))) {
                 error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
                                                 exclusive, &nr);
                 xfs_perag_put(pag);
@@ -640,6 +674,17 @@ __xfs_inode_set_reclaim_tag(
         radix_tree_tag_set(&pag->pag_ici_root,
                            XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
                            XFS_ICI_RECLAIM_TAG);
+
+        if (!pag->pag_ici_reclaimable) {
+                /* propagate the reclaim tag up into the perag radix tree */
+                spin_lock(&ip->i_mount->m_perag_lock);
+                radix_tree_tag_set(&ip->i_mount->m_perag_tree,
+                                XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
+                                XFS_ICI_RECLAIM_TAG);
+                spin_unlock(&ip->i_mount->m_perag_lock);
+                trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
+                                                        -1, _RET_IP_);
+        }
         pag->pag_ici_reclaimable++;
 }
 
@@ -674,6 +719,16 @@ __xfs_inode_clear_reclaim_tag(
         radix_tree_tag_clear(&pag->pag_ici_root,
                         XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
         pag->pag_ici_reclaimable--;
+        if (!pag->pag_ici_reclaimable) {
+                /* clear the reclaim tag from the perag radix tree */
+                spin_lock(&ip->i_mount->m_perag_lock);
+                radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
+                                XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
+                                XFS_ICI_RECLAIM_TAG);
+                spin_unlock(&ip->i_mount->m_perag_lock);
+                trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
+                                                        -1, _RET_IP_);
+        }
 }
 
 /*
@@ -828,83 +883,52 @@ xfs_reclaim_inodes(
 
 /*
  * Shrinker infrastructure.
- *
- * This is all far more complex than it needs to be. It adds a global list of
- * mounts because the shrinkers can only call a global context. We need to make
- * the shrinkers pass a context to avoid the need for global state.
  */
-static LIST_HEAD(xfs_mount_list);
-static struct rw_semaphore xfs_mount_list_lock;
-
 static int
 xfs_reclaim_inode_shrink(
+        struct shrinker         *shrink,
         int                     nr_to_scan,
         gfp_t                   gfp_mask)
 {
         struct xfs_mount        *mp;
         struct xfs_perag        *pag;
         xfs_agnumber_t          ag;
-        int                     reclaimable = 0;
+        int                     reclaimable;
 
+        mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
         if (nr_to_scan) {
                 if (!(gfp_mask & __GFP_FS))
                         return -1;
 
-                down_read(&xfs_mount_list_lock);
-                list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
-                        xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
+                xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
                                 XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
-                        if (nr_to_scan <= 0)
-                                break;
-                }
-                up_read(&xfs_mount_list_lock);
-        }
+                /* if we don't exhaust the scan, don't bother coming back */
+                if (nr_to_scan > 0)
+                        return -1;
+        }
 
-        down_read(&xfs_mount_list_lock);
-        list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
-                for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
-                        pag = xfs_perag_get(mp, ag);
+        reclaimable = 0;
+        ag = 0;
+        while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag,
+                                        XFS_ICI_RECLAIM_TAG))) {
                 reclaimable += pag->pag_ici_reclaimable;
                 xfs_perag_put(pag);
-                }
         }
-        up_read(&xfs_mount_list_lock);
         return reclaimable;
 }
 
-static struct shrinker xfs_inode_shrinker = {
-        .shrink = xfs_reclaim_inode_shrink,
-        .seeks = DEFAULT_SEEKS,
-};
-
-void __init
-xfs_inode_shrinker_init(void)
-{
-        init_rwsem(&xfs_mount_list_lock);
-        register_shrinker(&xfs_inode_shrinker);
-}
-
-void
-xfs_inode_shrinker_destroy(void)
-{
-        ASSERT(list_empty(&xfs_mount_list));
-        unregister_shrinker(&xfs_inode_shrinker);
-}
-
 void
 xfs_inode_shrinker_register(
         struct xfs_mount        *mp)
 {
-        down_write(&xfs_mount_list_lock);
-        list_add_tail(&mp->m_mplist, &xfs_mount_list);
-        up_write(&xfs_mount_list_lock);
+        mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
+        mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
+        register_shrinker(&mp->m_inode_shrink);
 }
 
 void
 xfs_inode_shrinker_unregister(
         struct xfs_mount        *mp)
 {
-        down_write(&xfs_mount_list_lock);
-        list_del(&mp->m_mplist);
-        up_write(&xfs_mount_list_lock);
+        unregister_shrinker(&mp->m_inode_shrink);
 }
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index cdcbaaca988..e28139aaa4a 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -55,8 +55,6 @@ int xfs_inode_ag_iterator(struct xfs_mount *mp,
         int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
         int flags, int tag, int write_lock, int *nr_to_scan);
 
-void xfs_inode_shrinker_init(void);
-void xfs_inode_shrinker_destroy(void);
 void xfs_inode_shrinker_register(struct xfs_mount *mp);
 void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
 
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index 73d5aa11738..30282069090 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -124,7 +124,10 @@ DEFINE_EVENT(xfs_perag_class, name, \
                  unsigned long caller_ip), \
         TP_ARGS(mp, agno, refcount, caller_ip))
 DEFINE_PERAG_REF_EVENT(xfs_perag_get);
+DEFINE_PERAG_REF_EVENT(xfs_perag_get_reclaim);
 DEFINE_PERAG_REF_EVENT(xfs_perag_put);
+DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
+DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
 
 TRACE_EVENT(xfs_attr_list_node_descend,
         TP_PROTO(struct xfs_attr_list_context *ctx,
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 8c117ff2e3a..67c018392d6 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -69,7 +69,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
 
 STATIC int      xfs_qm_init_quotainos(xfs_mount_t *);
 STATIC int      xfs_qm_init_quotainfo(xfs_mount_t *);
-STATIC int      xfs_qm_shake(int, gfp_t);
+STATIC int      xfs_qm_shake(struct shrinker *, int, gfp_t);
 
 static struct shrinker xfs_qm_shaker = {
         .shrink = xfs_qm_shake,
@@ -2117,7 +2117,10 @@ xfs_qm_shake_freelist(
  */
 /* ARGSUSED */
 STATIC int
-xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
+xfs_qm_shake(
+        struct shrinker *shrink,
+        int             nr_to_scan,
+        gfp_t           gfp_mask)
 {
         int             ndqused, nfree, n;
 
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 1d2c7eed4ed..5761087ee8e 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -259,7 +259,7 @@ typedef struct xfs_mount {
         wait_queue_head_t       m_wait_single_sync_task;
         __int64_t               m_update_flags; /* sb flags we need to update
                                                    on the next remount,rw */
-        struct list_head        m_mplist;       /* inode shrinker mount list */
+        struct shrinker         m_inode_shrink; /* inode reclaim shrinker */
 } xfs_mount_t;
 
 /*