author		Ying Han <yinghan@google.com>	2011-05-24 20:12:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 11:39:26 -0400
commit		1495f230fa7750479c79e3656286b9183d662077 (patch)
tree		e5e233bb9fe1916ccc7281e7dcc71b1572fb22c5
parent		a09ed5e00084448453c8bada4dcd31e5fbfc2f21 (diff)
vmscan: change shrinker API by passing shrink_control struct
Change each shrinker's API by consolidating the existing parameters into
shrink_control struct. This will simplify any further features added w/o
touching each file of shrinker.

[akpm@linux-foundation.org: fix build]
[akpm@linux-foundation.org: fix warning]
[kosaki.motohiro@jp.fujitsu.com: fix up new shrinker API]
[akpm@linux-foundation.org: fix xfs warning]
[akpm@linux-foundation.org: update gfs2]
Signed-off-by: Ying Han <yinghan@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Pavel Emelyanov <xemul@openvz.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
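[Editor's note: the shape of the conversion, as a minimal before/after sketch. The
shrink_control fields and the callback signature are taken from this patch; my_shrink
and its cache are hypothetical stand-ins for the shrinkers converted below.]

/* Old API: each parameter passed individually. */
static int my_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);

/* New API: parameters consolidated into shrink_control, so future
 * fields can be added without touching every shrinker. */
static int my_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
        int nr_to_scan = sc->nr_to_scan;        /* objects to scan this call */
        gfp_t gfp_mask = sc->gfp_mask;          /* allocation being satisfied */

        if (!(gfp_mask & __GFP_FS))
                return -1;                      /* cannot scan safely now */
        /* ... prune up to nr_to_scan objects from the cache ... */
        return 0;                               /* objects left in the cache */
}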
-rw-r--r--	arch/x86/kvm/mmu.c			3
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c		9
-rw-r--r--	drivers/gpu/drm/ttm/ttm_page_alloc.c	4
-rw-r--r--	drivers/staging/zcache/zcache.c		5
-rw-r--r--	fs/dcache.c				8
-rw-r--r--	fs/drop_caches.c			3
-rw-r--r--	fs/gfs2/glock.c				5
-rw-r--r--	fs/gfs2/quota.c				12
-rw-r--r--	fs/gfs2/quota.h				4
-rw-r--r--	fs/inode.c				6
-rw-r--r--	fs/mbcache.c				10
-rw-r--r--	fs/nfs/dir.c				5
-rw-r--r--	fs/nfs/internal.h			2
-rw-r--r--	fs/quota/dquot.c			5
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c		4
-rw-r--r--	fs/xfs/linux-2.6/xfs_sync.c		5
-rw-r--r--	fs/xfs/quota/xfs_qm.c			6
-rw-r--r--	include/linux/mm.h			19
-rw-r--r--	mm/memory-failure.c			3
-rw-r--r--	mm/vmscan.c				34
21 files changed, 95 insertions(+), 61 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 28418054b880..bd14bb4c8594 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3545,10 +3545,11 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
 		return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
 }
 
-static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
+static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct kvm *kvm;
 	struct kvm *kvm_freed = NULL;
+	int nr_to_scan = sc->nr_to_scan;
 
 	if (nr_to_scan == 0)
 		goto out;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c6289034e29a..0b2e167d2bce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -56,9 +56,7 @@ static int i915_gem_phys_pwrite(struct drm_device *dev,
 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
 
 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
-				    int nr_to_scan,
-				    gfp_t gfp_mask);
-
+				    struct shrink_control *sc);
 
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -4092,9 +4090,7 @@ i915_gpu_is_active(struct drm_device *dev)
 }
 
 static int
-i915_gem_inactive_shrink(struct shrinker *shrinker,
-			 int nr_to_scan,
-			 gfp_t gfp_mask)
+i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(shrinker,
@@ -4102,6 +4098,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker,
 			     mm.inactive_shrinker);
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_i915_gem_object *obj, *next;
+	int nr_to_scan = sc->nr_to_scan;
 	int cnt;
 
 	if (!mutex_trylock(&dev->struct_mutex))
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 9d9d92945f8c..d948575717bf 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -395,12 +395,14 @@ static int ttm_pool_get_num_unused_pages(void)
 /**
  * Callback for mm to request pool to reduce number of page held.
  */
-static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
+static int ttm_pool_mm_shrink(struct shrinker *shrink,
+			      struct shrink_control *sc)
 {
 	static atomic_t start_pool = ATOMIC_INIT(0);
 	unsigned i;
 	unsigned pool_offset = atomic_add_return(1, &start_pool);
 	struct ttm_page_pool *pool;
+	int shrink_pages = sc->nr_to_scan;
 
 	pool_offset = pool_offset % NUM_POOLS;
 	/* select start pool in round robin fashion */
diff --git a/drivers/staging/zcache/zcache.c b/drivers/staging/zcache/zcache.c
index b8a2b30a1572..77ac2d4d3ef1 100644
--- a/drivers/staging/zcache/zcache.c
+++ b/drivers/staging/zcache/zcache.c
@@ -1181,9 +1181,12 @@ static bool zcache_freeze;
 /*
  * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
  */
-static int shrink_zcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+static int shrink_zcache_memory(struct shrinker *shrink,
+				struct shrink_control *sc)
 {
 	int ret = -1;
+	int nr = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
 
 	if (nr >= 0) {
 		if (!(gfp_mask & __GFP_FS))
diff --git a/fs/dcache.c b/fs/dcache.c
index 18b2a1f10ed8..37f72ee5bf7c 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1220,7 +1220,7 @@ void shrink_dcache_parent(struct dentry * parent)
 EXPORT_SYMBOL(shrink_dcache_parent);
 
 /*
- * Scan `nr' dentries and return the number which remain.
+ * Scan `sc->nr_slab_to_reclaim' dentries and return the number which remain.
  *
  * We need to avoid reentering the filesystem if the caller is performing a
  * GFP_NOFS allocation attempt.  One example deadlock is:
@@ -1231,8 +1231,12 @@ EXPORT_SYMBOL(shrink_dcache_parent);
  *
  * In this case we return -1 to tell the caller that we baled.
  */
-static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+static int shrink_dcache_memory(struct shrinker *shrink,
+				struct shrink_control *sc)
 {
+	int nr = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
+
 	if (nr) {
 		if (!(gfp_mask & __GFP_FS))
 			return -1;
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 440999c24353..c00e055b6282 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -42,11 +42,10 @@ static void drop_slab(void)
 	int nr_objects;
 	struct shrink_control shrink = {
 		.gfp_mask = GFP_KERNEL,
-		.nr_scanned = 1000,
 	};
 
 	do {
-		nr_objects = shrink_slab(&shrink, 1000);
+		nr_objects = shrink_slab(&shrink, 1000, 1000);
 	} while (nr_objects > 10);
 }
 
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a2a6abbccc07..2792a790e50b 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1346,11 +1346,14 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 }
 
 
-static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+static int gfs2_shrink_glock_memory(struct shrinker *shrink,
+				    struct shrink_control *sc)
 {
 	struct gfs2_glock *gl;
 	int may_demote;
 	int nr_skipped = 0;
+	int nr = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
 	LIST_HEAD(skipped);
 
 	if (nr == 0)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index e23d9864c418..42e8d23bc047 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -38,6 +38,7 @@
 
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
@@ -77,19 +78,20 @@ static LIST_HEAD(qd_lru_list);
 static atomic_t qd_lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(qd_lru_lock);
 
-int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct gfs2_quota_data *qd;
 	struct gfs2_sbd *sdp;
+	int nr_to_scan = sc->nr_to_scan;
 
-	if (nr == 0)
+	if (nr_to_scan == 0)
 		goto out;
 
-	if (!(gfp_mask & __GFP_FS))
+	if (!(sc->gfp_mask & __GFP_FS))
 		return -1;
 
 	spin_lock(&qd_lru_lock);
-	while (nr && !list_empty(&qd_lru_list)) {
+	while (nr_to_scan && !list_empty(&qd_lru_list)) {
 		qd = list_entry(qd_lru_list.next,
 				struct gfs2_quota_data, qd_reclaim);
 		sdp = qd->qd_gl->gl_sbd;
@@ -110,7 +112,7 @@ int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 		spin_unlock(&qd_lru_lock);
 		kmem_cache_free(gfs2_quotad_cachep, qd);
 		spin_lock(&qd_lru_lock);
-		nr--;
+		nr_to_scan--;
 	}
 	spin_unlock(&qd_lru_lock);
 
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index e7d236ca48bd..90bf1c302a98 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -12,6 +12,7 @@
 
 struct gfs2_inode;
 struct gfs2_sbd;
+struct shrink_control;
 
 #define NO_QUOTA_CHANGE ((u32)-1)
 
@@ -51,7 +52,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
 	return ret;
 }
 
-extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask);
+extern int gfs2_shrink_qd_memory(struct shrinker *shrink,
+				 struct shrink_control *sc);
 extern const struct quotactl_ops gfs2_quotactl_ops;
 
 #endif /* __QUOTA_DOT_H__ */
diff --git a/fs/inode.c b/fs/inode.c
index 88aa3bcc7681..990d284877a1 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -751,8 +751,12 @@ static void prune_icache(int nr_to_scan)
  * This function is passed the number of inodes to scan, and it returns the
  * total number of remaining possibly-reclaimable inodes.
  */
-static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+static int shrink_icache_memory(struct shrinker *shrink,
+				struct shrink_control *sc)
 {
+	int nr = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
+
 	if (nr) {
 		/*
 		 * Nasty deadlock avoidance.  We may hold various FS locks,
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 2f174be06555..8c32ef3ba88e 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -90,7 +90,8 @@ static DEFINE_SPINLOCK(mb_cache_spinlock);
  * What the mbcache registers as to get shrunk dynamically.
  */
 
-static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
+static int mb_cache_shrink_fn(struct shrinker *shrink,
+			      struct shrink_control *sc);
 
 static struct shrinker mb_cache_shrinker = {
 	.shrink = mb_cache_shrink_fn,
@@ -156,18 +157,19 @@ forget:
  * gets low.
  *
  * @shrink: (ignored)
- * @nr_to_scan: Number of objects to scan
- * @gfp_mask: (ignored)
+ * @sc: shrink_control passed from reclaim
  *
  * Returns the number of objects which are present in the cache.
  */
 static int
-mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
+mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
 {
 	LIST_HEAD(free_list);
 	struct mb_cache *cache;
 	struct mb_cache_entry *entry, *tmp;
 	int count = 0;
+	int nr_to_scan = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
 
 	mb_debug("trying to free %d entries", nr_to_scan);
 	spin_lock(&mb_cache_spinlock);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 7237672216c8..424e47773a84 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2042,11 +2042,14 @@ static void nfs_access_free_list(struct list_head *head)
 	}
 }
 
-int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
+int nfs_access_cache_shrinker(struct shrinker *shrink,
+			      struct shrink_control *sc)
 {
 	LIST_HEAD(head);
 	struct nfs_inode *nfsi, *next;
 	struct nfs_access_entry *cache;
+	int nr_to_scan = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
 
 	if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
 		return (nr_to_scan == 0) ? 0 : -1;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index ce118ce885dd..2df6ca7b5898 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -234,7 +234,7 @@ extern int nfs_init_client(struct nfs_client *clp,
 
 /* dir.c */
 extern int nfs_access_cache_shrinker(struct shrinker *shrink,
-					int nr_to_scan, gfp_t gfp_mask);
+					struct shrink_control *sc);
 
 /* inode.c */
 extern struct workqueue_struct *nfsiod_workqueue;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index d3c032f5fa0a..5b572c89e6c4 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -691,8 +691,11 @@ static void prune_dqcache(int count)
  * This is called from kswapd when we think we need some
  * more memory
  */
-static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
+static int shrink_dqcache_memory(struct shrinker *shrink,
+				 struct shrink_control *sc)
 {
+	int nr = sc->nr_to_scan;
+
 	if (nr) {
 		spin_lock(&dq_list_lock);
 		prune_dqcache(nr);
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 52b2b5da566e..5e68099db2a5 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1422,12 +1422,12 @@ restart:
 int
 xfs_buftarg_shrink(
 	struct shrinker		*shrink,
-	int			nr_to_scan,
-	gfp_t			mask)
+	struct shrink_control	*sc)
 {
 	struct xfs_buftarg	*btp = container_of(shrink,
 					struct xfs_buftarg, bt_shrinker);
 	struct xfs_buf		*bp;
+	int			nr_to_scan = sc->nr_to_scan;
 	LIST_HEAD(dispose);
 
 	if (!nr_to_scan)
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index cb1bb2080e44..8ecad5ff9f9b 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -1032,13 +1032,14 @@ xfs_reclaim_inodes(
 static int
 xfs_reclaim_inode_shrink(
 	struct shrinker		*shrink,
-	int			nr_to_scan,
-	gfp_t			gfp_mask)
+	struct shrink_control	*sc)
 {
 	struct xfs_mount	*mp;
 	struct xfs_perag	*pag;
 	xfs_agnumber_t		ag;
 	int			reclaimable;
+	int			nr_to_scan = sc->nr_to_scan;
+	gfp_t			gfp_mask = sc->gfp_mask;
 
 	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
 	if (nr_to_scan) {
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 69228aa8605a..b94dace4e785 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -60,7 +60,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
 
 STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
 STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
-STATIC int	xfs_qm_shake(struct shrinker *, int, gfp_t);
+STATIC int	xfs_qm_shake(struct shrinker *, struct shrink_control *);
 
 static struct shrinker xfs_qm_shaker = {
 	.shrink = xfs_qm_shake,
@@ -2009,10 +2009,10 @@ xfs_qm_shake_freelist(
 STATIC int
 xfs_qm_shake(
 	struct shrinker	*shrink,
-	int		nr_to_scan,
-	gfp_t		gfp_mask)
+	struct shrink_control *sc)
 {
 	int		ndqused, nfree, n;
+	gfp_t		gfp_mask = sc->gfp_mask;
 
 	if (!kmem_shake_allow(gfp_mask))
 		return 0;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 32cfa9602d00..5cbbf78eaac7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1166,18 +1166,20 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
  * We consolidate the values for easier extention later.
  */
 struct shrink_control {
-	unsigned long nr_scanned;
 	gfp_t gfp_mask;
+
+	/* How many slab objects shrinker() should scan and try to reclaim */
+	unsigned long nr_to_scan;
 };
 
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
- * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'.  It should
- * look through the least-recently-used 'nr_to_scan' entries and
- * attempt to free them up.  It should return the number of objects
- * which remain in the cache.  If it returns -1, it means it cannot do
- * any scanning at this time (eg. there is a risk of deadlock).
+ * 'sc' is passed shrink_control which includes a count 'nr_to_scan'
+ * and a 'gfpmask'.  It should look through the least-recently-used
+ * 'nr_to_scan' entries and attempt to free them up.  It should return
+ * the number of objects which remain in the cache.  If it returns -1, it means
+ * it cannot do any scanning at this time (eg. there is a risk of deadlock).
  *
  * The 'gfpmask' refers to the allocation we are currently trying to
  * fulfil.
@@ -1186,7 +1188,7 @@ struct shrink_control {
  * querying the cache size, so a fastpath for that case is appropriate.
  */
 struct shrinker {
-	int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask);
+	int (*shrink)(struct shrinker *, struct shrink_control *sc);
 	int seeks;	/* seeks to recreate an obj */
 
 	/* These are for internal use */
@@ -1640,7 +1642,8 @@ int in_gate_area_no_mm(unsigned long addr);
 int drop_caches_sysctl_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
 unsigned long shrink_slab(struct shrink_control *shrink,
-				unsigned long lru_pages);
+				unsigned long nr_pages_scanned,
+				unsigned long lru_pages);
 
 #ifndef CONFIG_MMU
 #define randomize_va_space 0
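[Editor's note: taken together, the mm.h hunks fix the contract a converted shrinker
must honour: a query call (nr_to_scan == 0) only reports the cache size, a non-zero
count requests reclaim, and -1 signals that scanning is unsafe right now. A hedged
sketch of a cache registering against the new API; my_cache_prune and my_cache_count
are hypothetical helpers, not part of this patch.]

static int my_cache_shrink(struct shrinker *s, struct shrink_control *sc)
{
        if (sc->nr_to_scan) {
                if (!(sc->gfp_mask & __GFP_FS))
                        return -1;              /* deadlock risk, bail out */
                my_cache_prune(sc->nr_to_scan); /* hypothetical reclaim helper */
        }
        return my_cache_count();                /* hypothetical; objects remaining */
}

static struct shrinker my_cache_shrinker = {
        .shrink = my_cache_shrink,
        .seeks  = DEFAULT_SEEKS,
};

/* register_shrinker(&my_cache_shrinker) at init and
 * unregister_shrinker(&my_cache_shrinker) at teardown are unchanged. */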
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 341341b2b47b..5c8f7e08928d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -241,10 +241,9 @@ void shake_page(struct page *p, int access)
 	do {
 		struct shrink_control shrink = {
 			.gfp_mask = GFP_KERNEL,
-			.nr_scanned = 1000,
 		};
 
-		nr = shrink_slab(&shrink, 1000);
+		nr = shrink_slab(&shrink, 1000, 1000);
 		if (page_count(p) == 1)
 			break;
 	} while (nr > 10);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e4e245ed1a5b..7e0116150dc7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -202,6 +202,14 @@ void unregister_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
+static inline int do_shrinker_shrink(struct shrinker *shrinker,
+				     struct shrink_control *sc,
+				     unsigned long nr_to_scan)
+{
+	sc->nr_to_scan = nr_to_scan;
+	return (*shrinker->shrink)(shrinker, sc);
+}
+
 #define SHRINK_BATCH 128
 /*
  * Call the shrink functions to age shrinkable caches
@@ -223,15 +231,14 @@ EXPORT_SYMBOL(unregister_shrinker);
  * Returns the number of slab objects which we shrunk.
  */
 unsigned long shrink_slab(struct shrink_control *shrink,
+			  unsigned long nr_pages_scanned,
 			  unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
 	unsigned long ret = 0;
-	unsigned long scanned = shrink->nr_scanned;
-	gfp_t gfp_mask = shrink->gfp_mask;
 
-	if (scanned == 0)
-		scanned = SWAP_CLUSTER_MAX;
+	if (nr_pages_scanned == 0)
+		nr_pages_scanned = SWAP_CLUSTER_MAX;
 
 	if (!down_read_trylock(&shrinker_rwsem)) {
 		/* Assume we'll be able to shrink next time */
@@ -244,8 +251,8 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		unsigned long total_scan;
 		unsigned long max_pass;
 
-		max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
-		delta = (4 * scanned) / shrinker->seeks;
+		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+		delta = (4 * nr_pages_scanned) / shrinker->seeks;
 		delta *= max_pass;
 		do_div(delta, lru_pages + 1);
 		shrinker->nr += delta;
@@ -272,9 +279,9 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 			int shrink_ret;
 			int nr_before;
 
-			nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
-			shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
-							gfp_mask);
+			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
+			shrink_ret = do_shrinker_shrink(shrinker, shrink,
+							this_scan);
 			if (shrink_ret == -1)
 				break;
 			if (shrink_ret < nr_before)
@@ -2072,8 +2079,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		lru_pages += zone_reclaimable_pages(zone);
 	}
 
-	shrink->nr_scanned = sc->nr_scanned;
-	shrink_slab(shrink, lru_pages);
+	shrink_slab(shrink, sc->nr_scanned, lru_pages);
 	if (reclaim_state) {
 		sc->nr_reclaimed += reclaim_state->reclaimed_slab;
 		reclaim_state->reclaimed_slab = 0;
@@ -2456,8 +2462,7 @@ loop_again:
 							end_zone, 0))
 				shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
-			shrink.nr_scanned = sc.nr_scanned;
-			nr_slab = shrink_slab(&shrink, lru_pages);
+			nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
 
@@ -3025,7 +3030,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	}
 
 	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
-	shrink.nr_scanned = sc.nr_scanned;
 	if (nr_slab_pages0 > zone->min_slab_pages) {
 		/*
 		 * shrink_slab() does not currently allow us to determine how
@@ -3041,7 +3045,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		unsigned long lru_pages = zone_reclaimable_pages(zone);
 
 		/* No reclaimable slab or very low memory pressure */
-		if (!shrink_slab(&shrink, lru_pages))
+		if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
 			break;
 
 		/* Freed enough memory */
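[Editor's note: on the vmscan.c side, shrink_slab() now takes nr_pages_scanned as an
explicit argument instead of reading shrink->nr_scanned, and do_shrinker_shrink()
stuffs each batch size into sc->nr_to_scan before dispatching. A simplified sketch of
the resulting calling pattern, with illustrative values; batching and error handling
from the real loop are elided.]

struct shrink_control shrink = {
        .gfp_mask = GFP_KERNEL,
};

/* nr_to_scan == 0 is the query call: it only reports the cache size. */
int max_pass = do_shrinker_shrink(shrinker, &shrink, 0);

/* A non-zero count asks the shrinker to actually reclaim a batch. */
int remaining = do_shrinker_shrink(shrinker, &shrink, SHRINK_BATCH);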
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 67e31276682a..cd6e4aa19dbf 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -326,10 +326,12 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
  * Run memory cache shrinker.
  */
 static int
-rpcauth_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
+rpcauth_cache_shrinker(struct shrinker *shrink, struct shrink_control *sc)
 {
 	LIST_HEAD(free);
 	int res;
+	int nr_to_scan = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
 
 	if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
 		return (nr_to_scan == 0) ? 0 : -1;