aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
 drivers/staging/android/ashmem.c |  3 +++
 fs/drop_caches.c                 |  1 +
 include/linux/shrinker.h         |  3 +++
 mm/memory-failure.c              |  2 ++
 mm/vmscan.c                      | 11 +++++++----
 5 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 21a3f7250531..65f36d728714 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -692,6 +692,9 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			.gfp_mask = GFP_KERNEL,
 			.nr_to_scan = 0,
 		};
+
+		nodes_setall(sc.nodes_to_scan);
+
 		ret = ashmem_shrink(&ashmem_shrinker, &sc);
 		sc.nr_to_scan = ret;
 		ashmem_shrink(&ashmem_shrinker, &sc);
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index c00e055b6282..9fd702f5bfb2 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -44,6 +44,7 @@ static void drop_slab(void)
 		.gfp_mask = GFP_KERNEL,
 	};
 
+	nodes_setall(shrink.nodes_to_scan);
 	do {
 		nr_objects = shrink_slab(&shrink, 1000, 1000);
 	} while (nr_objects > 10);
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 884e76222e1b..76f520c4c394 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -16,6 +16,9 @@ struct shrink_control {
 
 	/* How many slab objects shrinker() should scan and try to reclaim */
 	unsigned long nr_to_scan;
+
+	/* shrink from these nodes */
+	nodemask_t nodes_to_scan;
 };
 
 #define SHRINK_STOP (~0UL)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d84c5e5331bb..baa4e0a45dec 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -248,10 +248,12 @@ void shake_page(struct page *p, int access)
 	 */
 	if (access) {
 		int nr;
+		int nid = page_to_nid(p);
 		do {
 			struct shrink_control shrink = {
 				.gfp_mask = GFP_KERNEL,
 			};
+			node_set(nid, shrink.nodes_to_scan);
 
 			nr = shrink_slab(&shrink, 1000, 1000);
 			if (page_count(p) == 1)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4d4e859b4b9c..fe0d5c458440 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2374,12 +2374,16 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	 */
 	if (global_reclaim(sc)) {
 		unsigned long lru_pages = 0;
+
+		nodes_clear(shrink->nodes_to_scan);
 		for_each_zone_zonelist(zone, z, zonelist,
 				gfp_zone(sc->gfp_mask)) {
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
 
 			lru_pages += zone_reclaimable_pages(zone);
+			node_set(zone_to_nid(zone),
+				 shrink->nodes_to_scan);
 		}
 
 		shrink_slab(shrink, sc->nr_scanned, lru_pages);
@@ -2836,6 +2840,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
 		return true;
 
 	shrink_zone(zone, sc);
+	nodes_clear(shrink.nodes_to_scan);
+	node_set(zone_to_nid(zone), shrink.nodes_to_scan);
 
 	reclaim_state->reclaimed_slab = 0;
 	nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages);
@@ -3544,10 +3550,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	 * number of slab pages and shake the slab until it is reduced
 	 * by the same nr_pages that we used for reclaiming unmapped
 	 * pages.
-	 *
-	 * Note that shrink_slab will free memory on all zones and may
-	 * take a long time.
 	 */
+	nodes_clear(shrink.nodes_to_scan);
+	node_set(zone_to_nid(zone), shrink.nodes_to_scan);
 	for (;;) {
 		unsigned long lru_pages = zone_reclaimable_pages(zone);
 