summaryrefslogtreecommitdiffstats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--mm/vmscan.c11
1 files changed, 8 insertions, 3 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4d4e859b4b9c..fe0d5c458440 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2374,12 +2374,16 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	 */
 	if (global_reclaim(sc)) {
 		unsigned long lru_pages = 0;
+
+		nodes_clear(shrink->nodes_to_scan);
 		for_each_zone_zonelist(zone, z, zonelist,
 				gfp_zone(sc->gfp_mask)) {
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
 
 			lru_pages += zone_reclaimable_pages(zone);
+			node_set(zone_to_nid(zone),
+				 shrink->nodes_to_scan);
 		}
 
 		shrink_slab(shrink, sc->nr_scanned, lru_pages);
@@ -2836,6 +2840,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
 		return true;
 
 	shrink_zone(zone, sc);
+	nodes_clear(shrink.nodes_to_scan);
+	node_set(zone_to_nid(zone), shrink.nodes_to_scan);
 
 	reclaim_state->reclaimed_slab = 0;
 	nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages);
@@ -3544,10 +3550,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	 * number of slab pages and shake the slab until it is reduced
 	 * by the same nr_pages that we used for reclaiming unmapped
 	 * pages.
-	 *
-	 * Note that shrink_slab will free memory on all zones and may
-	 * take a long time.
 	 */
+	nodes_clear(shrink.nodes_to_scan);
+	node_set(zone_to_nid(zone), shrink.nodes_to_scan);
 	for (;;) {
 		unsigned long lru_pages = zone_reclaimable_pages(zone);
 