author		Dave Chinner <dchinner@redhat.com>	2013-08-27 20:18:03 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2013-09-10 18:56:31 -0400
commit		0ce3d74450815500e31f16a0b65f6bab687985c3 (patch)
tree		82c7a5a75958da8f44102276e862eaf325c5f0ce /mm
parent		4e717f5c1083995c334ced639cc77a75e9972567 (diff)
shrinker: add node awareness
Pass the node of the current zone being reclaimed to shrink_slab(),
allowing the shrinker control nodemask to be set appropriately for
node-aware shrinkers.
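As an illustrative sketch only (not code from this patch): once the
call sites below have populated nodes_to_scan via nodes_clear() and
node_set(), the shrinker core can confine slab scanning to the nodes
actually under reclaim. scan_one_node() is a hypothetical helper
named here purely for illustration:

    #include <linux/nodemask.h>  /* nodemask_t, for_each_node_mask() */
    #include <linux/shrinker.h>  /* struct shrink_control */

    /* Hypothetical per-node scan helper, for illustration only. */
    static unsigned long scan_one_node(struct shrink_control *sc, int nid);

    /*
     * Walk only the nodes that the reclaim paths marked in
     * sc->nodes_to_scan, skipping nodes not being reclaimed from.
     */
    static unsigned long walk_nodes_to_scan(struct shrink_control *sc)
    {
            unsigned long freed = 0;
            int nid;

            for_each_node_mask(nid, sc->nodes_to_scan)
                    freed += scan_one_node(sc, nid);

            return freed;
    }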
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory-failure.c	 2
-rw-r--r--	mm/vmscan.c		11
2 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d84c5e5331bb..baa4e0a45dec 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -248,10 +248,12 @@ void shake_page(struct page *p, int access)
 	 */
 	if (access) {
 		int nr;
+		int nid = page_to_nid(p);
 		do {
 			struct shrink_control shrink = {
 				.gfp_mask = GFP_KERNEL,
 			};
+			node_set(nid, shrink.nodes_to_scan);
 
 			nr = shrink_slab(&shrink, 1000, 1000);
 			if (page_count(p) == 1)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4d4e859b4b9c..fe0d5c458440 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2374,12 +2374,16 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	 */
 	if (global_reclaim(sc)) {
 		unsigned long lru_pages = 0;
+
+		nodes_clear(shrink->nodes_to_scan);
 		for_each_zone_zonelist(zone, z, zonelist,
 				gfp_zone(sc->gfp_mask)) {
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
 
 			lru_pages += zone_reclaimable_pages(zone);
+			node_set(zone_to_nid(zone),
+				 shrink->nodes_to_scan);
 		}
 
 		shrink_slab(shrink, sc->nr_scanned, lru_pages);
@@ -2836,6 +2840,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
 		return true;
 
 	shrink_zone(zone, sc);
+	nodes_clear(shrink.nodes_to_scan);
+	node_set(zone_to_nid(zone), shrink.nodes_to_scan);
 
 	reclaim_state->reclaimed_slab = 0;
 	nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages);
@@ -3544,10 +3550,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	 * number of slab pages and shake the slab until it is reduced
 	 * by the same nr_pages that we used for reclaiming unmapped
 	 * pages.
-	 *
-	 * Note that shrink_slab will free memory on all zones and may
-	 * take a long time.
 	 */
+	nodes_clear(shrink.nodes_to_scan);
+	node_set(zone_to_nid(zone), shrink.nodes_to_scan);
 	for (;;) {
 		unsigned long lru_pages = zone_reclaimable_pages(zone);
 