author    Mel Gorman <mel@csn.ul.ie>  2008-04-28 05:12:17 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 11:58:18 -0400
commit dd1a239f6f2d4d3eedd318583ec319aa145b324c (patch)
tree   aff4224c96b5e2e67588c3946858a724863eeaf9 /mm/vmscan.c
parent 54a6eb5c4765aa573a030ceeba2c14e3d2ea5706 (diff)
mm: have zonelist contain structs with both a zone pointer and zone_idx
Filtering zonelists requires very frequent use of zone_idx(). This is costly as it involves a lookup of another structure and a subtraction operation. As the zone_idx is often required, it should be quickly accessible. The node idx could also be stored here if it was found that accessing zone->node is significant, which may be the case on workloads where nodemasks are heavily used.

This patch introduces a struct zoneref to store a zone pointer and a zone index. The zonelist then consists of an array of these struct zonerefs which are looked up as necessary. Helpers are given for accessing the zone index as well as the node index.

[kamezawa.hiroyu@jp.fujitsu.com: Suggested struct zoneref instead of embedding information in pointers]
[hugh@veritas.com: mm-have-zonelist: fix memcg ooms]
[hugh@veritas.com: just return do_try_to_free_pages]
[hugh@veritas.com: do_try_to_free_pages gfp_mask redundant]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Christoph Lameter <clameter@sgi.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
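For reference, the core of the change can be sketched as follows. This is a condensed rendition of the struct zoneref and its accessor helpers as described above; the names follow the patch, but the snippet is illustrative rather than the full include/linux/mmzone.h interface:

struct zoneref {
	struct zone *zone;	/* pointer to the actual zone */
	int zone_idx;		/* cached zone_idx(zone), avoiding a re-lookup */
};

/* Accessors: callers read the cached index rather than dereferencing
 * zoneref->zone and recomputing zone_idx() on every filter step. */
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	return zoneref->zone->node;	/* node idx, per the note on zone->node above */
#else
	return 0;			/* single node on !NUMA kernels */
#endif
}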
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0515b8f44894..eceac9f9032f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1251,7 +1251,7 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
 {
 	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 	unsigned long nr_reclaimed = 0;
-	struct zone **z;
+	struct zoneref *z;
 	struct zone *zone;
 
 	sc->all_unreclaimable = 1;
@@ -1301,7 +1301,7 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
 	 * allocation attempt will fail.
 	 */
 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
-					gfp_t gfp_mask, struct scan_control *sc)
+					struct scan_control *sc)
 {
 	int priority;
 	int ret = 0;
@@ -1309,9 +1309,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	unsigned long nr_reclaimed = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long lru_pages = 0;
-	struct zone **z;
+	struct zoneref *z;
 	struct zone *zone;
-	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 
 	if (scan_global_lru(sc))
 		count_vm_event(ALLOCSTALL);
@@ -1339,7 +1339,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	 * over limit cgroups
 	 */
 	if (scan_global_lru(sc)) {
-		shrink_slab(sc->nr_scanned, gfp_mask, lru_pages);
+		shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
 		if (reclaim_state) {
 			nr_reclaimed += reclaim_state->reclaimed_slab;
 			reclaim_state->reclaimed_slab = 0;
@@ -1410,7 +1410,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.isolate_pages = isolate_pages_global,
 	};
 
-	return do_try_to_free_pages(zonelist, gfp_mask, &sc);
+	return do_try_to_free_pages(zonelist, &sc);
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
@@ -1419,7 +1419,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 						gfp_t gfp_mask)
 {
 	struct scan_control sc = {
-		.gfp_mask = gfp_mask,
 		.may_writepage = !laptop_mode,
 		.may_swap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
@@ -1429,12 +1428,11 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.isolate_pages = mem_cgroup_isolate_pages,
 	};
 	struct zonelist *zonelist;
-	int target_zone = gfp_zone(GFP_HIGHUSER_MOVABLE);
 
-	zonelist = &NODE_DATA(numa_node_id())->node_zonelists[target_zone];
-	if (do_try_to_free_pages(zonelist, sc.gfp_mask, &sc))
-		return 1;
-	return 0;
+	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
+	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
+	return do_try_to_free_pages(zonelist, &sc);
 }
 #endif
 
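To illustrate how the converted code walks the zoneref array, here is a hedged sketch of the iteration pattern used by shrink_zones() after this change; the for_each_zone_zonelist() iterator comes from the parent commit in this series, and the loop body is abbreviated:

	struct zoneref *z;
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);

	/* Walk the zoneref array, skipping zones above high_zoneidx; the
	 * cursor z carries each zone's cached index, so no zone_idx()
	 * call (pointer chase plus subtraction) is needed per step. */
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		if (!populated_zone(zone))
			continue;
		/* ... reclaim from this zone, as in shrink_zones() above ... */
	}

Note also how try_to_free_mem_cgroup_pages() now composes sc.gfp_mask: the caller's reclaim-behaviour bits (GFP_RECLAIM_MASK) are kept, while the zone-placement bits are taken from GFP_HIGHUSER_MOVABLE, so that gfp_zone(sc->gfp_mask) inside do_try_to_free_pages() resolves to the intended high_zoneidx.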