author | Mel Gorman <mel@csn.ul.ie> | 2008-04-28 05:12:16 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-28 11:58:18 -0400
commit | 54a6eb5c4765aa573a030ceeba2c14e3d2ea5706 (patch)
tree | 547176a090beb787722a153cf2b8b942dc0e68db /mm/slub.c
parent | 18ea7e710d2452fa726814a406779188028cf1bf (diff)
mm: use two zonelists that are filtered by GFP mask
Currently a node has two sets of zonelists: one zonelist for each zone type in the
system, plus a second set for GFP_THISNODE allocations. One of these zonelists is
selected according to the zones allowed by a gfp mask. All of these zonelists
consume memory and occupy cache lines.
This patch replaces the multiple per-node zonelists with just two. The first
contains all populated zones in the system, ordered by distance, and is used for
fallback allocation when the target/preferred node has no free pages. The second
contains only the populated zones of the node itself and is used for GFP_THISNODE
allocations.
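
To make the selection concrete, the sketch below shows roughly how the GFP mask picks between the two per-node zonelists after this change. pick_zonelist() is a hypothetical stand-in for the kernel's node_zonelist() helper, and the index computation is a paraphrase rather than the patch's literal code.

```c
#include <linux/gfp.h>
#include <linux/mmzone.h>

/*
 * Hypothetical sketch of zonelist selection (assumes CONFIG_NUMA):
 * each pg_data_t now carries two zonelists, and __GFP_THISNODE picks
 * the node-local list that has no fallback zones.
 */
static inline struct zonelist *pick_zonelist(int nid, gfp_t flags)
{
        int idx = (flags & __GFP_THISNODE) ? 1 : 0;     /* 1 = GFP_THISNODE list */

        return NODE_DATA(nid)->node_zonelists + idx;
}
```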
An iterator macro, for_each_zone_zonelist(), is introduced; it iterates through
each zone in the selected zonelist that is allowed by the GFP flags.
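
As a usage sketch, a caller walks the filtered zonelist much like the slub hunk below. walk_allowed_zones() is a made-up example function, the logging is purely illustrative, and it assumes a NUMA kernel from this patch series, where the iterator cursor is still a struct zone **.

```c
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mmzone.h>
#include <linux/topology.h>

/* Illustrative only: log each zone usable for the given GFP mask. */
static void walk_allowed_zones(gfp_t flags)
{
        struct zonelist *zonelist;
        struct zone **z;                        /* iterator cursor */
        struct zone *zone;                      /* current zone */
        enum zone_type high_zoneidx = gfp_zone(flags);

        /* The local node's fallback zonelist, chosen by the GFP mask. */
        zonelist = node_zonelist(numa_node_id(), flags);

        /* Zones above the highest type the mask allows are skipped. */
        for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
                printk(KERN_INFO "zone %s on node %d is allowed\n",
                       zone->name, zone_to_nid(zone));
}
```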
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 8
1 file changed, 5 insertions(+), 3 deletions(-)
@@ -1285,6 +1285,8 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 #ifdef CONFIG_NUMA
        struct zonelist *zonelist;
        struct zone **z;
+       struct zone *zone;
+       enum zone_type high_zoneidx = gfp_zone(flags);
        struct page *page;
 
        /*
@@ -1310,12 +1312,12 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
                return NULL;
 
        zonelist = node_zonelist(slab_node(current->mempolicy), flags);
-       for (z = zonelist->zones; *z; z++) {
+       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
                struct kmem_cache_node *n;
 
-               n = get_node(s, zone_to_nid(*z));
+               n = get_node(s, zone_to_nid(zone));
 
-               if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
+               if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
                                n->nr_partial > MIN_PARTIAL) {
                        page = get_partial_node(n);
                        if (page)