diff options
author | Mel Gorman <mel@csn.ul.ie> | 2008-04-28 05:12:14 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-28 11:58:18 -0400 |
commit | 0e88460da6ab7bb6a7ef83675412ed5b6315d741 (patch) | |
tree | 1feb4de2362e4998a0deeab66af1efb9c7b8bb34 /mm | |
parent | dac1d27bc8d5ca636d3014ecfdf94407031d1970 (diff) |
mm: introduce node_zonelist() for accessing the zonelist for a GFP mask
Introduce a node_zonelist() helper function. It is used to lookup the
appropriate zonelist given a node and a GFP mask. The patch on its own is a
cleanup but it helps clarify parts of the two-zonelist-per-node patchset. If
necessary, it can be merged with the next patch in this set without problems.
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/mempolicy.c | 6 | ||||
-rw-r--r-- | mm/page_alloc.c | 3 | ||||
-rw-r--r-- | mm/slab.c | 3 | ||||
-rw-r--r-- | mm/slub.c | 3 |
4 files changed, 6 insertions(+), 9 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 3c3601121509..5d20bf44062f 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -1183,7 +1183,7 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy) | |||
1183 | nd = 0; | 1183 | nd = 0; |
1184 | BUG(); | 1184 | BUG(); |
1185 | } | 1185 | } |
1186 | return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp); | 1186 | return node_zonelist(nd, gfp); |
1187 | } | 1187 | } |
1188 | 1188 | ||
1189 | /* Do dynamic interleaving for a process */ | 1189 | /* Do dynamic interleaving for a process */ |
@@ -1299,7 +1299,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, | |||
1299 | if (unlikely(pol != &default_policy && | 1299 | if (unlikely(pol != &default_policy && |
1300 | pol != current->mempolicy)) | 1300 | pol != current->mempolicy)) |
1301 | __mpol_free(pol); /* finished with pol */ | 1301 | __mpol_free(pol); /* finished with pol */ |
1302 | return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags); | 1302 | return node_zonelist(nid, gfp_flags); |
1303 | } | 1303 | } |
1304 | 1304 | ||
1305 | zl = zonelist_policy(GFP_HIGHUSER, pol); | 1305 | zl = zonelist_policy(GFP_HIGHUSER, pol); |
@@ -1321,7 +1321,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, | |||
1321 | struct zonelist *zl; | 1321 | struct zonelist *zl; |
1322 | struct page *page; | 1322 | struct page *page; |
1323 | 1323 | ||
1324 | zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp); | 1324 | zl = node_zonelist(nid, gfp); |
1325 | page = __alloc_pages(gfp, order, zl); | 1325 | page = __alloc_pages(gfp, order, zl); |
1326 | if (page && page_zone(page) == zl->zones[0]) | 1326 | if (page && page_zone(page) == zl->zones[0]) |
1327 | inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); | 1327 | inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1bda771a072a..63ff71830ea4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1713,10 +1713,9 @@ EXPORT_SYMBOL(free_pages); | |||
1713 | static unsigned int nr_free_zone_pages(int offset) | 1713 | static unsigned int nr_free_zone_pages(int offset) |
1714 | { | 1714 | { |
1715 | /* Just pick one node, since fallback list is circular */ | 1715 | /* Just pick one node, since fallback list is circular */ |
1716 | pg_data_t *pgdat = NODE_DATA(numa_node_id()); | ||
1717 | unsigned int sum = 0; | 1716 | unsigned int sum = 0; |
1718 | 1717 | ||
1719 | struct zonelist *zonelist = pgdat->node_zonelists + offset; | 1718 | struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); |
1720 | struct zone **zonep = zonelist->zones; | 1719 | struct zone **zonep = zonelist->zones; |
1721 | struct zone *zone; | 1720 | struct zone *zone; |
1722 | 1721 | ||
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
@@ -3249,8 +3249,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) | |||
3249 | if (flags & __GFP_THISNODE) | 3249 | if (flags & __GFP_THISNODE) |
3250 | return NULL; | 3250 | return NULL; |
3251 | 3251 | ||
3252 | zonelist = &NODE_DATA(slab_node(current->mempolicy)) | 3252 | zonelist = node_zonelist(slab_node(current->mempolicy), flags); |
3253 | ->node_zonelists[gfp_zone(flags)]; | ||
3254 | local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); | 3253 | local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); |
3255 | 3254 | ||
3256 | retry: | 3255 | retry: |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -1309,8 +1309,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) | |||
1309 | get_cycles() % 1024 > s->remote_node_defrag_ratio) | 1309 | get_cycles() % 1024 > s->remote_node_defrag_ratio) |
1310 | return NULL; | 1310 | return NULL; |
1311 | 1311 | ||
1312 | zonelist = &NODE_DATA( | 1312 | zonelist = node_zonelist(slab_node(current->mempolicy), flags); |
1313 | slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)]; | ||
1314 | for (z = zonelist->zones; *z; z++) { | 1313 | for (z = zonelist->zones; *z; z++) { |
1315 | struct kmem_cache_node *n; | 1314 | struct kmem_cache_node *n; |
1316 | 1315 | ||