about | summary | refs | log | tree | commit | diff | stats
path: root/mm/mmzone.c
diff options
context:
space:
mode:
authorMel Gorman <mel@csn.ul.ie>2008-04-28 05:12:18 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-04-28 11:58:19 -0400
commit19770b32609b6bf97a3dece2529089494cbfc549 (patch)
tree3b5922d1b20aabdf929bde9309f323841717747a /mm/mmzone.c
parentdd1a239f6f2d4d3eedd318583ec319aa145b324c (diff)
mm: filter based on a nodemask as well as a gfp_mask
The MPOL_BIND policy creates a zonelist that is used for allocations controlled by that mempolicy. As the per-node zonelist is already being filtered based on a zone id, this patch adds a version of __alloc_pages() that takes a nodemask for further filtering. This eliminates the need for MPOL_BIND to create a custom zonelist. A positive benefit of this is that allocations using MPOL_BIND now use the local node's distance-ordered zonelist instead of a custom node-id-ordered zonelist. I.e., pages will be allocated from the closest allowed node with available memory. [Lee.Schermerhorn@hp.com: Mempolicy: update stale documentation and comments] [Lee.Schermerhorn@hp.com: Mempolicy: make dequeue_huge_page_vma() obey MPOL_BIND nodemask] [Lee.Schermerhorn@hp.com: Mempolicy: make dequeue_huge_page_vma() obey MPOL_BIND nodemask rework] Signed-off-by: Mel Gorman <mel@csn.ul.ie> Acked-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Mel Gorman <mel@csn.ul.ie> Cc: Hugh Dickins <hugh@veritas.com> Cc: Nick Piggin <nickpiggin@yahoo.com.au> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mmzone.c')
-rw-r--r--mm/mmzone.c30
1 file changed, 30 insertions, 0 deletions
diff --git a/mm/mmzone.c b/mm/mmzone.c
index eb5838634f18..486ed595ee6f 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -42,3 +42,33 @@ struct zone *next_zone(struct zone *zone)
42	return zone;
43}
44
45static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
46{
47#ifdef CONFIG_NUMA
48 return node_isset(zonelist_node_idx(zref), *nodes);
49#else
50 return 1;
51#endif /* CONFIG_NUMA */
52}
53
54/* Returns the next zone at or below highest_zoneidx in a zonelist */
55struct zoneref *next_zones_zonelist(struct zoneref *z,
56 enum zone_type highest_zoneidx,
57 nodemask_t *nodes,
58 struct zone **zone)
59{
60 /*
61 * Find the next suitable zone to use for the allocation.
62 * Only filter based on nodemask if it's set
63 */
64 if (likely(nodes == NULL))
65 while (zonelist_zone_idx(z) > highest_zoneidx)
66 z++;
67 else
68 while (zonelist_zone_idx(z) > highest_zoneidx ||
69 (z->zone && !zref_in_nodemask(z, nodes)))
70 z++;
71
72 *zone = zonelist_zone(z++);
73 return z;
74}