author     Mel Gorman <mel@csn.ul.ie>                      2008-04-28 05:12:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 11:58:19 -0400
commit     19770b32609b6bf97a3dece2529089494cbfc549 (patch)
tree       3b5922d1b20aabdf929bde9309f323841717747a /mm/hugetlb.c
parent     dd1a239f6f2d4d3eedd318583ec319aa145b324c (diff)
mm: filter based on a nodemask as well as a gfp_mask
The MPOL_BIND policy creates a zonelist that is used for allocations
controlled by that mempolicy. As the per-node zonelist is already being
filtered based on a zone id, this patch adds a version of __alloc_pages() that
takes a nodemask for further filtering. This eliminates the need for
MPOL_BIND to create a custom zonelist.
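
For orientation, the nodemask-taking variant can be pictured as below. This is
a minimal sketch, not the verbatim patch: the name __alloc_pages_nodemask()
matches the kernel of this era, and the NULL-mask wrapper is an assumption
about how the existing entry point is preserved.

	#include <linux/gfp.h>
	#include <linux/mmzone.h>
	#include <linux/nodemask.h>

	/*
	 * Sketch: __alloc_pages() gains a sibling that accepts a nodemask.
	 * A NULL nodemask means "no extra node filtering", so the old
	 * entry point can be expressed as a thin wrapper around the new one.
	 */
	struct page *__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
					    struct zonelist *zonelist,
					    nodemask_t *nodemask);

	static inline struct page *__alloc_pages(gfp_t gfp_mask,
						 unsigned int order,
						 struct zonelist *zonelist)
	{
		return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
	}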
A positive benefit of this is that allocations using MPOL_BIND now use the
local node's distance-ordered zonelist instead of a custom node-id-ordered
zonelist. I.e., pages will be allocated from the closest allowed node with
available memory.
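
The filtering itself is simple to state: a zone on the distance-ordered
zonelist is visited only if its node is allowed. The hypothetical helper below
(illustration only, not from the patch; node_isset() and zone_to_nid() are the
real primitives) captures the check:

	#include <linux/mmzone.h>
	#include <linux/nodemask.h>

	/*
	 * Hypothetical helper: a zone passes the MPOL_BIND filter when no
	 * nodemask was supplied, or when its node is set in the mask.
	 * Because the zonelist is walked in distance order, the first zone
	 * that passes is the closest allowed one.
	 */
	static bool zone_allowed_by_nodemask(struct zone *zone,
					     nodemask_t *nodemask)
	{
		return !nodemask || node_isset(zone_to_nid(zone), *nodemask);
	}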
[Lee.Schermerhorn@hp.com: Mempolicy: update stale documentation and comments]
[Lee.Schermerhorn@hp.com: Mempolicy: make dequeue_huge_page_vma() obey MPOL_BIND nodemask]
[Lee.Schermerhorn@hp.com: Mempolicy: make dequeue_huge_page_vma() obey MPOL_BIND nodemask rework]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	6
1 file changed, 4 insertions, 2 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4bced0d705c..3737d82f522 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -95,12 +95,14 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
 	int nid;
 	struct page *page = NULL;
 	struct mempolicy *mpol;
+	nodemask_t *nodemask;
 	struct zonelist *zonelist = huge_zonelist(vma, address,
-					htlb_alloc_mask, &mpol);
+					htlb_alloc_mask, &mpol, &nodemask);
 	struct zone *zone;
 	struct zoneref *z;
 
-	for_each_zone_zonelist(zone, z, zonelist, MAX_NR_ZONES - 1) {
+	for_each_zone_zonelist_nodemask(zone, z, zonelist,
+						MAX_NR_ZONES - 1, nodemask) {
 		nid = zone_to_nid(zone);
 		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
 		    !list_empty(&hugepage_freelists[nid])) {
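
As the hunk shows, huge_zonelist() now also hands the policy's nodemask back
to its caller. In sketch form (parameter names taken from the call site above;
the exact prototype may differ):

	struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				       unsigned long address,
				       gfp_t gfp_flags,
				       struct mempolicy **mpol,
				       nodemask_t **nodemask);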