author     Mel Gorman <mel@csn.ul.ie>                      2008-04-28 05:12:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 11:58:19 -0400
commit     19770b32609b6bf97a3dece2529089494cbfc549
tree       3b5922d1b20aabdf929bde9309f323841717747a /kernel/cpuset.c
parent     dd1a239f6f2d4d3eedd318583ec319aa145b324c
mm: filter based on a nodemask as well as a gfp_mask
The MPOL_BIND policy creates a zonelist that is used for allocations
controlled by that mempolicy. As the per-node zonelist is already being
filtered based on a zone id, this patch adds a version of __alloc_pages() that
takes a nodemask for further filtering. This eliminates the need for
MPOL_BIND to create a custom zonelist.
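
As a rough, self-contained sketch of what "further filtering" means here (the
helper name first_allowed_zone() and its signature are invented for
illustration; zonelist_zone_idx(), zonelist_node_idx() and node_isset() are
existing accessors), the allocator walks the zonelist and skips zones that
fail either the zone-id check or the new nodemask check:

#include <linux/mmzone.h>
#include <linux/nodemask.h>

/*
 * Sketch only: return the first zone that passes both filters.  This is not
 * the patch's actual interface, just an illustration of the filtering idea.
 */
static struct zone *first_allowed_zone(struct zoneref *zrefs,
				       enum zone_type highest_zoneidx,
				       nodemask_t *nodes)
{
	int i;

	for (i = 0; zrefs[i].zone; i++) {
		/* zone-id filter derived from the gfp_mask (already existed) */
		if (zonelist_zone_idx(&zrefs[i]) > highest_zoneidx)
			continue;
		/* new: skip zones whose node is not in the caller's nodemask */
		if (nodes && !node_isset(zonelist_node_idx(&zrefs[i]), *nodes))
			continue;
		return zrefs[i].zone;
	}
	return NULL;
}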
A positive benefit of this is that allocations using MPOL_BIND now use the
local node's distance-ordered zonelist instead of a custom node-id-ordered
zonelist. I.e., pages will be allocated from the closest allowed node with
available memory.
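
For a concrete, made-up illustration of the ordering difference: suppose the
task runs on node 3 and MPOL_BIND allows nodes {0, 2}, with an assumed
SLIT-style distance table.  A node-id-ordered list would try node 0 first;
the local node's distance-ordered zonelist tries node 2 first because it is
nearer.  A toy user-space check of that claim (assumed distances, not kernel
code):

#include <stdio.h>

#define NR_NODES 4

/* Assumed distance table: node_distance[from][to]. */
static const int node_distance[NR_NODES][NR_NODES] = {
	{ 10, 20, 30, 40 },
	{ 20, 10, 20, 30 },
	{ 30, 20, 10, 20 },
	{ 40, 30, 20, 10 },
};

int main(void)
{
	const int local = 3;			/* node the task runs on */
	const int allowed[] = { 0, 2 };		/* MPOL_BIND nodes, in node-id order */
	int i, best = -1;

	/* Pick the allowed node closest to the local node. */
	for (i = 0; i < (int)(sizeof(allowed) / sizeof(allowed[0])); i++)
		if (best < 0 ||
		    node_distance[local][allowed[i]] < node_distance[local][best])
			best = allowed[i];

	printf("distance order tries node %d first; node-id order tries node %d\n",
	       best, allowed[0]);
	return 0;
}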
[Lee.Schermerhorn@hp.com: Mempolicy: update stale documentation and comments]
[Lee.Schermerhorn@hp.com: Mempolicy: make dequeue_huge_page_vma() obey MPOL_BIND nodemask]
[Lee.Schermerhorn@hp.com: Mempolicy: make dequeue_huge_page_vma() obey MPOL_BIND nodemask rework]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--   kernel/cpuset.c | 18 +++++-------------
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a220b13cbfaf..c9923e3c9a3b 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1958,22 +1958,14 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
 }
 
 /**
- * cpuset_zonelist_valid_mems_allowed - check zonelist vs. curremt mems_allowed
- * @zl: the zonelist to be checked
+ * cpuset_nodemask_valid_mems_allowed - check nodemask vs. curremt mems_allowed
+ * @nodemask: the nodemask to be checked
  *
- * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
+ * Are any of the nodes in the nodemask allowed in current->mems_allowed?
  */
-int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
+int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
 {
-	int i;
-
-	for (i = 0; zl->_zonerefs[i].zone; i++) {
-		int nid = zonelist_node_idx(&zl->_zonerefs[i]);
-
-		if (node_isset(nid, current->mems_allowed))
-			return 1;
-	}
-	return 0;
+	return nodes_intersects(*nodemask, current->mems_allowed);
 }
 
 /*
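
The cpuset helper above now reduces to a plain bitmask intersection between
the policy's nodemask and the task's cpuset-allowed nodes.  A minimal
stand-alone sketch of that semantic (toy nodemask type, not the kernel's
MAX_NUMNODES-sized nodemask_t):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for nodemask_t/nodes_intersects(); same idea: any common bit
 * means the policy's nodes and mems_allowed overlap. */
typedef struct { unsigned long bits; } toy_nodemask_t;

static bool toy_nodes_intersects(toy_nodemask_t a, toy_nodemask_t b)
{
	return (a.bits & b.bits) != 0;
}

int main(void)
{
	toy_nodemask_t policy  = { .bits = (1UL << 2) | (1UL << 3) };	/* bind to nodes 2,3 */
	toy_nodemask_t allowed = { .bits = (1UL << 0) | (1UL << 1) };	/* cpuset allows 0,1 */

	/* No overlap: the check analogous to cpuset_nodemask_valid_mems_allowed()
	 * returns 0 here, so the allocation cannot escape the cpuset. */
	printf("%d\n", toy_nodes_intersects(policy, allowed));
	return 0;
}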