author | Mel Gorman <mel@csn.ul.ie> | 2008-04-28 05:12:18 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-28 11:58:19 -0400
commit | 19770b32609b6bf97a3dece2529089494cbfc549
tree | 3b5922d1b20aabdf929bde9309f323841717747a /include/linux/cpuset.h
parent | dd1a239f6f2d4d3eedd318583ec319aa145b324c
mm: filter based on a nodemask as well as a gfp_mask
The MPOL_BIND policy creates a zonelist that is used for allocations
controlled by that mempolicy. As the per-node zonelist is already being
filtered based on a zone id, this patch adds a version of __alloc_pages() that
takes a nodemask for further filtering. This eliminates the need for
MPOL_BIND to create a custom zonelist.
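To make the mechanism concrete, here is a minimal, self-contained user-space sketch, not the kernel code: the zone list, the mask representation, and names such as first_allowed() are simplified stand-ins. It shows the idea of walking a distance-ordered zone list while filtering by both the highest zone type implied by the gfp_mask and an optional nodemask (a zero mask stands in for a NULL nodemask, i.e. "no further filtering"):

/*
 * Illustrative sketch only; simplified stand-in for the kernel's
 * zoneref/nodemask_t machinery.
 */
#include <stdio.h>
#include <stdbool.h>

enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM };

struct zone_entry {
	int node;              /* node the zone belongs to */
	enum zone_type type;   /* which zone on that node */
};

/* A "nodemask" modelled as a plain bitmask of node ids. */
static bool node_allowed(unsigned long nodemask, int node)
{
	return nodemask & (1UL << node);
}

/* Return the index of the first entry passing both filters, or -1. */
static int first_allowed(const struct zone_entry *zl, int len,
			 enum zone_type highest, unsigned long nodemask)
{
	for (int i = 0; i < len; i++) {
		if (zl[i].type > highest)
			continue;        /* filtered by gfp_mask */
		if (nodemask && !node_allowed(nodemask, zl[i].node))
			continue;        /* filtered by the MPOL_BIND mask */
		return i;
	}
	return -1;
}

int main(void)
{
	/* Node 0's distance-ordered zonelist over nodes 0, 1, 2. */
	struct zone_entry zl[] = {
		{ 0, ZONE_NORMAL }, { 0, ZONE_DMA },
		{ 1, ZONE_NORMAL }, { 1, ZONE_DMA },
		{ 2, ZONE_NORMAL }, { 2, ZONE_DMA },
	};
	/* MPOL_BIND to nodes {1, 2}: bit 0 is clear. */
	unsigned long bind_mask = (1UL << 1) | (1UL << 2);

	int i = first_allowed(zl, 6, ZONE_NORMAL, bind_mask);
	if (i >= 0)
		printf("first allowed zone: node %d\n", zl[i].node); /* node 1 */
	return 0;
}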
A positive benefit of this is that allocations using MPOL_BIND now use the
local node's distance-ordered zonelist instead of a custom node-id-ordered
zonelist. I.e., pages will be allocated from the closest allowed node with
available memory.
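As a worked illustration of that ordering difference (a toy under assumed distances, not kernel code): suppose the task runs on node 2, MPOL_BIND restricts it to nodes {1, 3}, and node 3 is closer to node 2 than node 1. A custom node-id-ordered zonelist falls back to node 1 first, while the local distance-ordered zonelist filtered by the nodemask picks node 3 first:

#include <stdio.h>

int main(void)
{
	int allowed[] = { 1, 3 };                /* MPOL_BIND nodemask */
	int custom_zonelist[] = { 1, 3 };        /* old: ordered by node id */
	int local_zonelist[]  = { 2, 3, 1, 0 };  /* new: node 2's distance order */

	/* Old behaviour: first entry of the custom zonelist -> node 1. */
	printf("node-id order picks node %d\n", custom_zonelist[0]);

	/* New behaviour: walk the local zonelist, skip nodes not in the mask. */
	for (int i = 0; i < 4; i++) {
		int n = local_zonelist[i];
		if (n == allowed[0] || n == allowed[1]) {
			printf("distance order picks node %d\n", n); /* node 3 */
			break;
		}
	}
	return 0;
}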
[Lee.Schermerhorn@hp.com: Mempolicy: update stale documentation and comments]
[Lee.Schermerhorn@hp.com: Mempolicy: make dequeue_huge_page_vma() obey MPOL_BIND nodemask]
[Lee.Schermerhorn@hp.com: Mempolicy: make dequeue_huge_page_vma() obey MPOL_BIND nodemask rework]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/cpuset.h')
-rw-r--r-- | include/linux/cpuset.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 726761e24003..038578362b47 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -26,7 +26,7 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
 void cpuset_update_task_memory_state(void);
-int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl);
+int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
 
 extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
 extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
@@ -103,7 +103,7 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
 static inline void cpuset_init_current_mems_allowed(void) {}
 static inline void cpuset_update_task_memory_state(void) {}
 
-static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
+static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
 {
 	return 1;
 }
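The hunks above only change the prototype and the !CONFIG_CPUSETS stub, which always returns 1. As a rough user-space sketch of the semantics the cpuset side presumably provides (an assumption here, since kernel/cpuset.c is not part of this diff): the nodemask is acceptable if it shares at least one node with the task's cpuset-allowed memory nodes. Masks are again modelled as plain bitmasks:

#include <stdio.h>

/* Sketch: valid iff the requested nodemask intersects mems_allowed. */
static int nodemask_valid_mems_allowed(unsigned long nodemask,
				       unsigned long mems_allowed)
{
	return (nodemask & mems_allowed) != 0;
}

int main(void)
{
	unsigned long mems_allowed = (1UL << 0) | (1UL << 1); /* cpuset: nodes {0, 1} */

	printf("%d\n", nodemask_valid_mems_allowed(1UL << 1, mems_allowed)); /* 1 */
	printf("%d\n", nodemask_valid_mems_allowed(1UL << 3, mems_allowed)); /* 0 */
	return 0;
}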