commit    19770b32609b6bf97a3dece2529089494cbfc549 (patch)
author    Mel Gorman <mel@csn.ul.ie>                      2008-04-28 05:12:18 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 11:58:19 -0400
tree      3b5922d1b20aabdf929bde9309f323841717747a /fs
parent    dd1a239f6f2d4d3eedd318583ec319aa145b324c (diff)
mm: filter based on a nodemask as well as a gfp_mask
The MPOL_BIND policy creates a zonelist that is used for allocations
controlled by that mempolicy. As the per-node zonelist is already being
filtered based on a zone id, this patch adds a version of __alloc_pages() that
takes a nodemask for further filtering. This eliminates the need for
MPOL_BIND to create a custom zonelist.
A positive benefit of this is that allocations using MPOL_BIND now use the
local node's distance-ordered zonelist instead of a custom node-id-ordered
zonelist. I.e., pages will be allocated from the closest allowed node with
available memory.
[Lee.Schermerhorn@hp.com: Mempolicy: update stale documentation and comments]
[Lee.Schermerhorn@hp.com: Mempolicy: make dequeue_huge_page_vma() obey MPOL_BIND nodemask]
[Lee.Schermerhorn@hp.com: Mempolicy: make dequeue_huge_page_vma() obey MPOL_BIND nodemask rework]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs')
 fs/buffer.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index ac84cd13075d..7d51e649b19a 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -360,16 +360,17 @@ void invalidate_bdev(struct block_device *bdev)
  */
 static void free_more_memory(void)
 {
-	struct zoneref *zrefs;
+	struct zone *zone;
 	int nid;
 
 	wakeup_pdflush(1024);
 	yield();
 
 	for_each_online_node(nid) {
-		zrefs = first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
-						gfp_zone(GFP_NOFS));
-		if (zrefs->zone)
+		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
+						gfp_zone(GFP_NOFS), NULL,
+						&zone);
+		if (zone)
 			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
 						GFP_NOFS);
 	}