author | Mel Gorman <mel@csn.ul.ie> | 2009-06-16 18:31:54 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-16 22:47:32 -0400
commit | 6484eb3e2a81807722c5f28efef94d8338b7b996 (patch)
tree | 10ce36f412c2ff0c7eb399af1a189f8e354f56db /mm/hugetlb.c
parent | b3c466ce512923298ae8c0121d3e9f397a3f1210 (diff)
page allocator: do not check NUMA node ID when the caller knows the node is valid
Callers of alloc_pages_node() can optionally specify -1 as a node to mean
"allocate from the current node". However, a number of the callers in
fast paths know for a fact their node is valid. To avoid a comparison and
branch, this patch adds alloc_pages_exact_node() that only checks the nid
with VM_BUG_ON(). Callers that know their node is valid are then
converted.
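
For context, a minimal sketch of the distinction the message describes, modelled on the include/linux/gfp.h helpers of this era; the exact signatures and the __alloc_pages()/node_zonelist() plumbing are assumptions for illustration, not quoted from the patch:

/*
 * Sketch only (assumed shape of the existing helper): alloc_pages_node()
 * accepts nid == -1 to mean "current node", so every call pays for the
 * comparison and branch even when the caller already knows nid is valid.
 */
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
					    unsigned int order)
{
	if (nid < 0)		/* unknown node means current node */
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

/*
 * Sketch of the new helper: the caller guarantees a valid nid, so the only
 * check is a VM_BUG_ON() that compiles away unless CONFIG_DEBUG_VM is set.
 */
static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						  unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

In the hugetlb hunk below, nid comes from the node-mask walk and is known to be valid, which is why it is converted to the exact-node variant.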
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Paul Mundt <lethal@linux-sh.org> [for the SLOB NUMA bits]
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 4 |
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e83ad2c9228c..2f8241f300f5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -630,7 +630,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 	if (h->order >= MAX_ORDER)
 		return NULL;
 
-	page = alloc_pages_node(nid,
+	page = alloc_pages_exact_node(nid,
 		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
 						__GFP_REPEAT|__GFP_NOWARN,
 		huge_page_order(h));
@@ -649,7 +649,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 		 * Use a helper variable to find the next node and then
 		 * copy it back to hugetlb_next_nid afterwards:
 		 * otherwise there's a window in which a racer might
-		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+		 * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
 		 * But we don't need to use a spin_lock here: it really
 		 * doesn't matter if occasionally a racer chooses the
 		 * same nid as we do. Move nid forward in the mask even