author     Mel Gorman <mel@csn.ul.ie>                      2009-06-16 18:31:54 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-06-16 22:47:32 -0400
commit     6484eb3e2a81807722c5f28efef94d8338b7b996 (patch)
tree       10ce36f412c2ff0c7eb399af1a189f8e354f56db /mm
parent     b3c466ce512923298ae8c0121d3e9f397a3f1210 (diff)
page allocator: do not check NUMA node ID when the caller knows the node is valid
Callers of alloc_pages_node() can optionally specify -1 as a node to mean
"allocate from the current node".  However, a number of the callers in fast
paths know for a fact their node is valid.  To avoid a comparison and branch,
this patch adds alloc_pages_exact_node() that only checks the nid with
VM_BUG_ON().  Callers that know their node is valid are then converted.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Paul Mundt <lethal@linux-sh.org>	[for the SLOB NUMA bits]
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
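The new helper itself lands in include/linux/gfp.h, so it does not appear in
the mm/-only diff below.  A sketch of its shape, inferred from the commit
description and the alloc_pages_node() interface of this era (the exact
header change may differ slightly):

static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	/*
	 * Unlike alloc_pages_node(), there is no "nid == -1 means
	 * current node" fallback: the caller guarantees nid is valid,
	 * so the only check is a VM_BUG_ON() that compiles away on
	 * non-debug builds.
	 */
	VM_BUG_ON(nid < 0);

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}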
Diffstat (limited to 'mm')
 mm/filemap.c   | 2 +-
 mm/hugetlb.c   | 4 ++--
 mm/mempolicy.c | 2 +-
 mm/migrate.c   | 2 +-
 mm/slab.c      | 4 ++--
 mm/slob.c      | 4 ++--
 6 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 6846a902f5cf..22396713feb9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -521,7 +521,7 @@ struct page *__page_cache_alloc(gfp_t gfp)
 {
 	if (cpuset_do_page_mem_spread()) {
 		int n = cpuset_mem_spread_node();
-		return alloc_pages_node(n, gfp, 0);
+		return alloc_pages_exact_node(n, gfp, 0);
 	}
 	return alloc_pages(gfp, 0);
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e83ad2c9228c..2f8241f300f5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -630,7 +630,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 	if (h->order >= MAX_ORDER)
 		return NULL;
 
-	page = alloc_pages_node(nid,
+	page = alloc_pages_exact_node(nid,
 		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
 						__GFP_REPEAT|__GFP_NOWARN,
 		huge_page_order(h));
@@ -649,7 +649,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 	 * Use a helper variable to find the next node and then
 	 * copy it back to hugetlb_next_nid afterwards:
 	 * otherwise there's a window in which a racer might
-	 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+	 * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
 	 * But we don't need to use a spin_lock here: it really
 	 * doesn't matter if occasionally a racer chooses the
 	 * same nid as we do.  Move nid forward in the mask even
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 46bdf9ddf2ba..e08e2c4da63a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -803,7 +803,7 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 
 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
 {
-	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
+	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
 }
 
 /*
diff --git a/mm/migrate.c b/mm/migrate.c
index 068655d8f883..5a24923e7fd7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -802,7 +802,7 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 
 	*result = &pm->status;
 
-	return alloc_pages_node(pm->node,
+	return alloc_pages_exact_node(pm->node,
 				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
 }
 
diff --git a/mm/slab.c b/mm/slab.c
index 18e3164de09a..bb3254c95cd2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1707,7 +1707,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		flags |= __GFP_RECLAIMABLE;
 
-	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+	page = alloc_pages_exact_node(nodeid, flags, cachep->gfporder);
 	if (!page)
 		return NULL;
 
@@ -3261,7 +3261,7 @@ retry:
 	if (local_flags & __GFP_WAIT)
 		local_irq_enable();
 	kmem_flagcheck(cache, flags);
-	obj = kmem_getpages(cache, local_flags, -1);
+	obj = kmem_getpages(cache, local_flags, numa_node_id());
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
 	if (obj) {
diff --git a/mm/slob.c b/mm/slob.c
index 12f261499925..64f6db1943bf 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -46,7 +46,7 @@
  * NUMA support in SLOB is fairly simplistic, pushing most of the real
  * logic down to the page allocator, and simply doing the node accounting
  * on the upper levels.  In the event that a node id is explicitly
- * provided, alloc_pages_node() with the specified node id is used
+ * provided, alloc_pages_exact_node() with the specified node id is used
  * instead. The common case (or when the node id isn't explicitly provided)
  * will default to the current node, as per numa_node_id().
  *
@@ -244,7 +244,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 
 #ifdef CONFIG_NUMA
 	if (node != -1)
-		page = alloc_pages_node(node, gfp, order);
+		page = alloc_pages_exact_node(node, gfp, order);
 	else
 #endif
 	page = alloc_pages(gfp, order);