author    Mel Gorman <mel@csn.ul.ie>    2009-06-16 18:31:54 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-06-16 22:47:32 -0400
commit    6484eb3e2a81807722c5f28efef94d8338b7b996 (patch)
tree      10ce36f412c2ff0c7eb399af1a189f8e354f56db /kernel
parent    b3c466ce512923298ae8c0121d3e9f397a3f1210 (diff)
page allocator: do not check NUMA node ID when the caller knows the node is valid
Callers of alloc_pages_node() can optionally specify -1 as a node to mean
"allocate from the current node". However, a number of the callers in fast
paths know for a fact their node is valid. To avoid a comparison and branch,
this patch adds alloc_pages_exact_node() that only checks the nid with
VM_BUG_ON(). Callers that know their node is valid are then converted.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Paul Mundt <lethal@linux-sh.org>  [for the SLOB NUMA bits]
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
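For context, a minimal sketch of the two helpers as the commit message
describes them (the exact definitions live in include/linux/gfp.h and may
differ in detail; this is the assumed shape, not a verbatim excerpt):

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	/* -1 means "unknown node": fall back to the current node,
	 * which costs a comparison and branch on every allocation. */
	if (nid < 0)
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	/* Caller guarantees a valid nid; VM_BUG_ON() compiles away
	 * unless CONFIG_DEBUG_VM is set, so the production fast path
	 * carries no branch. */
	VM_BUG_ON(nid < 0);

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

In kernel/profile.c below, node always comes from cpu_to_node(cpu) for an
online CPU, so it is known to be valid and the branch-free variant is safe.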
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/profile.c   8
1 file changed, 4 insertions, 4 deletions
diff --git a/kernel/profile.c b/kernel/profile.c
index 28cf26ad2d24..69911b5745eb 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -365,7 +365,7 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info,
 		node = cpu_to_node(cpu);
 		per_cpu(cpu_profile_flip, cpu) = 0;
 		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
-			page = alloc_pages_node(node,
+			page = alloc_pages_exact_node(node,
 					GFP_KERNEL | __GFP_ZERO,
 					0);
 			if (!page)
@@ -373,7 +373,7 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info,
 			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
 		}
 		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
-			page = alloc_pages_node(node,
+			page = alloc_pages_exact_node(node,
 					GFP_KERNEL | __GFP_ZERO,
 					0);
 			if (!page)
@@ -564,14 +564,14 @@ static int create_hash_tables(void)
 		int node = cpu_to_node(cpu);
 		struct page *page;
 
-		page = alloc_pages_node(node,
+		page = alloc_pages_exact_node(node,
 				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
 				0);
 		if (!page)
 			goto out_cleanup;
 		per_cpu(cpu_profile_hits, cpu)[1]
 			= (struct profile_hit *)page_address(page);
-		page = alloc_pages_node(node,
+		page = alloc_pages_exact_node(node,
 				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
 				0);
 		if (!page)