aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memblock.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2011-07-12 04:46:31 -0400
committerH. Peter Anvin <hpa@linux.intel.com>2011-07-14 14:45:32 -0400
commitb2fea988f4f3b38ff4edfc1556a843c91932804c (patch)
tree4ab1227ab0a607ea44ba554468bda547a4d51eb5 /mm/memblock.c
parentc13291a536b835b2ab278ab201f2cb1ce22f2785 (diff)
memblock: Improve generic memblock_nid_range() using for_each_mem_pfn_range()
Given an address range, memblock_nid_range() determines the node the start of the range belongs to and up to where the range stays in the same node. It's implemented by calling get_pfn_range_for_nid(), which determines min and max pfns for a given node, for each node and testing whether start address falls in there. This is not only inefficient but also incorrect when nodes interleave as min-max ranges for nodes overlap. This patch reimplements memblock_nid_range() using for_each_mem_pfn_range(). It's simpler, walks the mem ranges once and can find the exact range the start address falls in. Signed-off-by: Tejun Heo <tj@kernel.org> Link: http://lkml.kernel.org/r/1310460395-30913-5-git-send-email-tj@kernel.org Cc: Yinghai Lu <yinghai@kernel.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'mm/memblock.c')
-rw-r--r--mm/memblock.c20
1 file changed, 3 insertions, 17 deletions
diff --git a/mm/memblock.c b/mm/memblock.c
index 0f9626f01b5e..97f3486ce4d6 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -511,28 +511,14 @@ phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
511phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid) 511phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
512{ 512{
513#ifdef CONFIG_ARCH_POPULATES_NODE_MAP 513#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
514 /*
515 * This code originates from sparc which really wants use to walk by addresses
516 * and returns the nid. This is not very convenient for early_pfn_map[] users
517 * as the map isn't sorted yet, and it really wants to be walked by nid.
518 *
519 * For now, I implement the inefficient method below which walks the early
520 * map multiple times. Eventually we may want to use an ARCH config option
521 * to implement a completely different method for both case.
522 */
523 unsigned long start_pfn, end_pfn; 514 unsigned long start_pfn, end_pfn;
524 int i; 515 int i;
525 516
526 for (i = 0; i < MAX_NUMNODES; i++) { 517 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, nid)
527 get_pfn_range_for_nid(i, &start_pfn, &end_pfn); 518 if (start >= PFN_PHYS(start_pfn) && start < PFN_PHYS(end_pfn))
528 if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn)) 519 return min(end, PFN_PHYS(end_pfn));
529 continue;
530 *nid = i;
531 return min(end, PFN_PHYS(end_pfn));
532 }
533#endif 520#endif
534 *nid = 0; 521 *nid = 0;
535
536 return end; 522 return end;
537} 523}
538 524