diff options
author | Tejun Heo <tj@kernel.org> | 2011-07-12 04:46:33 -0400 |
---|---|---|
committer | H. Peter Anvin <hpa@linux.intel.com> | 2011-07-14 14:45:34 -0400 |
commit | 34e1845548418e5cecee0568ba721e1f089c092c (patch) | |
tree | 85e9cafe7a9184fc0f5efd77964f1df04239c89f /mm/memblock.c | |
parent | f9b18db3b1cedc75e5d002a4d7097891c3399736 (diff) |
memblock: Make memblock_alloc_[try_]nid() top-down
NUMA aware memblock alloc functions - memblock_alloc_[try_]nid() -
weren't properly top-down because memblock_nid_range() scanned
forward. This patch reverses memblock_nid_range(), renames it to
memblock_nid_range_rev() and updates related functions to implement
proper top-down allocation.
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310460395-30913-7-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'mm/memblock.c')
-rw-r--r-- | mm/memblock.c | 29 |
1 file changed, 11 insertions, 18 deletions
diff --git a/mm/memblock.c b/mm/memblock.c index 22cd999b0d4e..447cf64304ba 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -499,27 +499,26 @@ phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align) | |||
499 | 499 | ||
500 | 500 | ||
501 | /* | 501 | /* |
502 | * Additional node-local allocators. Search for node memory is bottom up | 502 | * Additional node-local top-down allocators. |
503 | * and walks memblock regions within that node bottom-up as well, but allocation | ||
504 | * within an memblock region is top-down. XXX I plan to fix that at some stage | ||
505 | * | 503 | * |
506 | * WARNING: Only available after early_node_map[] has been populated, | 504 | * WARNING: Only available after early_node_map[] has been populated, |
507 | * on some architectures, that is after all the calls to add_active_range() | 505 | * on some architectures, that is after all the calls to add_active_range() |
508 | * have been done to populate it. | 506 | * have been done to populate it. |
509 | */ | 507 | */ |
510 | 508 | ||
511 | static phys_addr_t __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid) | 509 | static phys_addr_t __init memblock_nid_range_rev(phys_addr_t start, |
510 | phys_addr_t end, int *nid) | ||
512 | { | 511 | { |
513 | #ifdef CONFIG_ARCH_POPULATES_NODE_MAP | 512 | #ifdef CONFIG_ARCH_POPULATES_NODE_MAP |
514 | unsigned long start_pfn, end_pfn; | 513 | unsigned long start_pfn, end_pfn; |
515 | int i; | 514 | int i; |
516 | 515 | ||
517 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, nid) | 516 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, nid) |
518 | if (start >= PFN_PHYS(start_pfn) && start < PFN_PHYS(end_pfn)) | 517 | if (end > PFN_PHYS(start_pfn) && end <= PFN_PHYS(end_pfn)) |
519 | return min(end, PFN_PHYS(end_pfn)); | 518 | return max(start, PFN_PHYS(start_pfn)); |
520 | #endif | 519 | #endif |
521 | *nid = 0; | 520 | *nid = 0; |
522 | return end; | 521 | return start; |
523 | } | 522 | } |
524 | 523 | ||
525 | static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp, | 524 | static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp, |
@@ -531,21 +530,19 @@ static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp, | |||
531 | start = mp->base; | 530 | start = mp->base; |
532 | end = start + mp->size; | 531 | end = start + mp->size; |
533 | 532 | ||
534 | start = round_up(start, align); | ||
535 | while (start < end) { | 533 | while (start < end) { |
536 | phys_addr_t this_end; | 534 | phys_addr_t this_start; |
537 | int this_nid; | 535 | int this_nid; |
538 | 536 | ||
539 | this_end = memblock_nid_range(start, end, &this_nid); | 537 | this_start = memblock_nid_range_rev(start, end, &this_nid); |
540 | if (this_nid == nid) { | 538 | if (this_nid == nid) { |
541 | phys_addr_t ret = memblock_find_region(start, this_end, size, align); | 539 | phys_addr_t ret = memblock_find_region(this_start, end, size, align); |
542 | if (ret && | 540 | if (ret && |
543 | !memblock_add_region(&memblock.reserved, ret, size)) | 541 | !memblock_add_region(&memblock.reserved, ret, size)) |
544 | return ret; | 542 | return ret; |
545 | } | 543 | } |
546 | start = this_end; | 544 | end = this_start; |
547 | } | 545 | } |
548 | |||
549 | return 0; | 546 | return 0; |
550 | } | 547 | } |
551 | 548 | ||
@@ -561,11 +558,7 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n | |||
561 | */ | 558 | */ |
562 | size = round_up(size, align); | 559 | size = round_up(size, align); |
563 | 560 | ||
564 | /* We do a bottom-up search for a region with the right | 561 | for (i = mem->cnt - 1; i >= 0; i--) { |
565 | * nid since that's easier considering how memblock_nid_range() | ||
566 | * works | ||
567 | */ | ||
568 | for (i = 0; i < mem->cnt; i++) { | ||
569 | phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i], | 562 | phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i], |
570 | size, align, nid); | 563 | size, align, nid); |
571 | if (ret) | 564 | if (ret) |