author     Tejun Heo <tj@kernel.org>               2011-07-12 04:46:34 -0400
committer  H. Peter Anvin <hpa@linux.intel.com>    2011-07-14 14:45:35 -0400
commit     e64980405cc6aa74ef178d8d9aa4018c867ceed1
tree       a405d3de9c244ed2fb7899bdb66b1e1569aeae97
parent     34e1845548418e5cecee0568ba721e1f089c092c
memblock: Separate out memblock_find_in_range_node()
Node affine memblock allocation logic is currently implemented across
memblock_alloc_nid() and memblock_alloc_nid_region().  Reorganize it
so that it resembles the non-NUMA allocation API.

Area finding is collected and moved into the new exported function
memblock_find_in_range_node(), which is symmetrical to its non-NUMA
counterpart - it handles @start/@end and understands ANYWHERE and
ACCESSIBLE.  memblock_alloc_nid() now simply calls
memblock_find_in_range_node() and reserves the returned area.
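
For illustration only (not part of this patch; the caller, size, and
node number below are invented), the exported finder pairs naturally
with memblock_reserve() - the same find-then-reserve sequence that
memblock_alloc_nid() now performs internally:

	phys_addr_t base;

	/* 1MB, 1MB-aligned, on node 1 only, below current_limit */
	base = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
					   0x100000, 0x100000, 1);
	if (base)
		memblock_reserve(base, 0x100000);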
This makes memblock_alloc[_try]_nid() observe the ACCESSIBLE limit on
node affine allocations too (again, this makes no difference for the
current sole user - sparc64).
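
Sketch of the two special @end values (illustrative, not from the
patch; size, align and nid are placeholders).  ACCESSIBLE is clamped
to memblock.current_limit by the "Pump up max_addr" check visible in
the hunk below, while ANYWHERE leaves the search range uncapped:

	/* search capped at memblock.current_limit */
	p = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
					size, align, nid);

	/* search across all registered memory */
	p = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ANYWHERE,
					size, align, nid);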
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310460395-30913-8-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
 include/linux/memblock.h |  4 ++++
 mm/memblock.c            | 57 ++++++++++++++++++++-----------------
 2 files changed, 36 insertions(+), 25 deletions(-)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 329ffb26c1c9..7400d029df48 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -61,6 +61,10 @@ extern long memblock_reserve(phys_addr_t base, phys_addr_t size);
 /* The numa aware allocator is only available if
  * CONFIG_ARCH_POPULATES_NODE_MAP is set
  */
+extern phys_addr_t memblock_find_in_range_node(phys_addr_t start,
+					       phys_addr_t end,
+					       phys_addr_t size,
+					       phys_addr_t align, int nid);
 extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
 				      int nid);
 extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
diff --git a/mm/memblock.c b/mm/memblock.c
index 447cf64304ba..a8edb422795b 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -521,49 +521,56 @@ static phys_addr_t __init memblock_nid_range_rev(phys_addr_t start,
 	return start;
 }
 
-static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
+phys_addr_t __init memblock_find_in_range_node(phys_addr_t start,
+					       phys_addr_t end,
 					       phys_addr_t size,
 					       phys_addr_t align, int nid)
 {
-	phys_addr_t start, end;
+	struct memblock_type *mem = &memblock.memory;
+	int i;
 
-	start = mp->base;
-	end = start + mp->size;
+	BUG_ON(0 == size);
 
-	while (start < end) {
-		phys_addr_t this_start;
-		int this_nid;
+	/* Pump up max_addr */
+	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
+		end = memblock.current_limit;
 
-		this_start = memblock_nid_range_rev(start, end, &this_nid);
-		if (this_nid == nid) {
-			phys_addr_t ret = memblock_find_region(this_start, end, size, align);
-			if (ret &&
-			    !memblock_add_region(&memblock.reserved, ret, size))
-				return ret;
+	for (i = mem->cnt - 1; i >= 0; i--) {
+		struct memblock_region *r = &mem->regions[i];
+		phys_addr_t base = max(start, r->base);
+		phys_addr_t top = min(end, r->base + r->size);
+
+		while (base < top) {
+			phys_addr_t tbase, ret;
+			int tnid;
+
+			tbase = memblock_nid_range_rev(base, top, &tnid);
+			if (nid == MAX_NUMNODES || tnid == nid) {
+				ret = memblock_find_region(tbase, top, size, align);
+				if (ret)
+					return ret;
+			}
+			top = tbase;
 		}
-		end = this_start;
 	}
+
 	return 0;
 }
 
 phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
-	struct memblock_type *mem = &memblock.memory;
-	int i;
-
-	BUG_ON(0 == size);
+	phys_addr_t found;
 
-	/* We align the size to limit fragmentation. Without this, a lot of
+	/*
+	 * We align the size to limit fragmentation. Without this, a lot of
 	 * small allocs quickly eat up the whole reserve array on sparc
 	 */
 	size = round_up(size, align);
 
-	for (i = mem->cnt - 1; i >= 0; i--) {
-		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
-							    size, align, nid);
-		if (ret)
-			return ret;
-	}
+	found = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
+					    size, align, nid);
+	if (found && !memblock_add_region(&memblock.reserved, found, size))
+		return found;
 
 	return 0;
 }
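
One more property visible in the new loop: passing MAX_NUMNODES as
@nid short-circuits the node check (nid == MAX_NUMNODES || tnid ==
nid), so the same function can also serve node-agnostic searches.  An
illustrative call, not part of this patch:

	/* any node is acceptable */
	phys_addr_t p = memblock_find_in_range_node(0,
					MEMBLOCK_ALLOC_ACCESSIBLE,
					PAGE_SIZE, PAGE_SIZE, MAX_NUMNODES);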