Diffstat (limited to 'mm/memblock.c'):

 mm/memblock.c | 117 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 78 insertions(+), 39 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 5bbb87f59aee..a1e96a0fda00 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -400,6 +400,77 @@ int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
 	return memblock_add_region(&memblock.memory, base, size);
 }
 
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+/**
+ * memblock_isolate_range - isolate given range into disjoint memblocks
+ * @type: memblock type to isolate range for
+ * @base: base of range to isolate
+ * @size: size of range to isolate
+ * @start_rgn: out parameter for the start of isolated region
+ * @end_rgn: out parameter for the end of isolated region
+ *
+ * Walk @type and ensure that regions don't cross the boundaries defined by
+ * [@base,@base+@size). Crossing regions are split at the boundaries,
+ * which may create at most two more regions. The index of the first
+ * region inside the range is returned in *@start_rgn and end in *@end_rgn.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int __init_memblock memblock_isolate_range(struct memblock_type *type,
+					phys_addr_t base, phys_addr_t size,
+					int *start_rgn, int *end_rgn)
+{
+	phys_addr_t end = base + size;
+	int i;
+
+	*start_rgn = *end_rgn = 0;
+
+	/* we'll create at most two more regions */
+	while (type->cnt + 2 > type->max)
+		if (memblock_double_array(type) < 0)
+			return -ENOMEM;
+
+	for (i = 0; i < type->cnt; i++) {
+		struct memblock_region *rgn = &type->regions[i];
+		phys_addr_t rbase = rgn->base;
+		phys_addr_t rend = rbase + rgn->size;
+
+		if (rbase >= end)
+			break;
+		if (rend <= base)
+			continue;
+
+		if (rbase < base) {
+			/*
+			 * @rgn intersects from below. Split and continue
+			 * to process the next region - the new top half.
+			 */
+			rgn->base = base;
+			rgn->size = rend - rgn->base;
+			memblock_insert_region(type, i, rbase, base - rbase,
+					       rgn->nid);
+		} else if (rend > end) {
+			/*
+			 * @rgn intersects from above. Split and redo the
+			 * current region - the new bottom half.
+			 */
+			rgn->base = end;
+			rgn->size = rend - rgn->base;
+			memblock_insert_region(type, i--, rbase, end - rbase,
+					       rgn->nid);
+		} else {
+			/* @rgn is fully contained, record it */
+			if (!*end_rgn)
+				*start_rgn = i;
+			*end_rgn = i + 1;
+		}
+	}
+
+	return 0;
+}
+#endif
+
 static int __init_memblock __memblock_remove(struct memblock_type *type,
 					     phys_addr_t base, phys_addr_t size)
 {
@@ -603,47 +674,15 @@ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
 				      int nid)
 {
 	struct memblock_type *type = &memblock.memory;
-	phys_addr_t end = base + size;
-	int i;
+	int start_rgn, end_rgn;
+	int i, ret;
 
-	/* we'll create at most two more regions */
-	while (type->cnt + 2 > type->max)
-		if (memblock_double_array(type) < 0)
-			return -ENOMEM;
+	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
+	if (ret)
+		return ret;
 
-	for (i = 0; i < type->cnt; i++) {
-		struct memblock_region *rgn = &type->regions[i];
-		phys_addr_t rbase = rgn->base;
-		phys_addr_t rend = rbase + rgn->size;
-
-		if (rbase >= end)
-			break;
-		if (rend <= base)
-			continue;
-
-		if (rbase < base) {
-			/*
-			 * @rgn intersects from below. Split and continue
-			 * to process the next region - the new top half.
-			 */
-			rgn->base = base;
-			rgn->size = rend - rgn->base;
-			memblock_insert_region(type, i, rbase, base - rbase,
-					       rgn->nid);
-		} else if (rend > end) {
-			/*
-			 * @rgn intersects from above. Split and redo the
-			 * current region - the new bottom half.
-			 */
-			rgn->base = end;
-			rgn->size = rend - rgn->base;
-			memblock_insert_region(type, i--, rbase, end - rbase,
-					       rgn->nid);
-		} else {
-			/* @rgn is fully contained, set ->nid */
-			rgn->nid = nid;
-		}
-	}
+	for (i = start_rgn; i < end_rgn; i++)
+		type->regions[i].nid = nid;
 
 	memblock_merge_regions(type);
 	return 0;
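
For context, a minimal sketch (not part of the patch above) of how another caller could build on memblock_isolate_range(): isolate the range first, then operate only on the regions that now fall entirely inside it. The caller name example_remove_range() is hypothetical; memblock_remove_region() is the existing static helper in mm/memblock.c that drops region index r from the array.

/*
 * Illustration only - not part of this commit. Remove every region that
 * falls inside [base, base + size).
 */
static int __init_memblock example_remove_range(struct memblock_type *type,
						phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn, i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	/*
	 * After isolation, regions [start_rgn, end_rgn) lie entirely inside
	 * [base, base + size), so they can be dropped without any further
	 * splitting. Walk backwards so pending indices stay valid.
	 */
	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

Note that memblock_isolate_range() is introduced under CONFIG_HAVE_MEMBLOCK_NODE_MAP in this patch, so a caller like the sketch above would either need the same #ifdef guard or the helper would have to be moved out of it.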