Diffstat (limited to 'mm/memblock.c')
-rw-r--r--  mm/memblock.c  398
1 file changed, 357 insertions(+), 41 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 53e477bb5558..7fe5354e7552 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -21,6 +21,9 @@
 #include <linux/memblock.h>
 
 #include <asm-generic/sections.h>
+#include <linux/io.h>
+
+#include "internal.h"
 
 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
@@ -39,6 +42,9 @@ struct memblock memblock __initdata_memblock = {
 };
 
 int memblock_debug __initdata_memblock;
+#ifdef CONFIG_MOVABLE_NODE
+bool movable_node_enabled __initdata_memblock = false;
+#endif
 static int memblock_can_resize __initdata_memblock;
 static int memblock_memory_in_slab __initdata_memblock = 0;
 static int memblock_reserved_in_slab __initdata_memblock = 0;
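
The flag above is read through a small inline helper; a sketch of the companion declarations one would expect in include/linux/memblock.h (assumed here, not part of this hunk):

/* Assumed companion helper; not part of this diff. */
#ifdef CONFIG_MOVABLE_NODE
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
        return movable_node_enabled;
}
#else
static inline bool movable_node_is_enabled(void)
{
        return false;   /* without CONFIG_MOVABLE_NODE nothing is skipped */
}
#endif

Keeping the stub a constant false lets the compiler drop the skip logic entirely on configurations without movable nodes.
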
@@ -91,7 +97,7 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
  * @size: size of free area to find
  * @align: alignment of free area to find
- * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
  *
  * Utility called from memblock_find_in_range_node(), find free area bottom-up.
  *
@@ -123,7 +129,7 @@ __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
  * @size: size of free area to find
  * @align: alignment of free area to find
- * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
  *
  * Utility called from memblock_find_in_range_node(), find free area top-down.
  *
@@ -154,11 +160,11 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
 
 /**
  * memblock_find_in_range_node - find free area in given range and node
- * @start: start of candidate range
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
  * @size: size of free area to find
  * @align: alignment of free area to find
- * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ * @start: start of candidate range
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
  *
  * Find @size free area aligned to @align in the specified range and node.
  *
@@ -173,9 +179,9 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
  * RETURNS:
  * Found address on success, 0 on failure.
  */
-phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
-                                        phys_addr_t end, phys_addr_t size,
-                                        phys_addr_t align, int nid)
+phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
+                                        phys_addr_t align, phys_addr_t start,
+                                        phys_addr_t end, int nid)
 {
         int ret;
         phys_addr_t kernel_end;
@@ -238,8 +244,8 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
                                         phys_addr_t end, phys_addr_t size,
                                         phys_addr_t align)
 {
-        return memblock_find_in_range_node(start, end, size, align,
-                                           MAX_NUMNODES);
+        return memblock_find_in_range_node(size, align, start, end,
+                                           NUMA_NO_NODE);
 }
 
 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
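
New callers use the reordered signature directly; a minimal caller sketch (function name hypothetical):

/* Hypothetical caller: 1 MiB, 4 KiB aligned, below 4 GiB, any node.
 * Note the new argument order — size, align, start, end, nid. */
static phys_addr_t __init find_example_area(void)
{
        return memblock_find_in_range_node(0x100000, 0x1000, 0,
                                           0x100000000ULL, NUMA_NO_NODE);
}

Putting size and align first keeps the argument order consistent with the memblock_alloc_*() helpers.
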
@@ -255,10 +261,13 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
                 type->cnt = 1;
                 type->regions[0].base = 0;
                 type->regions[0].size = 0;
+                type->regions[0].flags = 0;
                 memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
         }
 }
 
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+
 phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
                                         phys_addr_t *addr)
 {
@@ -271,6 +280,20 @@ phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
                           memblock.reserved.max);
 }
 
+phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
+                                        phys_addr_t *addr)
+{
+        if (memblock.memory.regions == memblock_memory_init_regions)
+                return 0;
+
+        *addr = __pa(memblock.memory.regions);
+
+        return PAGE_ALIGN(sizeof(struct memblock_region) *
+                          memblock.memory.max);
+}
+
+#endif
+
 /**
  * memblock_double_array - double the size of the memblock regions array
  * @type: memblock type of the regions array being doubled
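
Both *_regions_info() helpers report where a runtime-doubled regions array lives so that, under CONFIG_ARCH_DISCARD_MEMBLOCK, teardown code can return those pages once memblock is discarded. A hedged caller sketch (free_region_pages() is a hypothetical stand-in for the arch's page-freeing path):

/* Assumed caller sketch: locate and release a runtime-doubled array. */
static unsigned long __init free_memory_regions_array(void)
{
        phys_addr_t start, size;

        size = get_allocated_memblock_memory_regions_info(&start);
        if (!size)      /* 0 means the static __initdata array is in use */
                return 0;

        return free_region_pages(start, start + size); /* hypothetical */
}
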
@@ -405,7 +428,8 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
 
                 if (this->base + this->size != next->base ||
                     memblock_get_region_node(this) !=
-                    memblock_get_region_node(next)) {
+                    memblock_get_region_node(next) ||
+                    this->flags != next->flags) {
                         BUG_ON(this->base + this->size > next->base);
                         i++;
                         continue;
@@ -425,13 +449,15 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
  * @base: base address of the new region
  * @size: size of the new region
  * @nid: node id of the new region
+ * @flags: flags of the new region
  *
  * Insert new memblock region [@base,@base+@size) into @type at @idx.
  * @type must already have extra room to accommodate the new region.
  */
 static void __init_memblock memblock_insert_region(struct memblock_type *type,
                                                    int idx, phys_addr_t base,
-                                                   phys_addr_t size, int nid)
+                                                   phys_addr_t size,
+                                                   int nid, unsigned long flags)
 {
         struct memblock_region *rgn = &type->regions[idx];
 
@@ -439,6 +465,7 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type,
         memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
         rgn->base = base;
         rgn->size = size;
+        rgn->flags = flags;
         memblock_set_region_node(rgn, nid);
         type->cnt++;
         type->total_size += size;
@@ -450,6 +477,7 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type,
  * @base: base address of the new region
  * @size: size of the new region
  * @nid: nid of the new region
+ * @flags: flags of the new region
  *
  * Add new memblock region [@base,@base+@size) into @type.  The new region
  * is allowed to overlap with existing ones - overlaps don't affect already
@@ -460,7 +488,8 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type,
  * 0 on success, -errno on failure.
  */
 static int __init_memblock memblock_add_region(struct memblock_type *type,
-                                phys_addr_t base, phys_addr_t size, int nid)
+                                phys_addr_t base, phys_addr_t size,
+                                int nid, unsigned long flags)
 {
         bool insert = false;
         phys_addr_t obase = base;
@@ -475,6 +504,7 @@ static int __init_memblock memblock_add_region(struct memblock_type *type,
                 WARN_ON(type->cnt != 1 || type->total_size);
                 type->regions[0].base = base;
                 type->regions[0].size = size;
+                type->regions[0].flags = flags;
                 memblock_set_region_node(&type->regions[0], nid);
                 type->total_size = size;
                 return 0;
@@ -505,7 +535,8 @@ repeat:
                         nr_new++;
                         if (insert)
                                 memblock_insert_region(type, i++, base,
-                                                       rbase - base, nid);
+                                                       rbase - base, nid,
+                                                       flags);
                 }
                 /* area below @rend is dealt with, forget about it */
                 base = min(rend, end);
@@ -515,7 +546,8 @@ repeat:
         if (base < end) {
                 nr_new++;
                 if (insert)
-                        memblock_insert_region(type, i, base, end - base, nid);
+                        memblock_insert_region(type, i, base, end - base,
+                                               nid, flags);
         }
 
         /*
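
The two hunks above thread @flags through both insertion sites of memblock_add_region()'s repeat: loop. For orientation, a condensed sketch of that two-pass scheme (first pass only counts, second pass inserts; the region walk is elided):

/* Condensed sketch of the two-pass insert in memblock_add_region().
 * Function name is hypothetical; the real logic lives inline above. */
static int __init_memblock add_region_sketch(struct memblock_type *type,
                                             phys_addr_t base, phys_addr_t size,
                                             int nid, unsigned long flags)
{
        bool insert = false;
        phys_addr_t obase = base;
        int nr_new;

repeat:
        nr_new = 0;
        /* ...walk regions, splitting [base, base + size) around overlaps;
         * each non-overlapping piece bumps nr_new and, when insert is true,
         * is placed via memblock_insert_region(..., nid, flags)... */

        if (!insert) {
                while (type->cnt + nr_new > type->max)
                        if (memblock_double_array(type, obase, size) < 0)
                                return -ENOMEM;
                insert = true;
                base = obase;
                goto repeat;    /* second pass performs the real inserts */
        }
        memblock_merge_regions(type);
        return 0;
}
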
@@ -537,12 +569,13 @@ repeat:
 int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
                                        int nid)
 {
-        return memblock_add_region(&memblock.memory, base, size, nid);
+        return memblock_add_region(&memblock.memory, base, size, nid, 0);
 }
 
 int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
 {
-        return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
+        return memblock_add_region(&memblock.memory, base, size,
+                                   MAX_NUMNODES, 0);
 }
 
 /**
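
Callers of the public wrappers are unaffected by the extra flags argument; an illustrative registration sequence (values invented for the example):

/* Illustrative: register a 512 MiB bank at 1 GiB, unflagged. */
static void __init register_example_banks(void)
{
        memblock_add(0x40000000ULL, 0x20000000ULL);

        /* or, when the node is already known from the topology: */
        memblock_add_node(0x60000000ULL, 0x20000000ULL, 1);
}
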
@@ -597,7 +630,8 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,
                         rgn->size -= base - rbase;
                         type->total_size -= base - rbase;
                         memblock_insert_region(type, i, rbase, base - rbase,
-                                               memblock_get_region_node(rgn));
+                                               memblock_get_region_node(rgn),
+                                               rgn->flags);
                 } else if (rend > end) {
                         /*
                          * @rgn intersects from above.  Split and redo the
@@ -607,7 +641,8 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,
                         rgn->size -= end - rbase;
                         type->total_size -= end - rbase;
                         memblock_insert_region(type, i--, rbase, end - rbase,
-                                               memblock_get_region_node(rgn),
+                                               memblock_get_region_node(rgn),
+                                               rgn->flags);
                 } else {
                         /* @rgn is fully contained, record it */
                         if (!*end_rgn)
@@ -643,28 +678,89 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
 {
         memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
                      (unsigned long long)base,
-                     (unsigned long long)base + size,
+                     (unsigned long long)base + size - 1,
                      (void *)_RET_IP_);
 
         return __memblock_remove(&memblock.reserved, base, size);
 }
 
-int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
+static int __init_memblock memblock_reserve_region(phys_addr_t base,
+                                                   phys_addr_t size,
+                                                   int nid,
+                                                   unsigned long flags)
 {
         struct memblock_type *_rgn = &memblock.reserved;
 
-        memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
+        memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
                      (unsigned long long)base,
-                     (unsigned long long)base + size,
-                     (void *)_RET_IP_);
+                     (unsigned long long)base + size - 1,
+                     flags, (void *)_RET_IP_);
+
+        return memblock_add_region(_rgn, base, size, nid, flags);
+}
+
+int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
+{
+        return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
+}
+
+/**
+ * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
+ * @base: the base phys addr of the region
+ * @size: the size of the region
+ *
+ * This function isolates region [@base, @base + @size), and marks it with
+ * flag MEMBLOCK_HOTPLUG.
+ *
+ * Return 0 on success, -errno on failure.
+ */
+int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
+{
+        struct memblock_type *type = &memblock.memory;
+        int i, ret, start_rgn, end_rgn;
+
+        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
+        if (ret)
+                return ret;
+
+        for (i = start_rgn; i < end_rgn; i++)
+                memblock_set_region_flags(&type->regions[i], MEMBLOCK_HOTPLUG);
+
+        memblock_merge_regions(type);
+        return 0;
+}
+
+/**
+ * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
+ * @base: the base phys addr of the region
+ * @size: the size of the region
+ *
+ * This function isolates region [@base, @base + @size), and clears flag
+ * MEMBLOCK_HOTPLUG for the isolated regions.
+ *
+ * Return 0 on success, -errno on failure.
+ */
+int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
+{
+        struct memblock_type *type = &memblock.memory;
+        int i, ret, start_rgn, end_rgn;
+
+        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
+        if (ret)
+                return ret;
+
+        for (i = start_rgn; i < end_rgn; i++)
+                memblock_clear_region_flags(&type->regions[i],
+                                            MEMBLOCK_HOTPLUG);
 
-        return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
+        memblock_merge_regions(type);
+        return 0;
 }
 
 /**
  * __next_free_mem_range - next function for for_each_free_mem_range()
  * @idx: pointer to u64 loop variable
- * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @out_nid: ptr to int for nid of the range, can be %NULL
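
A likely consumer of the new mark/clear pair is early firmware parsing (e.g. SRAT on x86) that learns which ranges are hotpluggable; a hedged caller sketch:

/* Assumed caller sketch: flag a hotpluggable range reported by firmware. */
static void __init mark_hotplug_range(u64 start, u64 end)
{
        if (memblock_mark_hotplug(start, end - start))
                pr_warn("failed to mark [%#llx-%#llx) as hotpluggable\n",
                        start, end);
}

The isolate-then-flag-then-merge pattern keeps flag boundaries exact while still coalescing neighbouring regions that end up with identical flags.
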
@@ -693,13 +789,16 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
         int mi = *idx & 0xffffffff;
         int ri = *idx >> 32;
 
+        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
+                nid = NUMA_NO_NODE;
+
         for ( ; mi < mem->cnt; mi++) {
                 struct memblock_region *m = &mem->regions[mi];
                 phys_addr_t m_start = m->base;
                 phys_addr_t m_end = m->base + m->size;
 
                 /* only memory regions are associated with nodes, check it */
-                if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
+                if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m))
                         continue;
 
                 /* scan areas before each reservation for intersection */
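
Iteration itself is unchanged apart from the deprecation warning; a minimal sketch using the existing for_each_free_mem_range() wrapper (helper name hypothetical):

/* Sketch: walk all free (memory && !reserved) ranges on any node. */
static void __init dump_free_ranges(void)
{
        phys_addr_t start, end;
        int nid;
        u64 i;

        for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, &nid)
                pr_info("free: [%#llx-%#llx) node %d\n",
                        (u64)start, (u64)end, nid);
}
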
@@ -740,12 +839,17 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
 /**
  * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
  * @idx: pointer to u64 loop variable
- * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @out_nid: ptr to int for nid of the range, can be %NULL
  *
  * Reverse of __next_free_mem_range().
+ *
+ * The Linux kernel cannot migrate pages used by itself. Memory hotplug users
+ * won't be able to hot-remove hotpluggable memory used by the kernel. So this
+ * function skips hotpluggable regions if needed when allocating memory for the
+ * kernel.
  */
 void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
                                            phys_addr_t *out_start,
@@ -756,6 +860,9 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
         int mi = *idx & 0xffffffff;
         int ri = *idx >> 32;
 
+        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
+                nid = NUMA_NO_NODE;
+
         if (*idx == (u64)ULLONG_MAX) {
                 mi = mem->cnt - 1;
                 ri = rsv->cnt;
@@ -767,7 +874,11 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
                 phys_addr_t m_end = m->base + m->size;
 
                 /* only memory regions are associated with nodes, check it */
-                if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
+                if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m))
+                        continue;
+
+                /* skip hotpluggable memory regions if needed */
+                if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
                         continue;
 
                 /* scan areas before each reservation for intersection */
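
Since memblock allocates top-down through this reverse iterator, the added check is what keeps boot-time allocations out of hotpluggable memory once movable_node is in effect. The predicate, restated as a standalone helper (name hypothetical):

/* Hypothetical restatement of the per-region skip decision above. */
static bool __init_memblock should_skip_region(struct memblock_region *m)
{
        /* hotpluggable memory must stay free of unmovable kernel data */
        return movable_node_is_enabled() && memblock_is_hotpluggable(m);
}
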
@@ -837,18 +948,18 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
  * memblock_set_node - set node ID on memblock regions
  * @base: base of area to set node ID for
  * @size: size of area to set node ID for
+ * @type: memblock type to set node ID for
  * @nid: node ID to set
  *
- * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
+ * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
  * Regions which cross the area boundaries are split as necessary.
  *
  * RETURNS:
  * 0 on success, -errno on failure.
  */
 int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
-                                      int nid)
+                                      struct memblock_type *type, int nid)
 {
-        struct memblock_type *type = &memblock.memory;
         int start_rgn, end_rgn;
         int i, ret;
 
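
Existing callers gain an explicit type argument; an illustrative helper showing both uses (name hypothetical):

/* Illustrative: callers now name the type they are tagging. */
static void __init set_node_example(phys_addr_t start, phys_addr_t end,
                                    int nid)
{
        /* &memblock.memory restores the old behaviour... */
        memblock_set_node(start, end - start, &memblock.memory, nid);
        /* ...and reserved regions can now carry node ids as well */
        memblock_set_node(start, end - start, &memblock.reserved, nid);
}
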
@@ -870,13 +981,10 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
 {
         phys_addr_t found;
 
-        if (WARN_ON(!align))
-                align = __alignof__(long long);
+        if (!align)
+                align = SMP_CACHE_BYTES;
 
-        /* align @size to avoid excessive fragmentation on reserved array */
-        size = round_up(size, align);
-
-        found = memblock_find_in_range_node(0, max_addr, size, align, nid);
+        found = memblock_find_in_range_node(size, align, 0, max_addr, nid);
         if (found && !memblock_reserve(found, size))
                 return found;
 
@@ -890,7 +998,7 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n
 
 phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
-        return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
+        return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE);
 }
 
 phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
@@ -920,6 +1028,207 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
         return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
+/**
+ * memblock_virt_alloc_internal - allocate boot memory block
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @min_addr: the lower bound of the memory region to allocate (phys address)
+ * @max_addr: the upper bound of the memory region to allocate (phys address)
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * The @min_addr limit is dropped if it can not be satisfied and the allocation
+ * will fall back to memory below @min_addr. Also, allocation may fall back
+ * to any node in the system if the specified node can not
+ * hold the requested memory.
+ *
+ * The allocation is performed from memory region limited by
+ * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
+ *
+ * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
+ *
+ * The phys address of the allocated boot memory block is converted to virtual
+ * and the allocated memory is reset to 0.
+ *
+ * In addition, the function sets the min_count to 0 using kmemleak_alloc for
+ * the allocated boot memory block, so that it is never reported as a leak.
+ *
+ * RETURNS:
+ * Virtual address of allocated memory block on success, NULL on failure.
+ */
+static void * __init memblock_virt_alloc_internal(
+                                phys_addr_t size, phys_addr_t align,
+                                phys_addr_t min_addr, phys_addr_t max_addr,
+                                int nid)
+{
+        phys_addr_t alloc;
+        void *ptr;
+
+        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
+                nid = NUMA_NO_NODE;
+
+        /*
+         * Detect any accidental use of these APIs after slab is ready, as at
+         * this moment memblock may be deinitialized already and its
+         * internal data may be destroyed (after execution of free_all_bootmem)
+         */
+        if (WARN_ON_ONCE(slab_is_available()))
+                return kzalloc_node(size, GFP_NOWAIT, nid);
+
+        if (!align)
+                align = SMP_CACHE_BYTES;
+
+        if (max_addr > memblock.current_limit)
+                max_addr = memblock.current_limit;
+
+again:
+        alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
+                                            nid);
+        if (alloc)
+                goto done;
+
+        if (nid != NUMA_NO_NODE) {
+                alloc = memblock_find_in_range_node(size, align, min_addr,
+                                                    max_addr, NUMA_NO_NODE);
+                if (alloc)
+                        goto done;
+        }
+
+        if (min_addr) {
+                min_addr = 0;
+                goto again;
+        } else {
+                goto error;
+        }
+
+done:
+        memblock_reserve(alloc, size);
+        ptr = phys_to_virt(alloc);
+        memset(ptr, 0, size);
+
+        /*
+         * The min_count is set to 0 so that bootmem allocated blocks
+         * are never reported as leaks. This is because many of these blocks
+         * are only referred via the physical address which is not
+         * looked up by kmemleak.
+         */
+        kmemleak_alloc(ptr, size, 0, 0);
+
+        return ptr;
+
+error:
+        return NULL;
+}
+
+/**
+ * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @min_addr: the lower bound of the memory region from where the allocation
+ *        is preferred (phys address)
+ * @max_addr: the upper bound of the memory region from where the allocation
+ *        is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ *        allocate only from memory limited by memblock.current_limit value
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * Public version of memblock_virt_alloc_internal() which provides
+ * additional debug information (including caller info), if enabled.
+ *
+ * RETURNS:
+ * Virtual address of allocated memory block on success, NULL on failure.
+ */
+void * __init memblock_virt_alloc_try_nid_nopanic(
+                                phys_addr_t size, phys_addr_t align,
+                                phys_addr_t min_addr, phys_addr_t max_addr,
+                                int nid)
+{
+        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
+                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
+                     (u64)max_addr, (void *)_RET_IP_);
+        return memblock_virt_alloc_internal(size, align, min_addr,
+                                             max_addr, nid);
+}
+
+/**
+ * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @min_addr: the lower bound of the memory region from where the allocation
+ *        is preferred (phys address)
+ * @max_addr: the upper bound of the memory region from where the allocation
+ *        is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ *        allocate only from memory limited by memblock.current_limit value
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
+ * which provides debug information (including caller info), if enabled,
+ * and panics if the request can not be satisfied.
+ *
+ * RETURNS:
+ * Virtual address of allocated memory block on success, NULL on failure.
+ */
+void * __init memblock_virt_alloc_try_nid(
+                        phys_addr_t size, phys_addr_t align,
+                        phys_addr_t min_addr, phys_addr_t max_addr,
+                        int nid)
+{
+        void *ptr;
+
+        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
+                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
+                     (u64)max_addr, (void *)_RET_IP_);
+        ptr = memblock_virt_alloc_internal(size, align,
+                                           min_addr, max_addr, nid);
+        if (ptr)
+                return ptr;
+
+        panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
+              __func__, (u64)size, (u64)align, nid, (u64)min_addr,
+              (u64)max_addr);
+        return NULL;
+}
+
+/**
+ * __memblock_free_early - free boot memory block
+ * @base: phys starting address of the boot memory block
+ * @size: size of the boot memory block in bytes
+ *
+ * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
+ * The freed memory will not be released to the buddy allocator.
+ */
+void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
+{
+        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
+                     __func__, (u64)base, (u64)base + size - 1,
+                     (void *)_RET_IP_);
+        kmemleak_free_part(__va(base), size);
+        __memblock_remove(&memblock.reserved, base, size);
+}
+
+/*
+ * __memblock_free_late - free bootmem block pages directly to the buddy
+ * allocator
+ * @base: phys starting address of the boot memory block
+ * @size: size of the boot memory block in bytes
+ *
+ * This is only useful when the bootmem allocator has already been torn
+ * down, but we are still initializing the system.  Pages are released directly
+ * to the buddy allocator, no bootmem metadata is updated because it is gone.
+ */
+void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
+{
+        u64 cursor, end;
+
+        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
+                     __func__, (u64)base, (u64)base + size - 1,
+                     (void *)_RET_IP_);
+        kmemleak_free_part(__va(base), size);
+        cursor = PFN_UP(base);
+        end = PFN_DOWN(base + size);
+
+        for (; cursor < end; cursor++) {
+                __free_pages_bootmem(pfn_to_page(cursor), 0);
+                totalram_pages++;
+        }
+}
 
 /*
  * Remaining API functions
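
Together these form the memblock-backed replacement for open-coded bootmem allocations; a hedged usage sketch (caller name hypothetical):

/* Sketch: zeroed, page-aligned boot-time allocation, preferring @nid. */
static void * __init alloc_boot_table(int nid)
{
        void *table;

        /* limited by memblock.current_limit via BOOTMEM_ALLOC_ACCESSIBLE */
        table = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, 0,
                                            BOOTMEM_ALLOC_ACCESSIBLE, nid);
        /* the panicking variant never returns NULL, so no error check */
        return table;
}

If such a block later turns out to be unnecessary, __memblock_free_early(__pa(table), PAGE_SIZE) hands it back while memblock is still live; __memblock_free_late() is for the window after bootmem teardown, when pages must go straight to the buddy allocator.
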
@@ -1098,9 +1407,15 @@ void __init_memblock memblock_set_current_limit(phys_addr_t limit)
         memblock.current_limit = limit;
 }
 
+phys_addr_t __init_memblock memblock_get_current_limit(void)
+{
+        return memblock.current_limit;
+}
+
 static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
 {
         unsigned long long base, size;
+        unsigned long flags;
         int i;
 
         pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);
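
The new getter pairs with memblock_set_current_limit() so callers need not reach into the structure; an illustrative use (helper name hypothetical):

/* Illustrative: clamp a probe window to what memblock may allocate. */
static phys_addr_t __init clamp_to_limit(phys_addr_t end)
{
        phys_addr_t limit = memblock_get_current_limit();

        return min(end, limit);
}
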
@@ -1111,13 +1426,14 @@ static void __init_memblock memblock_dump(struct memblock_type *type, char *name
 
                 base = rgn->base;
                 size = rgn->size;
+                flags = rgn->flags;
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
                 if (memblock_get_region_node(rgn) != MAX_NUMNODES)
                         snprintf(nid_buf, sizeof(nid_buf), " on node %d",
                                  memblock_get_region_node(rgn));
 #endif
-                pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
-                        name, i, base, base + size - 1, size, nid_buf);
+                pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
+                        name, i, base, base + size - 1, size, nid_buf, flags);
         }
 }
 