path: root/mm
author	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-12 13:39:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-12 13:39:53 -0400
commit	a667cb7a94d48a483fb5d6006fe04a440f1a42ce (patch)
tree	ccb9e596db96d53fcc4ba13a3370ec84912d0f22 /mm
parent	cb1d150d809e2409725ba275c5101c4fc4465b8e (diff)
parent	586187d7de71b4da7956ba588ae42253b9ff6482 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - a few misc things

 - the rest of MM

 - remove flex_arrays, replace with new simple radix-tree implementation

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (38 commits)
  Drop flex_arrays
  sctp: convert to genradix
  proc: commit to genradix
  generic radix trees
  selinux: convert to kvmalloc
  md: convert to kvmalloc
  openvswitch: convert to kvmalloc
  of: fix kmemleak crash caused by imbalance in early memory reservation
  mm: memblock: update comments and kernel-doc
  memblock: split checks whether a region should be skipped to a helper function
  memblock: remove memblock_{set,clear}_region_flags
  memblock: drop memblock_alloc_*_nopanic() variants
  memblock: memblock_alloc_try_nid: don't panic
  treewide: add checks for the return value of memblock_alloc*()
  swiotlb: add checks for the return value of memblock_alloc*()
  init/main: add checks for the return value of memblock_alloc*()
  mm/percpu: add checks for the return value of memblock_alloc*()
  sparc: add checks for the return value of memblock_alloc*()
  ia64: add checks for the return value of memblock_alloc*()
  arch: don't memset(0) memory returned by memblock_alloc()
  ...
Diffstat (limited to 'mm')
-rw-r--r--	mm/cma.c	 10
-rw-r--r--	mm/hmm.c	  2
-rw-r--r--	mm/kasan/init.c	 10
-rw-r--r--	mm/memblock.c	371
-rw-r--r--	mm/page_alloc.c	 10
-rw-r--r--	mm/page_ext.c	  2
-rw-r--r--	mm/percpu.c	 84
-rw-r--r--	mm/sparse.c	 27
8 files changed, 254 insertions(+), 262 deletions(-)
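
The memblock patches in this series drop the memblock_alloc_*_nopanic() variants and stop panicking inside memblock_alloc_try_nid(), so early boot callers are now expected to check the result themselves; the page_alloc, percpu and sparse hunks below all follow that shape. A minimal caller-side sketch of the pattern, assuming boot-time kernel context (the table name and size are illustrative, not taken from this diff):

#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/memblock.h>

static unsigned long *example_table __initdata;

static void __init example_table_init(void)
{
	size_t size = 64 * sizeof(unsigned long);	/* hypothetical size */

	/* memblock_alloc() returns zeroed memory, or NULL on failure */
	example_table = memblock_alloc(size, SMP_CACHE_BYTES);
	if (!example_table)
		panic("%s: Failed to allocate %zu bytes\n", __func__, size);
}
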
diff --git a/mm/cma.c b/mm/cma.c
index f4f3a8a57d86..bb2d333ffcb3 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -327,16 +327,14 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	 * memory in case of failure.
 	 */
 	if (base < highmem_start && limit > highmem_start) {
-		addr = memblock_alloc_range(size, alignment,
-					    highmem_start, limit,
-					    MEMBLOCK_NONE);
+		addr = memblock_phys_alloc_range(size, alignment,
+						 highmem_start, limit);
 		limit = highmem_start;
 	}
 
 	if (!addr) {
-		addr = memblock_alloc_range(size, alignment, base,
-					    limit,
-					    MEMBLOCK_NONE);
+		addr = memblock_phys_alloc_range(size, alignment, base,
+						 limit);
 		if (!addr) {
 			ret = -ENOMEM;
 			goto err;
diff --git a/mm/hmm.c b/mm/hmm.c
index a04e4b810610..fe1cd87e49ac 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -990,7 +990,7 @@ static void hmm_devmem_ref_kill(struct percpu_ref *ref)
 	percpu_ref_kill(ref);
 }
 
-static int hmm_devmem_fault(struct vm_area_struct *vma,
+static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
 			    unsigned long addr,
 			    const struct page *page,
 			    unsigned int flags,
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index fcaa1ca03175..ce45c491ebcd 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -83,8 +83,14 @@ static inline bool kasan_early_shadow_page_entry(pte_t pte)
 
 static __init void *early_alloc(size_t size, int node)
 {
-	return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
-				      MEMBLOCK_ALLOC_ACCESSIBLE, node);
+	void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+					   MEMBLOCK_ALLOC_ACCESSIBLE, node);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
+		      __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
+
+	return ptr;
 }
 
 static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
diff --git a/mm/memblock.c b/mm/memblock.c
index 470601115892..e7665cf914b1 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -76,8 +76,19 @@
  * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
  * performs such an assignment directly.
  *
- * Once memblock is setup the memory can be allocated using either
- * memblock or bootmem APIs.
+ * Once memblock is setup the memory can be allocated using one of the
+ * API variants:
+ *
+ * * :c:func:`memblock_phys_alloc*` - these functions return the
+ *   **physical** address of the allocated memory
+ * * :c:func:`memblock_alloc*` - these functions return the **virtual**
+ *   address of the allocated memory.
+ *
+ * Note, that both API variants use implicit assumptions about allowed
+ * memory ranges and the fallback methods. Consult the documentation
+ * of :c:func:`memblock_alloc_internal` and
+ * :c:func:`memblock_alloc_range_nid` functions for more elaborate
+ * description.
  *
  * As the system boot progresses, the architecture specific
  * :c:func:`mem_init` function frees all the memory to the buddy page
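
As a quick illustration of the two allocator families the comment above documents, here is a hedged sketch contrasting them in a boot-time context; memblock_phys_alloc() is assumed to be the plain physical-address helper alongside the range/nid variants, and the sizes are arbitrary:

#include <linux/mm.h>
#include <linux/memblock.h>

static void __init example_alloc_variants(void)
{
	phys_addr_t pa;
	void *va;

	/* memblock_phys_alloc*: returns a physical address, 0 on failure */
	pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

	/* memblock_alloc*: returns a zeroed virtual address, NULL on failure */
	va = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);

	if (!pa || !va)
		panic("%s: early allocation failed\n", __func__);
}
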
@@ -132,7 +143,7 @@ static int memblock_can_resize __initdata_memblock;
 static int memblock_memory_in_slab __initdata_memblock = 0;
 static int memblock_reserved_in_slab __initdata_memblock = 0;
 
-enum memblock_flags __init_memblock choose_memblock_flags(void)
+static enum memblock_flags __init_memblock choose_memblock_flags(void)
 {
 	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
 }
@@ -261,7 +272,7 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
  * Return:
  * Found address on success, 0 on failure.
  */
-phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
+static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 					phys_addr_t align, phys_addr_t start,
 					phys_addr_t end, int nid,
 					enum memblock_flags flags)
@@ -435,17 +446,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 	else
 		in_slab = &memblock_reserved_in_slab;
 
-	/* Try to find some space for it.
-	 *
-	 * WARNING: We assume that either slab_is_available() and we use it or
-	 * we use MEMBLOCK for allocations. That means that this is unsafe to
-	 * use when bootmem is currently active (unless bootmem itself is
-	 * implemented on top of MEMBLOCK which isn't the case yet)
-	 *
-	 * This should however not be an issue for now, as we currently only
-	 * call into MEMBLOCK while it's still active, or much later when slab
-	 * is active for memory hotplug operations
-	 */
+	/* Try to find some space for it */
 	if (use_slab) {
 		new_array = kmalloc(new_size, GFP_KERNEL);
 		addr = new_array ? __pa(new_array) : 0;
@@ -858,11 +859,14 @@ static int __init_memblock memblock_setclr_flag(phys_addr_t base,
 	if (ret)
 		return ret;
 
-	for (i = start_rgn; i < end_rgn; i++)
+	for (i = start_rgn; i < end_rgn; i++) {
+		struct memblock_region *r = &type->regions[i];
+
 		if (set)
-			memblock_set_region_flags(&type->regions[i], flag);
+			r->flags |= flag;
 		else
-			memblock_clear_region_flags(&type->regions[i], flag);
+			r->flags &= ~flag;
+	}
 
 	memblock_merge_regions(type);
 	return 0;
@@ -962,8 +966,31 @@ void __init_memblock __next_reserved_mem_region(u64 *idx,
 	*idx = ULLONG_MAX;
 }
 
+static bool should_skip_region(struct memblock_region *m, int nid, int flags)
+{
+	int m_nid = memblock_get_region_node(m);
+
+	/* only memory regions are associated with nodes, check it */
+	if (nid != NUMA_NO_NODE && nid != m_nid)
+		return true;
+
+	/* skip hotpluggable memory regions if needed */
+	if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+		return true;
+
+	/* if we want mirror memory skip non-mirror memory regions */
+	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
+		return true;
+
+	/* skip nomap memory unless we were asked for it explicitly */
+	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+		return true;
+
+	return false;
+}
+
 /**
- * __next__mem_range - next function for for_each_free_mem_range() etc.
+ * __next_mem_range - next function for for_each_free_mem_range() etc.
  * @idx: pointer to u64 loop variable
  * @nid: node selector, %NUMA_NO_NODE for all nodes
  * @flags: pick from blocks based on memory attributes
@@ -1009,20 +1036,7 @@ void __init_memblock __next_mem_range(u64 *idx, int nid,
 		phys_addr_t m_end = m->base + m->size;
 		int m_nid = memblock_get_region_node(m);
 
-		/* only memory regions are associated with nodes, check it */
-		if (nid != NUMA_NO_NODE && nid != m_nid)
-			continue;
-
-		/* skip hotpluggable memory regions if needed */
-		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
-			continue;
-
-		/* if we want mirror memory skip non-mirror memory regions */
-		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
-			continue;
-
-		/* skip nomap memory unless we were asked for it explicitly */
-		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+		if (should_skip_region(m, nid, flags))
 			continue;
 
 		if (!type_b) {
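
should_skip_region() now centralizes the node, hotplug, mirror and nomap filtering used by __next_mem_range() above, which in turn backs the for_each_free_mem_range() iterator. A hedged sketch of how that iterator is typically used, not part of this diff:

#include <linux/numa.h>
#include <linux/printk.h>
#include <linux/memblock.h>

static void __init example_dump_free_ranges(void)
{
	phys_addr_t start, end;
	u64 i;

	/* walk free (memory minus reserved) ranges, honouring the same filters */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end)
		pr_info("free range: %pa - %pa\n", &start, &end);
}
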
@@ -1126,20 +1140,7 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
 		phys_addr_t m_end = m->base + m->size;
 		int m_nid = memblock_get_region_node(m);
 
-		/* only memory regions are associated with nodes, check it */
-		if (nid != NUMA_NO_NODE && nid != m_nid)
-			continue;
-
-		/* skip hotpluggable memory regions if needed */
-		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
-			continue;
-
-		/* if we want mirror memory skip non-mirror memory regions */
-		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
-			continue;
-
-		/* skip nomap memory unless we were asked for it explicitly */
-		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+		if (should_skip_region(m, nid, flags))
 			continue;
 
 		if (!type_b) {
@@ -1255,94 +1256,123 @@ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
 }
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
+/**
+ * memblock_alloc_range_nid - allocate boot memory block
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @start: the lower bound of the memory region to allocate (phys address)
+ * @end: the upper bound of the memory region to allocate (phys address)
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * The allocation is performed from memory region limited by
+ * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
+ *
+ * If the specified node can not hold the requested memory the
+ * allocation falls back to any node in the system
+ *
+ * For systems with memory mirroring, the allocation is attempted first
+ * from the regions with mirroring enabled and then retried from any
+ * memory region.
+ *
+ * In addition, function sets the min_count to 0 using kmemleak_alloc_phys for
+ * allocated boot memory block, so that it is never reported as leaks.
+ *
+ * Return:
+ * Physical address of allocated memory block on success, %0 on failure.
+ */
 static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
 					phys_addr_t align, phys_addr_t start,
-					phys_addr_t end, int nid,
-					enum memblock_flags flags)
+					phys_addr_t end, int nid)
 {
+	enum memblock_flags flags = choose_memblock_flags();
 	phys_addr_t found;
 
+	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
+		nid = NUMA_NO_NODE;
+
 	if (!align) {
 		/* Can't use WARNs this early in boot on powerpc */
 		dump_stack();
 		align = SMP_CACHE_BYTES;
 	}
 
+	if (end > memblock.current_limit)
+		end = memblock.current_limit;
+
+again:
 	found = memblock_find_in_range_node(size, align, start, end, nid,
 					    flags);
-	if (found && !memblock_reserve(found, size)) {
-		/*
-		 * The min_count is set to 0 so that memblock allocations are
-		 * never reported as leaks.
-		 */
-		kmemleak_alloc_phys(found, size, 0, 0);
-		return found;
-	}
-	return 0;
-}
-
-phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
-					phys_addr_t start, phys_addr_t end,
-					enum memblock_flags flags)
-{
-	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
-					flags);
-}
-
-phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
-					phys_addr_t align, phys_addr_t max_addr,
-					int nid, enum memblock_flags flags)
-{
-	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
-}
-
-phys_addr_t __init memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
-{
-	enum memblock_flags flags = choose_memblock_flags();
-	phys_addr_t ret;
+	if (found && !memblock_reserve(found, size))
+		goto done;
 
-again:
-	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
-				      nid, flags);
+	if (nid != NUMA_NO_NODE) {
+		found = memblock_find_in_range_node(size, align, start,
+						    end, NUMA_NO_NODE,
+						    flags);
+		if (found && !memblock_reserve(found, size))
+			goto done;
+	}
 
-	if (!ret && (flags & MEMBLOCK_MIRROR)) {
+	if (flags & MEMBLOCK_MIRROR) {
 		flags &= ~MEMBLOCK_MIRROR;
+		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
+			&size);
 		goto again;
 	}
-	return ret;
-}
-
-phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
-					MEMBLOCK_NONE);
-}
 
-phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	phys_addr_t alloc;
-
-	alloc = __memblock_alloc_base(size, align, max_addr);
+	return 0;
 
-	if (alloc == 0)
-		panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
-		      &size, &max_addr);
+done:
+	/* Skip kmemleak for kasan_init() due to high volume. */
+	if (end != MEMBLOCK_ALLOC_KASAN)
+		/*
+		 * The min_count is set to 0 so that memblock allocated
+		 * blocks are never reported as leaks. This is because many
+		 * of these blocks are only referred via the physical
+		 * address which is not looked up by kmemleak.
+		 */
+		kmemleak_alloc_phys(found, size, 0, 0);
 
-	return alloc;
+	return found;
 }
 
-phys_addr_t __init memblock_phys_alloc(phys_addr_t size, phys_addr_t align)
+/**
+ * memblock_phys_alloc_range - allocate a memory block inside specified range
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @start: the lower bound of the memory region to allocate (physical address)
+ * @end: the upper bound of the memory region to allocate (physical address)
+ *
+ * Allocate @size bytes between @start and @end.
+ *
+ * Return: physical address of the allocated memory block on success,
+ * %0 on failure.
+ */
+phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
+					     phys_addr_t align,
+					     phys_addr_t start,
+					     phys_addr_t end)
 {
-	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
 }
 
+/**
+ * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * Allocates memory block from the specified NUMA node. If the node
+ * has no available memory, attempts to allocate from any node in the
+ * system.
+ *
+ * Return: physical address of the allocated memory block on success,
+ * %0 on failure.
+ */
 phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
-	phys_addr_t res = memblock_phys_alloc_nid(size, align, nid);
-
-	if (res)
-		return res;
-	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+	return memblock_alloc_range_nid(size, align, 0,
+					MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 }
 
 /**
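
Both public wrappers added above reduce to memblock_alloc_range_nid(). A hedged usage sketch against the signatures introduced in this hunk; the bounds, sizes and node id are illustrative only:

#include <linux/sizes.h>
#include <linux/memblock.h>

static void __init example_phys_allocs(void)
{
	phys_addr_t low, on_node;

	/* constrain the allocation to the [16M, 1G) physical range */
	low = memblock_phys_alloc_range(SZ_1M, SZ_1M, SZ_16M, SZ_1G);

	/* prefer node 0, falling back to any node that has memory */
	on_node = memblock_phys_alloc_try_nid(SZ_64K, SZ_64K, 0);

	if (!low || !on_node)
		panic("%s: early physical allocation failed\n", __func__);
}
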
@@ -1353,19 +1383,13 @@ phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t ali
  * @max_addr: the upper bound of the memory region to allocate (phys address)
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
  *
- * The @min_addr limit is dropped if it can not be satisfied and the allocation
- * will fall back to memory below @min_addr. Also, allocation may fall back
- * to any node in the system if the specified node can not
- * hold the requested memory.
+ * Allocates memory block using memblock_alloc_range_nid() and
+ * converts the returned physical address to virtual.
  *
- * The allocation is performed from memory region limited by
- * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
- *
- * The phys address of allocated boot memory block is converted to virtual and
- * allocated memory is reset to 0.
- *
- * In addition, function sets the min_count to 0 using kmemleak_alloc for
- * allocated boot memory block, so that it is never reported as leaks.
+ * The @min_addr limit is dropped if it can not be satisfied and the allocation
+ * will fall back to memory below @min_addr. Other constraints, such
+ * as node and mirrored memory will be handled again in
+ * memblock_alloc_range_nid().
  *
  * Return:
  * Virtual address of allocated memory block on success, NULL on failure.
@@ -1376,11 +1400,6 @@ static void * __init memblock_alloc_internal(
 		int nid)
 {
 	phys_addr_t alloc;
-	void *ptr;
-	enum memblock_flags flags = choose_memblock_flags();
-
-	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
-		nid = NUMA_NO_NODE;
 
 	/*
 	 * Detect any accidental use of these APIs after slab is ready, as at
@@ -1390,54 +1409,16 @@ static void * __init memblock_alloc_internal(
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, nid);
 
-	if (!align) {
-		dump_stack();
-		align = SMP_CACHE_BYTES;
-	}
+	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
 
-	if (max_addr > memblock.current_limit)
-		max_addr = memblock.current_limit;
-again:
-	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
-					    nid, flags);
-	if (alloc && !memblock_reserve(alloc, size))
-		goto done;
+	/* retry allocation without lower limit */
+	if (!alloc && min_addr)
+		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid);
 
-	if (nid != NUMA_NO_NODE) {
-		alloc = memblock_find_in_range_node(size, align, min_addr,
-						    max_addr, NUMA_NO_NODE,
-						    flags);
-		if (alloc && !memblock_reserve(alloc, size))
-			goto done;
-	}
-
-	if (min_addr) {
-		min_addr = 0;
-		goto again;
-	}
+	if (!alloc)
+		return NULL;
 
-	if (flags & MEMBLOCK_MIRROR) {
-		flags &= ~MEMBLOCK_MIRROR;
-		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
-			&size);
-		goto again;
-	}
-
-	return NULL;
-done:
-	ptr = phys_to_virt(alloc);
-
-	/* Skip kmemleak for kasan_init() due to high volume. */
-	if (max_addr != MEMBLOCK_ALLOC_KASAN)
-		/*
-		 * The min_count is set to 0 so that bootmem allocated
-		 * blocks are never reported as leaks. This is because many
-		 * of these blocks are only referred via the physical
-		 * address which is not looked up by kmemleak.
-		 */
-		kmemleak_alloc(ptr, size, 0, 0);
-
-	return ptr;
+	return phys_to_virt(alloc);
 }
 
 /**
@@ -1479,7 +1460,7 @@ void * __init memblock_alloc_try_nid_raw(
 }
 
 /**
- * memblock_alloc_try_nid_nopanic - allocate boot memory block
+ * memblock_alloc_try_nid - allocate boot memory block
  * @size: size of memory block to be allocated in bytes
  * @align: alignment of the region and block's size
  * @min_addr: the lower bound of the memory region from where the allocation
@@ -1495,42 +1476,6 @@ void * __init memblock_alloc_try_nid_raw(
  * Return:
  * Virtual address of allocated memory block on success, NULL on failure.
  */
-void * __init memblock_alloc_try_nid_nopanic(
-			phys_addr_t size, phys_addr_t align,
-			phys_addr_t min_addr, phys_addr_t max_addr,
-			int nid)
-{
-	void *ptr;
-
-	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
-		     __func__, (u64)size, (u64)align, nid, &min_addr,
-		     &max_addr, (void *)_RET_IP_);
-
-	ptr = memblock_alloc_internal(size, align,
-					   min_addr, max_addr, nid);
-	if (ptr)
-		memset(ptr, 0, size);
-	return ptr;
-}
-
-/**
- * memblock_alloc_try_nid - allocate boot memory block with panicking
- * @size: size of memory block to be allocated in bytes
- * @align: alignment of the region and block's size
- * @min_addr: the lower bound of the memory region from where the allocation
- *	  is preferred (phys address)
- * @max_addr: the upper bound of the memory region from where the allocation
- *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
- *	      allocate only from memory limited by memblock.current_limit value
- * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
- *
- * Public panicking version of memblock_alloc_try_nid_nopanic()
- * which provides debug information (including caller info), if enabled,
- * and panics if the request can not be satisfied.
- *
- * Return:
- * Virtual address of allocated memory block on success, NULL on failure.
- */
 void * __init memblock_alloc_try_nid(
 			phys_addr_t size, phys_addr_t align,
 			phys_addr_t min_addr, phys_addr_t max_addr,
@@ -1543,24 +1488,20 @@ void * __init memblock_alloc_try_nid(
 		     &max_addr, (void *)_RET_IP_);
 	ptr = memblock_alloc_internal(size, align,
 					   min_addr, max_addr, nid);
-	if (ptr) {
+	if (ptr)
 		memset(ptr, 0, size);
-		return ptr;
-	}
 
-	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa\n",
-	      __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr);
-	return NULL;
+	return ptr;
 }
 
 /**
- * __memblock_free_late - free bootmem block pages directly to buddy allocator
+ * __memblock_free_late - free pages directly to buddy allocator
  * @base: phys starting address of the boot memory block
  * @size: size of the boot memory block in bytes
  *
- * This is only useful when the bootmem allocator has already been torn
+ * This is only useful when the memblock allocator has already been torn
  * down, but we are still initializing the system.  Pages are released directly
- * to the buddy allocator, no bootmem metadata is updated because it is gone.
+ * to the buddy allocator.
  */
 void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
 {
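
That closes the mm/memblock.c changes. The net effect on the virtual-address API used by the hunks below: memblock_alloc_try_nid() still zeroes the block but now returns NULL instead of panicking, and memblock_alloc_internal() retries without the lower bound before giving up. A hedged caller sketch mirroring the page_ext and sparse usage that follows:

#include <linux/mm.h>
#include <linux/memblock.h>
#include <asm/dma.h>		/* MAX_DMA_ADDRESS */

static void * __init example_node_alloc(unsigned long size, int nid)
{
	void *p;

	/* prefer memory above MAX_DMA_ADDRESS on @nid; may fall back below or elsewhere */
	p = memblock_alloc_try_nid(size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
				   MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!p)
		panic("%s: Failed to allocate %lu bytes nid=%d\n",
		      __func__, size, nid);

	return p;
}
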
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3eb01dedfb50..03fcf73d47da 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6445,8 +6445,8 @@ static void __ref setup_usemap(struct pglist_data *pgdat,
 	zone->pageblock_flags = NULL;
 	if (usemapsize) {
 		zone->pageblock_flags =
-			memblock_alloc_node_nopanic(usemapsize,
-						    pgdat->node_id);
+			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
+					    pgdat->node_id);
 		if (!zone->pageblock_flags)
 			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
 			      usemapsize, zone->name, pgdat->node_id);
@@ -6679,7 +6679,8 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
 		end = pgdat_end_pfn(pgdat);
 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
 		size = (end - start) * sizeof(struct page);
-		map = memblock_alloc_node_nopanic(size, pgdat->node_id);
+		map = memblock_alloc_node(size, SMP_CACHE_BYTES,
+					  pgdat->node_id);
 		if (!map)
 			panic("Failed to allocate %ld bytes for node %d memory map\n",
 			      size, pgdat->node_id);
@@ -7959,8 +7960,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 		size = bucketsize << log2qty;
 		if (flags & HASH_EARLY) {
 			if (flags & HASH_ZERO)
-				table = memblock_alloc_nopanic(size,
-							       SMP_CACHE_BYTES);
+				table = memblock_alloc(size, SMP_CACHE_BYTES);
 			else
 				table = memblock_alloc_raw(size,
 							   SMP_CACHE_BYTES);
diff --git a/mm/page_ext.c b/mm/page_ext.c
index ab4244920e0f..d8f1aca4ad43 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -161,7 +161,7 @@ static int __init alloc_node_page_ext(int nid)
 
 	table_size = get_entry_size() * nr_pages;
 
-	base = memblock_alloc_try_nid_nopanic(
+	base = memblock_alloc_try_nid(
 			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
 			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 	if (!base)
diff --git a/mm/percpu.c b/mm/percpu.c
index c5c750781628..2e6fc8d552c9 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1086,6 +1086,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
 	struct pcpu_chunk *chunk;
 	unsigned long aligned_addr, lcm_align;
 	int start_offset, offset_bits, region_size, region_bits;
+	size_t alloc_size;
 
 	/* region calculations */
 	aligned_addr = tmp_addr & PAGE_MASK;
@@ -1101,9 +1102,12 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
 	region_size = ALIGN(start_offset + map_size, lcm_align);
 
 	/* allocate chunk */
-	chunk = memblock_alloc(sizeof(struct pcpu_chunk) +
-			       BITS_TO_LONGS(region_size >> PAGE_SHIFT),
-			       SMP_CACHE_BYTES);
+	alloc_size = sizeof(struct pcpu_chunk) +
+		BITS_TO_LONGS(region_size >> PAGE_SHIFT);
+	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+	if (!chunk)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      alloc_size);
 
 	INIT_LIST_HEAD(&chunk->list);
 
@@ -1114,12 +1118,25 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
 	chunk->nr_pages = region_size >> PAGE_SHIFT;
 	region_bits = pcpu_chunk_map_bits(chunk);
 
-	chunk->alloc_map = memblock_alloc(BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]),
-					  SMP_CACHE_BYTES);
-	chunk->bound_map = memblock_alloc(BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]),
-					  SMP_CACHE_BYTES);
-	chunk->md_blocks = memblock_alloc(pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]),
-					  SMP_CACHE_BYTES);
+	alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
+	chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+	if (!chunk->alloc_map)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      alloc_size);
+
+	alloc_size =
+		BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
+	chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+	if (!chunk->bound_map)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      alloc_size);
+
+	alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
+	chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+	if (!chunk->md_blocks)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      alloc_size);
+
 	pcpu_init_md_blocks(chunk);
 
 	/* manage populated page bitmap */
@@ -1888,7 +1905,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
 			  __alignof__(ai->groups[0].cpu_map[0]));
 	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
 
-	ptr = memblock_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE);
+	ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
 	if (!ptr)
 		return NULL;
 	ai = ptr;
@@ -2044,6 +2061,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	int group, unit, i;
 	int map_size;
 	unsigned long tmp_addr;
+	size_t alloc_size;
 
 #define PCPU_SETUP_BUG_ON(cond)	do {					\
 	if (unlikely(cond)) {						\
@@ -2075,14 +2093,29 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
 
 	/* process group information and build config tables accordingly */
-	group_offsets = memblock_alloc(ai->nr_groups * sizeof(group_offsets[0]),
-				       SMP_CACHE_BYTES);
-	group_sizes = memblock_alloc(ai->nr_groups * sizeof(group_sizes[0]),
-				     SMP_CACHE_BYTES);
-	unit_map = memblock_alloc(nr_cpu_ids * sizeof(unit_map[0]),
-				  SMP_CACHE_BYTES);
-	unit_off = memblock_alloc(nr_cpu_ids * sizeof(unit_off[0]),
-				  SMP_CACHE_BYTES);
+	alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
+	group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+	if (!group_offsets)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      alloc_size);
+
+	alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
+	group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+	if (!group_sizes)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      alloc_size);
+
+	alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
+	unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+	if (!unit_map)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      alloc_size);
+
+	alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
+	unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+	if (!unit_off)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      alloc_size);
 
 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
 		unit_map[cpu] = UINT_MAX;
@@ -2148,6 +2181,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
 	pcpu_slot = memblock_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]),
 				   SMP_CACHE_BYTES);
+	if (!pcpu_slot)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      pcpu_nr_slots * sizeof(pcpu_slot[0]));
 	for (i = 0; i < pcpu_nr_slots; i++)
 		INIT_LIST_HEAD(&pcpu_slot[i]);
 
@@ -2460,7 +2496,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
 	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
 
-	areas = memblock_alloc_nopanic(areas_size, SMP_CACHE_BYTES);
+	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
 	if (!areas) {
 		rc = -ENOMEM;
 		goto out_free;
@@ -2602,6 +2638,9 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
 	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
 			       sizeof(pages[0]));
 	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
+	if (!pages)
+		panic("%s: Failed to allocate %zu bytes\n", __func__,
+		      pages_size);
 
 	/* allocate pages */
 	j = 0;
@@ -2690,8 +2729,7 @@ EXPORT_SYMBOL(__per_cpu_offset);
 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
 				       size_t align)
 {
-	return memblock_alloc_from_nopanic(
-			size, align, __pa(MAX_DMA_ADDRESS));
+	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
 }
 
 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
@@ -2739,9 +2777,7 @@ void __init setup_per_cpu_areas(void)
 	void *fc;
 
 	ai = pcpu_alloc_alloc_info(1, 1);
-	fc = memblock_alloc_from_nopanic(unit_size,
-					 PAGE_SIZE,
-					 __pa(MAX_DMA_ADDRESS));
+	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 	if (!ai || !fc)
 		panic("Failed to allocate memory for percpu areas.");
 	/* kmemleak tracks the percpu allocations separately */
diff --git a/mm/sparse.c b/mm/sparse.c
index 77a0554fa5bd..69904aa6165b 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -65,11 +65,15 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid)
 	unsigned long array_size = SECTIONS_PER_ROOT *
 				   sizeof(struct mem_section);
 
-	if (slab_is_available())
+	if (slab_is_available()) {
 		section = kzalloc_node(array_size, GFP_KERNEL, nid);
-	else
+	} else {
 		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
 					      nid);
+		if (!section)
+			panic("%s: Failed to allocate %lu bytes nid=%d\n",
+			      __func__, array_size, nid);
+	}
 
 	return section;
 }
@@ -218,6 +222,9 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
 		size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
 		align = 1 << (INTERNODE_CACHE_SHIFT);
 		mem_section = memblock_alloc(size, align);
+		if (!mem_section)
+			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+			      __func__, size, align);
 	}
 #endif
 
@@ -323,9 +330,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 	limit = goal + (1UL << PA_SECTION_SHIFT);
 	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
 again:
-	p = memblock_alloc_try_nid_nopanic(size,
-					   SMP_CACHE_BYTES, goal, limit,
-					   nid);
+	p = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
 	if (!p && limit) {
 		limit = 0;
 		goto again;
@@ -379,7 +384,7 @@ static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 					 unsigned long size)
 {
-	return memblock_alloc_node_nopanic(size, pgdat->node_id);
+	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -404,13 +409,18 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
 {
 	unsigned long size = section_map_size();
 	struct page *map = sparse_buffer_alloc(size);
+	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
 
 	if (map)
 		return map;
 
 	map = memblock_alloc_try_nid(size,
-					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+					  PAGE_SIZE, addr,
 					  MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+	if (!map)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
+		      __func__, size, PAGE_SIZE, nid, &addr);
+
 	return map;
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
@@ -420,10 +430,11 @@ static void *sparsemap_buf_end __meminitdata;
 
 static void __init sparse_buffer_init(unsigned long size, int nid)
 {
+	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
 	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
 	sparsemap_buf =
 		memblock_alloc_try_nid_raw(size, PAGE_SIZE,
-					   __pa(MAX_DMA_ADDRESS),
+					   addr,
 					   MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 	sparsemap_buf_end = sparsemap_buf + size;
 }