Diffstat (limited to 'mm/memblock.c')
-rw-r--r--  mm/memblock.c  171
1 file changed, 77 insertions(+), 94 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 42fe65447d8b..31e89dac9a23 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1255,30 +1255,84 @@ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
 }
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
+/**
+ * memblock_alloc_range_nid - allocate boot memory block
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @start: the lower bound of the memory region to allocate (phys address)
+ * @end: the upper bound of the memory region to allocate (phys address)
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * The allocation is performed from memory region limited by
+ * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
+ *
+ * If the specified node can not hold the requested memory the
+ * allocation falls back to any node in the system
+ *
+ * For systems with memory mirroring, the allocation is attempted first
+ * from the regions with mirroring enabled and then retried from any
+ * memory region.
+ *
+ * In addition, function sets the min_count to 0 using kmemleak_alloc_phys for
+ * allocated boot memory block, so that it is never reported as leaks.
+ *
+ * Return:
+ * Physical address of allocated memory block on success, %0 on failure.
+ */
 static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
 					phys_addr_t align, phys_addr_t start,
-					phys_addr_t end, int nid,
-					enum memblock_flags flags)
+					phys_addr_t end, int nid)
 {
+	enum memblock_flags flags = choose_memblock_flags();
 	phys_addr_t found;
 
+	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
+		nid = NUMA_NO_NODE;
+
 	if (!align) {
 		/* Can't use WARNs this early in boot on powerpc */
 		dump_stack();
 		align = SMP_CACHE_BYTES;
 	}
 
+	if (end > memblock.current_limit)
+		end = memblock.current_limit;
+
+again:
 	found = memblock_find_in_range_node(size, align, start, end, nid,
 					    flags);
-	if (found && !memblock_reserve(found, size)) {
+	if (found && !memblock_reserve(found, size))
+		goto done;
+
+	if (nid != NUMA_NO_NODE) {
+		found = memblock_find_in_range_node(size, align, start,
+						    end, NUMA_NO_NODE,
+						    flags);
+		if (found && !memblock_reserve(found, size))
+			goto done;
+	}
+
+	if (flags & MEMBLOCK_MIRROR) {
+		flags &= ~MEMBLOCK_MIRROR;
+		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
+			&size);
+		goto again;
+	}
+
+	return 0;
+
+done:
+	/* Skip kmemleak for kasan_init() due to high volume. */
+	if (end != MEMBLOCK_ALLOC_KASAN)
 		/*
-		 * The min_count is set to 0 so that memblock allocations are
-		 * never reported as leaks.
+		 * The min_count is set to 0 so that memblock allocated
+		 * blocks are never reported as leaks. This is because many
+		 * of these blocks are only referred via the physical
+		 * address which is not looked up by kmemleak.
 		 */
 		kmemleak_alloc_phys(found, size, 0, 0);
-		return found;
-	}
-	return 0;
+
+	return found;
 }
 
 phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
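Callers reach the consolidated allocator through the wrappers in the next hunk. As a usage illustration only (not part of this patch): the helper name, the table size and the zeroing below are made up, while memblock_phys_alloc_try_nid(), phys_to_virt(), SMP_CACHE_BYTES and __init are existing kernel interfaces.

	/* Hypothetical boot-time caller: allocate and zero a per-node table. */
	static void * __init alloc_node_table(int nid, size_t bytes)
	{
		phys_addr_t pa;

		/* Tries @nid first; falls back to any node inside memblock. */
		pa = memblock_phys_alloc_try_nid(bytes, SMP_CACHE_BYTES, nid);
		if (!pa)
			return NULL;

		/* The phys_alloc variants do not zero memory, so clear it here. */
		return memset(phys_to_virt(pa), 0, bytes);
	}

With this patch such a caller no longer passes memblock flags: the node and mirrored-memory fallbacks happen inside memblock_alloc_range_nid().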
@@ -1286,35 +1340,13 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
 					     phys_addr_t start,
 					     phys_addr_t end)
 {
-	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
-					MEMBLOCK_NONE);
-}
-
-phys_addr_t __init memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
-{
-	enum memblock_flags flags = choose_memblock_flags();
-	phys_addr_t ret;
-
-again:
-	ret = memblock_alloc_range_nid(size, align, 0,
-				       MEMBLOCK_ALLOC_ACCESSIBLE, nid, flags);
-
-	if (!ret && (flags & MEMBLOCK_MIRROR)) {
-		flags &= ~MEMBLOCK_MIRROR;
-		goto again;
-	}
-	return ret;
+	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
 }
 
 phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
-	phys_addr_t res = memblock_phys_alloc_nid(size, align, nid);
-
-	if (res)
-		return res;
 	return memblock_alloc_range_nid(size, align, 0,
-					MEMBLOCK_ALLOC_ACCESSIBLE,
-					NUMA_NO_NODE, MEMBLOCK_NONE);
+					MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 }
 
 /**
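The open-coded MEMBLOCK_MIRROR retry that lived in the removed memblock_phys_alloc_nid() is not lost: the same fallback now sits inside memblock_alloc_range_nid(). For a hypothetical caller of the removed function, the conversion would roughly be as below (the variable names are made up; note that memblock_phys_alloc_try_nid() additionally falls back to other nodes, which memblock_phys_alloc_nid() never did):

	/* before: node-local only, mirror retry open-coded in the wrapper */
	pa = memblock_phys_alloc_nid(table_size, SMP_CACHE_BYTES, nid);

	/* after: one call; mirror retry and any-node fallback are internal */
	pa = memblock_phys_alloc_try_nid(table_size, SMP_CACHE_BYTES, nid);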
@@ -1325,19 +1357,13 @@ phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t ali
  * @max_addr: the upper bound of the memory region to allocate (phys address)
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
  *
- * The @min_addr limit is dropped if it can not be satisfied and the allocation
- * will fall back to memory below @min_addr. Also, allocation may fall back
- * to any node in the system if the specified node can not
- * hold the requested memory.
- *
- * The allocation is performed from memory region limited by
- * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
- *
- * The phys address of allocated boot memory block is converted to virtual and
- * allocated memory is reset to 0.
+ * Allocates memory block using memblock_alloc_range_nid() and
+ * converts the returned physical address to virtual.
  *
- * In addition, function sets the min_count to 0 using kmemleak_alloc for
- * allocated boot memory block, so that it is never reported as leaks.
+ * The @min_addr limit is dropped if it can not be satisfied and the allocation
+ * will fall back to memory below @min_addr. Other constraints, such
+ * as node and mirrored memory will be handled again in
+ * memblock_alloc_range_nid().
  *
  * Return:
  * Virtual address of allocated memory block on success, NULL on failure.
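The @min_addr fallback documented above matters for callers that prefer, but do not strictly require, memory above some boundary. A minimal sketch, not taken from this patch (the helper name and the SZ_4G bound are illustrative; memblock_alloc_try_nid(), SMP_CACHE_BYTES, MEMBLOCK_ALLOC_ACCESSIBLE and NUMA_NO_NODE are existing interfaces):

	/* Prefer memory above 4 GiB, but accept any memory if that fails. */
	static void * __init alloc_high_table(size_t bytes)
	{
		return memblock_alloc_try_nid(bytes, SMP_CACHE_BYTES, SZ_4G,
					      MEMBLOCK_ALLOC_ACCESSIBLE,
					      NUMA_NO_NODE);
	}

If nothing above SZ_4G can be reserved, memblock_alloc_internal() retries with @min_addr dropped to zero, so the bound alone never causes the allocation to fail.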
@@ -1348,11 +1374,6 @@ static void * __init memblock_alloc_internal(
 				int nid)
 {
 	phys_addr_t alloc;
-	void *ptr;
-	enum memblock_flags flags = choose_memblock_flags();
-
-	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
-		nid = NUMA_NO_NODE;
 
 	/*
 	 * Detect any accidental use of these APIs after slab is ready, as at
@@ -1362,54 +1383,16 @@ static void * __init memblock_alloc_internal(
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, nid);
 
-	if (!align) {
-		dump_stack();
-		align = SMP_CACHE_BYTES;
-	}
-
-	if (max_addr > memblock.current_limit)
-		max_addr = memblock.current_limit;
-again:
-	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
-					    nid, flags);
-	if (alloc && !memblock_reserve(alloc, size))
-		goto done;
-
-	if (nid != NUMA_NO_NODE) {
-		alloc = memblock_find_in_range_node(size, align, min_addr,
-						    max_addr, NUMA_NO_NODE,
-						    flags);
-		if (alloc && !memblock_reserve(alloc, size))
-			goto done;
-	}
-
-	if (min_addr) {
-		min_addr = 0;
-		goto again;
-	}
-
-	if (flags & MEMBLOCK_MIRROR) {
-		flags &= ~MEMBLOCK_MIRROR;
-		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
-			&size);
-		goto again;
-	}
+	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
 
-	return NULL;
-done:
-	ptr = phys_to_virt(alloc);
+	/* retry allocation without lower limit */
+	if (!alloc && min_addr)
+		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid);
 
-	/* Skip kmemleak for kasan_init() due to high volume. */
-	if (max_addr != MEMBLOCK_ALLOC_KASAN)
-		/*
-		 * The min_count is set to 0 so that bootmem allocated
-		 * blocks are never reported as leaks. This is because many
-		 * of these blocks are only referred via the physical
-		 * address which is not looked up by kmemleak.
-		 */
-		kmemleak_alloc(ptr, size, 0, 0);
+	if (!alloc)
+		return NULL;
 
-	return ptr;
+	return phys_to_virt(alloc);
 }
 
 /**
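For reference, the MEMBLOCK_ALLOC_KASAN upper bound that memblock_alloc_range_nid() now checks before calling kmemleak_alloc_phys() is what KASAN shadow setup passes in. A condensed sketch modelled on the arm64 kasan_init() allocator (simplified here, not part of this patch):

	/* Shadow pages are too numerous for kmemleak; MEMBLOCK_ALLOC_KASAN
	 * as the upper bound makes memblock skip the kmemleak registration.
	 */
	static phys_addr_t __init kasan_alloc_zeroed_page(int node)
	{
		void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
						 __pa(MAX_DMA_ADDRESS),
						 MEMBLOCK_ALLOC_KASAN, node);
		if (!p)
			panic("%s: failed to allocate shadow page\n", __func__);

		return __pa(p);
	}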