author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-07-12 00:24:57 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-08-04 22:56:18 -0400
commit		7f219c736f9439acb1c50d264fbee93c353773ca (patch)
tree		24e4aa4f3135f639954243fcd015b44786a9a11d
parent		7590abe891c85fbc65dc906516d0bf89e070c19a (diff)
memblock: split memblock_find_base() out of __memblock_alloc_base()
This will be used by the array resize code and might prove useful to
some arch code as well, at which point it can be made non-static.

Also add a comment as to why aligning the size is important.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---
v2. Fix loss of size alignment
v3. Fix result code
-rw-r--r--	mm/memblock.c	58
1 file changed, 38 insertions, 20 deletions
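The comment this patch adds ("We align the size to limit fragmentation") is about region coalescing: when the requested size is not rounded up to the alignment, consecutive placements leave small gaps between reserved ranges, so memblock_add_region() cannot merge them and every allocation consumes another slot in the fixed-size reserved array, which is what bites on sparc. The following stand-alone toy program (userspace C with made-up numbers and a simplified top-down placement rule, not kernel code) only illustrates that effect:

#include <stdio.h>

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long top = 0x1000, align = 16, size = 5, base;
	int i;

	/* Unaligned size: each reservation is separated by a small gap,
	 * so adjacent regions can never be merged into one entry.
	 */
	for (i = 0, base = top; i < 3; i++) {
		base = ALIGN_DOWN(base - size, align);
		printf("unaligned: region %d at [%#lx-%#lx)\n",
		       i, base, base + size);
	}

	/* Size aligned up to the alignment: reservations abut, so a
	 * coalescing reserve array can keep them as a single entry.
	 */
	for (i = 0, base = top; i < 3; i++) {
		base = ALIGN_DOWN(base - ALIGN_UP(size, align), align);
		printf("aligned:   region %d at [%#lx-%#lx)\n",
		       i, base, base + ALIGN_UP(size, align));
	}
	return 0;
}

The first loop produces three regions with a gap between each pair; the second produces three regions that touch end to end and could be kept as one reserved entry.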
diff --git a/mm/memblock.c b/mm/memblock.c
index ae856d4e25a3..b775fca4fba5 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -345,12 +345,15 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
 
 	BUG_ON(0 == size);
 
+	/* We align the size to limit fragmentation. Without this, a lot of
+	 * small allocs quickly eat up the whole reserve array on sparc
+	 */
+	size = memblock_align_up(size, align);
+
 	/* We do a bottom-up search for a region with the right
 	 * nid since that's easier considering how memblock_nid_range()
 	 * works
 	 */
-	size = memblock_align_up(size, align);
-
 	for (i = 0; i < mem->cnt; i++) {
 		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
 					       size, align, nid);
@@ -366,20 +369,7 @@ phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
 	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
-phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	phys_addr_t alloc;
-
-	alloc = __memblock_alloc_base(size, align, max_addr);
-
-	if (alloc == 0)
-		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
-		      (unsigned long long) size, (unsigned long long) max_addr);
-
-	return alloc;
-}
-
-phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
 	long i;
 	phys_addr_t base = 0;
@@ -387,8 +377,6 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 
 	BUG_ON(0 == size);
 
-	size = memblock_align_up(size, align);
-
 	/* Pump up max_addr */
 	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
 		max_addr = memblock.current_limit;
@@ -405,13 +393,43 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 			continue;
 		base = min(memblockbase + memblocksize, max_addr);
 		res_base = memblock_find_region(memblockbase, base, size, align);
-		if (res_base != MEMBLOCK_ERROR &&
-		    memblock_add_region(&memblock.reserved, res_base, size) >= 0)
+		if (res_base != MEMBLOCK_ERROR)
 			return res_base;
 	}
+	return MEMBLOCK_ERROR;
+}
+
+phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+	phys_addr_t found;
+
+	/* We align the size to limit fragmentation. Without this, a lot of
+	 * small allocs quickly eat up the whole reserve array on sparc
+	 */
+	size = memblock_align_up(size, align);
+
+	found = memblock_find_base(size, align, max_addr);
+	if (found != MEMBLOCK_ERROR &&
+	    memblock_add_region(&memblock.reserved, found, size) >= 0)
+		return found;
+
 	return 0;
 }
 
+phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+	phys_addr_t alloc;
+
+	alloc = __memblock_alloc_base(size, align, max_addr);
+
+	if (alloc == 0)
+		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+		      (unsigned long long) size, (unsigned long long) max_addr);
+
+	return alloc;
+}
+
+
 /* You must call memblock_analyze() before this. */
 phys_addr_t __init memblock_phys_mem_size(void)
 {