author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-07-06 18:38:59 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-08-04 22:56:05 -0400
commit    c3f72b5706716ada7923def513486ab7bb3a5301 (patch)
tree      24a7a4939164f2183bdc68212232d0a3e94274d3 /mm/memblock.c
parent    35a1f0bd07015dde66501b47cfb6ddc72ebe7346 (diff)
memblock: Factor the lowest level alloc function
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'mm/memblock.c')
-rw-r--r--  mm/memblock.c | 59
1 file changed, 27 insertions(+), 32 deletions(-)
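
This commit factors the bottom-most allocation routine out of the NUMA path: memblock_alloc_nid_unreserved() is renamed to memblock_alloc_region(), and __memblock_alloc_base() is rewritten to call it instead of open-coding the same top-down, reserve-avoiding scan. Only the helper's signature appears in the hunks below, so the following is a reconstructed sketch rather than verbatim kernel source; the loop body is inferred from the open-coded walk this commit deletes from __memblock_alloc_base().

/* Sketch of the factored helper: find the highest aligned 'size'-byte
 * slot in [start, end) that does not overlap memblock.reserved,
 * reserve it, and return its base; ~(u64)0 means no fit. Inferred,
 * not copied, from this commit's hunks.
 */
static u64 __init memblock_alloc_region(u64 start, u64 end,
					u64 size, u64 align)
{
	u64 base, res_base;
	long j;

	/* Highest aligned base that still fits [base, base + size) */
	base = memblock_align_down(end - size, align);

	/* Walk downward; whenever the candidate overlaps a reserved
	 * region, retry just below that region's base.
	 */
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0) {
			/* Free slot found: record the reservation */
			if (memblock_add_region(&memblock.reserved, base, size) < 0)
				return ~(u64)0;
			return base;
		}
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return ~(u64)0;
}

With the walk factored out, __memblock_alloc_base() shrinks to clamping each memory region against max_addr and handing the range to memblock_alloc_region(), which is also why its now-unused local 'j' is dropped.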
diff --git a/mm/memblock.c b/mm/memblock.c
index 13807f280ada..e264e8c70892 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -294,8 +294,8 @@ static u64 memblock_align_up(u64 addr, u64 size)
 	return (addr + (size - 1)) & ~(size - 1);
 }
 
-static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end,
-						u64 size, u64 align)
+static u64 __init memblock_alloc_region(u64 start, u64 end,
+					u64 size, u64 align)
 {
 	u64 base, res_base;
 	long j;
@@ -318,6 +318,13 @@ static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end,
 	return ~(u64)0;
 }
 
+u64 __weak __init memblock_nid_range(u64 start, u64 end, int *nid)
+{
+	*nid = 0;
+
+	return end;
+}
+
 static u64 __init memblock_alloc_nid_region(struct memblock_region *mp,
 					    u64 size, u64 align, int nid)
 {
@@ -333,8 +340,7 @@ static u64 __init memblock_alloc_nid_region(struct memblock_region *mp,
 
 		this_end = memblock_nid_range(start, end, &this_nid);
 		if (this_nid == nid) {
-			u64 ret = memblock_alloc_nid_unreserved(start, this_end,
-								size, align);
+			u64 ret = memblock_alloc_region(start, this_end, size, align);
 			if (ret != ~(u64)0)
 				return ret;
 		}
@@ -351,6 +357,10 @@ u64 __init memblock_alloc_nid(u64 size, u64 align, int nid)
 
 	BUG_ON(0 == size);
 
+	/* We do a bottom-up search for a region with the right
+	 * nid since that's easier considering how memblock_nid_range()
+	 * works
+	 */
 	size = memblock_align_up(size, align);
 
 	for (i = 0; i < mem->cnt; i++) {
@@ -383,7 +393,7 @@ u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 
 u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 {
-	long i, j;
+	long i;
 	u64 base = 0;
 	u64 res_base;
 
@@ -396,33 +406,24 @@ u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 	if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
 		max_addr = MEMBLOCK_REAL_LIMIT;
 
+	/* Pump up max_addr */
+	if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
+		max_addr = ~(u64)0;
+
+	/* We do a top-down search, this tends to limit memory
+	 * fragmentation by keeping early boot allocs near the
+	 * top of memory
+	 */
 	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
 		u64 memblockbase = memblock.memory.regions[i].base;
 		u64 memblocksize = memblock.memory.regions[i].size;
 
 		if (memblocksize < size)
 			continue;
-		if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
-			base = memblock_align_down(memblockbase + memblocksize - size, align);
-		else if (memblockbase < max_addr) {
-			base = min(memblockbase + memblocksize, max_addr);
-			base = memblock_align_down(base - size, align);
-		} else
-			continue;
-
-		while (base && memblockbase <= base) {
-			j = memblock_overlaps_region(&memblock.reserved, base, size);
-			if (j < 0) {
-				/* this area isn't reserved, take it */
-				if (memblock_add_region(&memblock.reserved, base, size) < 0)
-					return 0;
-				return base;
-			}
-			res_base = memblock.reserved.regions[j].base;
-			if (res_base < size)
-				break;
-			base = memblock_align_down(res_base - size, align);
-		}
+		base = min(memblockbase + memblocksize, max_addr);
+		res_base = memblock_alloc_region(memblockbase, base, size, align);
+		if (res_base != ~(u64)0)
+			return res_base;
 	}
 	return 0;
 }
@@ -528,9 +529,3 @@ int memblock_is_region_reserved(u64 base, u64 size)
 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
-u64 __weak memblock_nid_range(u64 start, u64 end, int *nid)
-{
-	*nid = 0;
-
-	return end;
-}
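
The __weak default of memblock_nid_range() also moves from the bottom of the file to just above its first caller and gains __init, since it only runs during early boot. An architecture with real node information overrides it to report which node owns 'start' and where that node's memory ends, so that memblock_alloc_nid() can carve each region into per-node chunks. A hypothetical override is sketched below; arch_node_id_and_limit() is an invented placeholder, not a kernel API.

/* Hypothetical arch-side override of the __weak default above.
 * arch_node_id_and_limit() is a made-up helper standing in for
 * whatever node-boundary lookup the architecture provides.
 */
u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
{
	u64 node_end;

	*nid = arch_node_id_and_limit(start, &node_end);

	/* Never report past the caller's range */
	return min(end, node_end);
}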