author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-07-06 18:39:01 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-08-04 22:56:07 -0400
commit	e63075a3c9377536d085bc013cd3fe6323162449 (patch)
tree	28fde124dde6df867947882fc686d228502846df /mm/memblock.c
parent	27f574c223d2c09610058b3ec7a29582d63a3e06 (diff)
memblock: Introduce default allocation limit and use it to replace explicit ones
This introduces memblock.current_limit, which is used to limit allocations
from memblock_alloc() or memblock_alloc_base(..., MEMBLOCK_ALLOC_ACCESSIBLE).
The old MEMBLOCK_ALLOC_ANYWHERE changes value from 0 to ~(u64)0 and can still
be used with memblock_alloc_base() to allocate really anywhere. It is no
longer cropped to MEMBLOCK_REAL_LIMIT, which disappears.
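
For illustration only (not part of the patch), a minimal sketch of how the two
allocators behave after this change; example_early_alloc() and the 1 MiB size
and alignment are made up, the memblock calls are the ones touched here:

#include <linux/init.h>
#include <linux/memblock.h>

static void __init example_early_alloc(void)
{
	u64 below_limit, anywhere;

	/* Honors memblock.current_limit (MEMBLOCK_ALLOC_ACCESSIBLE). */
	below_limit = memblock_alloc(1 << 20, 1 << 20);

	/* MEMBLOCK_ALLOC_ANYWHERE is now ~(u64)0, so this call may
	 * return memory anywhere in the physical address map.
	 */
	anywhere = memblock_alloc_base(1 << 20, 1 << 20,
				       MEMBLOCK_ALLOC_ANYWHERE);
}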
Note to archs: I'm leaving the default limit at MEMBLOCK_ALLOC_ANYWHERE. I
strongly recommend that you set an appropriate limit during boot in order to
guarantee that a memblock_alloc() at any time results in something that is
accessible with a simple __va().
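
For instance, an architecture might cap memblock early in boot along these
lines (a hypothetical sketch, not from this patch; example_lowmem_top and
example_cap_memblock() are made-up names standing in for whatever the arch
knows as the top of its linear mapping and its early setup path):

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/string.h>
#include <asm/page.h>

static u64 example_lowmem_top;	/* filled in from the arch's memory map */

static void __init example_cap_memblock(void)
{
	void *p;

	/* Every later memblock_alloc() stays below this address ... */
	memblock_set_current_limit(example_lowmem_top);

	/* ... so the returned physical address is safe to __va(). */
	p = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
	memset(p, 0, PAGE_SIZE);
}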
The reason is that a subsequent patch will introduce the ability for the
memblock array to resize itself by reallocating it. The memblock core will
honor the current limit when performing those allocations.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'mm/memblock.c')
-rw-r--r--	mm/memblock.c	19
1 file changed, 11 insertions, 8 deletions
diff --git a/mm/memblock.c b/mm/memblock.c
index 0131684c42f8..770c5bfac2cd 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -115,6 +115,8 @@ void __init memblock_init(void)
 	memblock.reserved.regions[0].base = 0;
 	memblock.reserved.regions[0].size = 0;
 	memblock.reserved.cnt = 1;
+
+	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
 }
 
 void __init memblock_analyze(void)
@@ -373,7 +375,7 @@ u64 __init memblock_alloc_nid(u64 size, u64 align, int nid)
 
 u64 __init memblock_alloc(u64 size, u64 align)
 {
-	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
 u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
@@ -399,14 +401,9 @@ u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 
 	size = memblock_align_up(size, align);
 
-	/* On some platforms, make sure we allocate lowmem */
-	/* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */
-	if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
-		max_addr = MEMBLOCK_REAL_LIMIT;
-
 	/* Pump up max_addr */
-	if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
-		max_addr = ~(u64)0;
+	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
+		max_addr = memblock.current_limit;
 
 	/* We do a top-down search, this tends to limit memory
 	 * fragmentation by keeping early boot allocs near the
@@ -527,3 +524,9 @@ int memblock_is_region_reserved(u64 base, u64 size)
 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
+
+void __init memblock_set_current_limit(u64 limit)
+{
+	memblock.current_limit = limit;
+}
+