diff options
author | Michael Ellerman <michael@ellerman.id.au> | 2006-05-17 04:00:46 -0400 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2006-05-19 01:02:15 -0400 |
commit | 2babf5c2ec2f2d5de3e38d20f7df7fd815fd10c9 (patch) | |
tree | 9ecda21067fe36f36fbefae87141150b62c39acd /arch/powerpc/mm/lmb.c | |
parent | 846f77b08c8301682ded5ce127c56397327a60d0 (diff) |
[PATCH] powerpc: Unify mem= handling
We currently do mem= handling in three separate places. And as benh pointed out,
I wrote two of them. Now that we parse command line parameters earlier, we can
clean this mess up.
Moving the parsing out of prom_init means the device tree might be allocated
above the memory limit. If that happens we'd have to move it. As it happens
we already have logic to do that for kdump, so just genericise it.
This also means we might have reserved regions above the memory limit; if we
do, the bootmem allocator will blow up, so we have to modify
lmb_enforce_memory_limit() to truncate the reserves as well.
Tested on P5 LPAR, iSeries, F50, 44p. Tested moving device tree on P5 and
44p and F50.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm/lmb.c')
-rw-r--r-- | arch/powerpc/mm/lmb.c | 43 |
1 files changed, 32 insertions, 11 deletions
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c index 417d58518558..8b6f522655a6 100644 --- a/arch/powerpc/mm/lmb.c +++ b/arch/powerpc/mm/lmb.c | |||
@@ -89,20 +89,25 @@ static long __init lmb_regions_adjacent(struct lmb_region *rgn, | |||
89 | return lmb_addrs_adjacent(base1, size1, base2, size2); | 89 | return lmb_addrs_adjacent(base1, size1, base2, size2); |
90 | } | 90 | } |
91 | 91 | ||
92 | /* Assumption: base addr of region 1 < base addr of region 2 */ | 92 | static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r) |
93 | static void __init lmb_coalesce_regions(struct lmb_region *rgn, | ||
94 | unsigned long r1, unsigned long r2) | ||
95 | { | 93 | { |
96 | unsigned long i; | 94 | unsigned long i; |
97 | 95 | ||
98 | rgn->region[r1].size += rgn->region[r2].size; | 96 | for (i = r; i < rgn->cnt - 1; i++) { |
99 | for (i=r2; i < rgn->cnt-1; i++) { | 97 | rgn->region[i].base = rgn->region[i + 1].base; |
100 | rgn->region[i].base = rgn->region[i+1].base; | 98 | rgn->region[i].size = rgn->region[i + 1].size; |
101 | rgn->region[i].size = rgn->region[i+1].size; | ||
102 | } | 99 | } |
103 | rgn->cnt--; | 100 | rgn->cnt--; |
104 | } | 101 | } |
105 | 102 | ||
103 | /* Assumption: base addr of region 1 < base addr of region 2 */ | ||
104 | static void __init lmb_coalesce_regions(struct lmb_region *rgn, | ||
105 | unsigned long r1, unsigned long r2) | ||
106 | { | ||
107 | rgn->region[r1].size += rgn->region[r2].size; | ||
108 | lmb_remove_region(rgn, r2); | ||
109 | } | ||
110 | |||
106 | /* This routine called with relocation disabled. */ | 111 | /* This routine called with relocation disabled. */ |
107 | void __init lmb_init(void) | 112 | void __init lmb_init(void) |
108 | { | 113 | { |
@@ -294,17 +299,16 @@ unsigned long __init lmb_end_of_DRAM(void) | |||
294 | return (lmb.memory.region[idx].base + lmb.memory.region[idx].size); | 299 | return (lmb.memory.region[idx].base + lmb.memory.region[idx].size); |
295 | } | 300 | } |
296 | 301 | ||
297 | /* | 302 | /* You must call lmb_analyze() after this. */ |
298 | * Truncate the lmb list to memory_limit if it's set | ||
299 | * You must call lmb_analyze() after this. | ||
300 | */ | ||
301 | void __init lmb_enforce_memory_limit(unsigned long memory_limit) | 303 | void __init lmb_enforce_memory_limit(unsigned long memory_limit) |
302 | { | 304 | { |
303 | unsigned long i, limit; | 305 | unsigned long i, limit; |
306 | struct lmb_property *p; | ||
304 | 307 | ||
305 | if (! memory_limit) | 308 | if (! memory_limit) |
306 | return; | 309 | return; |
307 | 310 | ||
311 | /* Truncate the lmb regions to satisfy the memory limit. */ | ||
308 | limit = memory_limit; | 312 | limit = memory_limit; |
309 | for (i = 0; i < lmb.memory.cnt; i++) { | 313 | for (i = 0; i < lmb.memory.cnt; i++) { |
310 | if (limit > lmb.memory.region[i].size) { | 314 | if (limit > lmb.memory.region[i].size) { |
@@ -316,4 +320,21 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit) | |||
316 | lmb.memory.cnt = i + 1; | 320 | lmb.memory.cnt = i + 1; |
317 | break; | 321 | break; |
318 | } | 322 | } |
323 | |||
324 | lmb.rmo_size = lmb.memory.region[0].size; | ||
325 | |||
326 | /* And truncate any reserves above the limit also. */ | ||
327 | for (i = 0; i < lmb.reserved.cnt; i++) { | ||
328 | p = &lmb.reserved.region[i]; | ||
329 | |||
330 | if (p->base > memory_limit) | ||
331 | p->size = 0; | ||
332 | else if ((p->base + p->size) > memory_limit) | ||
333 | p->size = memory_limit - p->base; | ||
334 | |||
335 | if (p->size == 0) { | ||
336 | lmb_remove_region(&lmb.reserved, i); | ||
337 | i--; | ||
338 | } | ||
339 | } | ||
319 | } | 340 | } |