author		Kumar Gala <galak@kernel.crashing.org>	2008-01-09 12:27:23 -0500
committer	Kumar Gala <galak@kernel.crashing.org>	2008-01-23 20:29:08 -0500
commit		f98eeb4eb1c52de89dcefeb538029bcecc6dd42d (patch)
tree		da91da9e329d35360ece38eb7f9fbcbc740cec63 /arch/powerpc/mm
parent		52920df4aa9dd25836b8ed4dc0b177ea14c09e53 (diff)
[POWERPC] Fix handling of memreserve if the range lands in highmem
There were several issues if a memreserve range existed and happened
to be in highmem:
* The bootmem allocator is only aware of lowmem, so calling
reserve_bootmem() with a highmem address would trigger a BUG_ON
* All highmem pages were handed to the buddy allocator, whether they
fell in a reserved range or not
Added an lmb_is_reserved() API that we now use to determine whether a
highmem page should remain PageReserved or be handed to the buddy
allocator.
Also, we incorrectly reported the number of pages reserved: all
highmem pages are initially marked reserved, so the count must be
decremented each time we clear the PageReserved flag and "free" a
highmem page to the buddy allocator.
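In sketch form, both fixes land in the mem_init() highmem loop (a
condensed view of the mem.c hunk below; all identifiers are the
kernel's own):

	for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* reserved highmem pages stay PageReserved */
		if (lmb_is_reserved(pfn << PAGE_SHIFT))
			continue;

		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);	/* hand the page to the buddy allocator */
		totalhigh_pages++;
		reservedpages--;	/* keep the reserved-page count honest */
	}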
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/lmb.c	13
-rw-r--r--	arch/powerpc/mm/mem.c	21
2 files changed, 32 insertions, 2 deletions
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index 8f4d2dc4cafb..4ce23bcf8a57 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -342,3 +342,16 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit)
 		}
 	}
 }
+
+int __init lmb_is_reserved(unsigned long addr)
+{
+	int i;
+
+	for (i = 0; i < lmb.reserved.cnt; i++) {
+		unsigned long upper = lmb.reserved.region[i].base +
+			lmb.reserved.region[i].size - 1;
+		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
+			return 1;
+	}
+	return 0;
+}
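One detail worth noting in the helper above: upper is computed as
base + size - 1, so the range test is inclusive at both ends. With a
hypothetical reserved region at base 0x1000 of size 0x1000 (values
illustrative only, not from the commit):

	lmb_is_reserved(0x1000);	/* returns 1: first byte of the region */
	lmb_is_reserved(0x1fff);	/* returns 1: last byte, base + size - 1 */
	lmb_is_reserved(0x2000);	/* returns 0: first byte past the region */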
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 5402fb6b3aae..e8122447f019 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -213,15 +213,30 @@ void __init do_init_bootmem(void)
 	 */
 #ifdef CONFIG_HIGHMEM
 	free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);
+
+	/* reserve the sections we're already using */
+	for (i = 0; i < lmb.reserved.cnt; i++) {
+		unsigned long addr = lmb.reserved.region[i].base +
+				lmb_size_bytes(&lmb.reserved, i) - 1;
+		if (addr < total_lowmem)
+			reserve_bootmem(lmb.reserved.region[i].base,
+					lmb_size_bytes(&lmb.reserved, i));
+		else if (lmb.reserved.region[i].base < total_lowmem) {
+			unsigned long adjusted_size = total_lowmem -
+				lmb.reserved.region[i].base;
+			reserve_bootmem(lmb.reserved.region[i].base,
+					adjusted_size);
+		}
+	}
 #else
 	free_bootmem_with_active_regions(0, max_pfn);
-#endif
 
 	/* reserve the sections we're already using */
 	for (i = 0; i < lmb.reserved.cnt; i++)
 		reserve_bootmem(lmb.reserved.region[i].base,
 				lmb_size_bytes(&lmb.reserved, i));
 
+#endif
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
 
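A brief worked example of the clipping in the hunk above (all numbers
illustrative, not from the commit): with total_lowmem = 0x30000000 and
a reserved region at base = 0x2fff0000 with size = 0x20000, the
region's last byte is 0x3000ffff, which lands in highmem, so the first
branch is skipped; since the base is still below total_lowmem, the
else-if clips the bootmem reservation to adjusted_size =
0x30000000 - 0x2fff0000 = 0x10000 bytes, and the highmem remainder is
covered by the lmb_is_reserved() check in mem_init() instead.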
@@ -334,11 +349,13 @@ void __init mem_init(void)
 	highmem_mapnr = total_lowmem >> PAGE_SHIFT;
 	for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
 		struct page *page = pfn_to_page(pfn);
-
+		if (lmb_is_reserved(pfn << PAGE_SHIFT))
+			continue;
 		ClearPageReserved(page);
 		init_page_count(page);
 		__free_page(page);
 		totalhigh_pages++;
+		reservedpages--;
 	}
 	totalram_pages += totalhigh_pages;
 	printk(KERN_DEBUG "High memory: %luk\n",
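The reservedpages-- in the hunk above is the reporting fix described
in the commit message: every highmem page starts out PageReserved and
is counted as reserved, so each page actually handed to the buddy
allocator has to come off that count. As a rough illustration (numbers
assumed, not from the commit), a board with 256MB of highmem and 4KB
pages would otherwise keep reporting all 65536 of those pages as
reserved in the boot-time memory summary even after they were freed.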