author    Kumar Gala <galak@kernel.crashing.org>  2008-01-09 12:27:23 -0500
committer Kumar Gala <galak@kernel.crashing.org>  2008-01-23 20:29:08 -0500
commit    f98eeb4eb1c52de89dcefeb538029bcecc6dd42d (patch)
tree      da91da9e329d35360ece38eb7f9fbcbc740cec63 /arch/powerpc/mm/mem.c
parent    52920df4aa9dd25836b8ed4dc0b177ea14c09e53 (diff)
[POWERPC] Fix handling of memreserve if the range lands in highmem
There were several issues if a memreserve range existed and happened
to be in highmem:

* The bootmem allocator is only aware of lowmem, so calling
  reserve_bootmem with a highmem address would cause a BUG_ON
* All highmem pages were provided to the buddy allocator

Added an lmb_is_reserved() API that we now use to determine if a
highmem page should continue to be PageReserved or be provided to the
buddy allocator.

Also, we previously reported the number of reserved pages incorrectly,
since all highmem pages are initially marked reserved and we clear the
PageReserved flag as we "free" up the highmem pages.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
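Because the diffstat below is limited to mem.c, the new lmb_is_reserved()
helper itself is not shown here. As a minimal sketch only, assuming the
lmb.reserved region array and lmb_size_bytes() accessor used in the hunks
below (the exact upstream implementation may differ), such a helper could
look like:

	/* Sketch, not the upstream code: returns 1 if addr falls inside
	 * any region recorded in lmb.reserved, 0 otherwise. Relies on the
	 * same lmb structures the mem.c hunks below already use. */
	int lmb_is_reserved(unsigned long addr)
	{
		int i;

		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long upper = lmb.reserved.region[i].base +
					      lmb_size_bytes(&lmb.reserved, i) - 1;

			if (addr >= lmb.reserved.region[i].base && addr <= upper)
				return 1;
		}
		return 0;
	}

mem_init() below calls this with a byte address (pfn << PAGE_SHIFT), so a
highmem page is skipped, and stays PageReserved, whenever its first byte
lies inside a reserved region.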
Diffstat (limited to 'arch/powerpc/mm/mem.c')
-rw-r--r--  arch/powerpc/mm/mem.c  21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 5402fb6b3aae..e8122447f019 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -213,15 +213,30 @@ void __init do_init_bootmem(void)
 	 */
 #ifdef CONFIG_HIGHMEM
 	free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);
+
+	/* reserve the sections we're already using */
+	for (i = 0; i < lmb.reserved.cnt; i++) {
+		unsigned long addr = lmb.reserved.region[i].base +
+				     lmb_size_bytes(&lmb.reserved, i) - 1;
+		if (addr < total_lowmem)
+			reserve_bootmem(lmb.reserved.region[i].base,
+					lmb_size_bytes(&lmb.reserved, i));
+		else if (lmb.reserved.region[i].base < total_lowmem) {
+			unsigned long adjusted_size = total_lowmem -
+				      lmb.reserved.region[i].base;
+			reserve_bootmem(lmb.reserved.region[i].base,
+					adjusted_size);
+		}
+	}
 #else
 	free_bootmem_with_active_regions(0, max_pfn);
-#endif
 
 	/* reserve the sections we're already using */
 	for (i = 0; i < lmb.reserved.cnt; i++)
 		reserve_bootmem(lmb.reserved.region[i].base,
 				lmb_size_bytes(&lmb.reserved, i));
 
+#endif
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
 
@@ -334,11 +349,13 @@ void __init mem_init(void)
 	highmem_mapnr = total_lowmem >> PAGE_SHIFT;
 	for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
 		struct page *page = pfn_to_page(pfn);
-
+		if (lmb_is_reserved(pfn << PAGE_SHIFT))
+			continue;
 		ClearPageReserved(page);
 		init_page_count(page);
 		__free_page(page);
 		totalhigh_pages++;
+		reservedpages--;
 	}
 	totalram_pages += totalhigh_pages;
 	printk(KERN_DEBUG "High memory: %luk\n",
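To see the clipping logic from the first hunk in isolation, here is a
small standalone demo (ordinary userspace C, not kernel code); the
total_lowmem, base, and size values are illustrative assumptions:

	/* Demonstrates how a reserved range straddling the lowmem
	 * boundary is clipped before reserve_bootmem(), per the first
	 * hunk above. Values are made up for illustration. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long total_lowmem = 768UL << 20; /* 768 MB of lowmem */
		unsigned long base = 760UL << 20;         /* reserved region base */
		unsigned long size = 16UL << 20;          /* reserved region size */
		unsigned long end = base + size - 1;      /* last reserved byte */

		if (end < total_lowmem) {
			/* whole range is lowmem: reserve it all via bootmem */
			printf("reserve_bootmem(%#lx, %#lx)\n", base, size);
		} else if (base < total_lowmem) {
			/* range straddles the boundary: clip to the lowmem
			 * part; the highmem tail stays PageReserved via the
			 * lmb_is_reserved() check in mem_init() */
			unsigned long adjusted_size = total_lowmem - base;
			printf("reserve_bootmem(%#lx, %#lx)\n", base, adjusted_size);
		}
		/* ranges entirely in highmem never reach reserve_bootmem(),
		 * which avoids the BUG_ON described in the commit message */
		return 0;
	}

With these values the range ends 8 MB past total_lowmem, so only the
lower 8 MB (adjusted_size = 0x800000) is handed to the bootmem allocator.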