aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/mm/mem.c
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2005-11-15 19:43:26 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-15 19:57:12 -0500
commitfb6d73d3014babb69f5cc2d1d78b31e9d09fc5df (patch)
tree9d8ee68b8754fc57948098945113871aee0b23bb /arch/powerpc/mm/mem.c
parent1e185b97b4364063f1135604b87f8d8469944233 (diff)
[PATCH] powerpc: Fix sparsemem with memory holes [was Re: ppc64 oops..]
This patch should fix the crashes we have been seeing on 64-bit powerpc systems with a memory hole when sparsemem is enabled. I'd appreciate it if people who know more about NUMA and sparsemem than me could look over it. There were two bugs. The first was that if NUMA was enabled but there was no NUMA information for the machine, the setup_nonnuma() function was adding a single region, assuming memory was contiguous. The second was that the loops in mem_init() and show_mem() assumed that all pages within the span of a pgdat were valid (had a valid struct page). I also fixed the incorrect setting of num_physpages that Mike Kravetz pointed out. Signed-off-by: Paul Mackerras <paulus@samba.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/powerpc/mm/mem.c')
-rw-r--r--arch/powerpc/mm/mem.c8
1 files changed, 6 insertions, 2 deletions
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index e2c95fcb8055..4bd7b0a70996 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -200,6 +200,8 @@ void show_mem(void)
 		unsigned long flags;
 		pgdat_resize_lock(pgdat, &flags);
 		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			if (!pfn_valid(pgdat->node_start_pfn + i))
+				continue;
 			page = pgdat_page_nr(pgdat, i);
 			total++;
 			if (PageHighMem(page))
@@ -336,7 +338,7 @@ void __init mem_init(void)
 	struct page *page;
 	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
 
-	num_physpages = max_pfn;	/* RAM is assumed contiguous */
+	num_physpages = lmb.memory.size >> PAGE_SHIFT;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -348,11 +350,13 @@ void __init mem_init(void)
 		}
 	}
 #else
-	max_mapnr = num_physpages;
+	max_mapnr = max_pfn;
 	totalram_pages += free_all_bootmem();
 #endif
 	for_each_pgdat(pgdat) {
 		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			if (!pfn_valid(pgdat->node_start_pfn + i))
+				continue;
 			page = pgdat_page_nr(pgdat, i);
 			if (PageReserved(page))
 				reservedpages++;