author | Mel Gorman <mel@csn.ul.ie> | 2008-07-24 00:26:49 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-24 13:47:13 -0400 |
commit | 6b74ab97bc12ce74acec900f1d89a4aee2e4d70d (patch) | |
tree | d9d7b522a4a8f5f605d2e0f7f7a1bcb9d5049a82 /mm/page_alloc.c | |
parent | 9483a578df27fe7603605d565eefe039c1ba5845 (diff) | |
mm: add a basic debugging framework for memory initialisation
Boot initialisation is very complex, with a significant number of
architecture-specific routines, hooks and code-ordering constraints. While
much of the initialisation is architecture-independent, it trusts the data
received from the architecture layer. This is a mistake, and it has resulted
in a number of difficult-to-diagnose bugs.
This patchset adds some validation and tracing to memory initialisation. It
also introduces a few basic defensive measures. The validation code can be
explicitly disabled for embedded systems.
This patch:
Add additional debugging and verification code for memory initialisation.
Once enabled, the verification checks are always run and, when required,
additional debugging information can be output via the mminit_loglevel=
command-line parameter.
The verification code is placed in a new file mm/mm_init.c. Ideally other mm
initialisation code will be moved here over time.
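The converted calls in the diff below give a feel for the interface: each
message carries a level and a free-form section name. As a rough sketch only
(the real definitions land in the new mm initialisation files, which are
outside this hunk), the macro could look something like the following.
MMINIT_TRACE is taken from the calls in the diff and mminit_loglevel from the
commit message; the other level names, the "mminit::" prefix and the exact
comparison are assumptions.

/* Sketch only -- not the patch's actual definitions. */
enum mminit_level {
	MMINIT_WARNING,		/* assumed lower levels */
	MMINIT_VERIFY,
	MMINIT_TRACE		/* used by the converted calls below */
};

extern int mminit_loglevel;	/* assumed to be parsed from mminit_loglevel= */

/* Print only when the requested level is below the configured verbosity. */
#define mminit_dprintk(level, prefix, fmt, arg...)			\
do {									\
	if ((level) < mminit_loglevel)					\
		printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg);	\
} while (0)

With something of this shape, the tracing costs little more than a single
comparison on a normal boot, and the per-call section names ("zoneboundary",
"memmap_init", "memory_register") make it possible to grep the log for one
initialisation stage at a time.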
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 22 |
1 file changed, 13 insertions, 9 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 79ac4afc908c..0908352ba727 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2975,7 +2975,8 @@ void __init sparse_memory_present_with_active_regions(int nid)
 void __init push_node_boundaries(unsigned int nid,
 		unsigned long start_pfn, unsigned long end_pfn)
 {
-	printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
+	mminit_dprintk(MMINIT_TRACE, "zoneboundary",
+			"Entering push_node_boundaries(%u, %lu, %lu)\n",
 			nid, start_pfn, end_pfn);
 
 	/* Initialise the boundary for this node if necessary */
@@ -2993,7 +2994,8 @@ void __init push_node_boundaries(unsigned int nid,
 static void __meminit account_node_boundary(unsigned int nid,
 		unsigned long *start_pfn, unsigned long *end_pfn)
 {
-	printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
+	mminit_dprintk(MMINIT_TRACE, "zoneboundary",
+			"Entering account_node_boundary(%u, %lu, %lu)\n",
 			nid, *start_pfn, *end_pfn);
 
 	/* Return if boundary information has not been provided */
@@ -3368,8 +3370,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
 		if (realsize >= memmap_pages) {
 			realsize -= memmap_pages;
-			printk(KERN_DEBUG
-				"  %s zone: %lu pages used for memmap\n",
+			mminit_dprintk(MMINIT_TRACE, "memmap_init",
+				"%s zone: %lu pages used for memmap\n",
 				zone_names[j], memmap_pages);
 		} else
 			printk(KERN_WARNING
@@ -3379,7 +3381,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		/* Account for reserved pages */
 		if (j == 0 && realsize > dma_reserve) {
 			realsize -= dma_reserve;
-			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
+			mminit_dprintk(MMINIT_TRACE, "memmap_init",
+				"%s zone: %lu pages reserved\n",
 				zone_names[0], dma_reserve);
 		}
 
@@ -3520,10 +3523,11 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 {
 	int i;
 
-	printk(KERN_DEBUG "Entering add_active_range(%d, %#lx, %#lx) "
-			"%d entries of %d used\n",
-			nid, start_pfn, end_pfn,
-			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
+	mminit_dprintk(MMINIT_TRACE, "memory_register",
+			"Entering add_active_range(%d, %#lx, %#lx) "
+			"%d entries of %d used\n",
+			nid, start_pfn, end_pfn,
+			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
 
 	/* Merge with existing active regions if possible */
 	for (i = 0; i < nr_nodemap_entries; i++) {
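To see the gating behaviour in isolation, here is a small self-contained
userspace mock (an illustration only: printf stands in for printk, and the
macro body is the same assumed sketch as above, not the patch's actual code).
The MMINIT_TRACE message is suppressed at loglevel 0 and printed once the
loglevel is raised; the arguments passed to the call are made-up values.

#include <stdio.h>

/* Userspace stand-ins; in the kernel these are printk and KERN_DEBUG. */
#define printk printf
#define KERN_DEBUG ""

enum mminit_level { MMINIT_WARNING, MMINIT_VERIFY, MMINIT_TRACE };

static int mminit_loglevel;	/* kernel: assumed to come from mminit_loglevel= */

#define mminit_dprintk(level, prefix, fmt, ...)				\
do {									\
	if ((level) < mminit_loglevel)					\
		printk(KERN_DEBUG "mminit::" prefix " " fmt, __VA_ARGS__); \
} while (0)

int main(void)
{
	/* Default: nothing is printed, mirroring a normal boot. */
	mminit_loglevel = 0;
	mminit_dprintk(MMINIT_TRACE, "memory_register",
			"Entering add_active_range(%d, %#lx, %#lx)\n",
			0, 0x100UL, 0x200UL);

	/* Verbose: trace-level messages now reach the log. */
	mminit_loglevel = 4;
	mminit_dprintk(MMINIT_TRACE, "memory_register",
			"Entering add_active_range(%d, %#lx, %#lx)\n",
			0, 0x100UL, 0x200UL);
	return 0;
}

Booting a kernel with the patch applied and a high mminit_loglevel= value
would correspond to the second call here, assuming the parameter behaves as
the commit message describes.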