aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorMel Gorman <mel@csn.ul.ie>2008-07-24 00:26:49 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-07-24 13:47:13 -0400
commit6b74ab97bc12ce74acec900f1d89a4aee2e4d70d (patch)
treed9d7b522a4a8f5f605d2e0f7f7a1bcb9d5049a82 /mm
parent9483a578df27fe7603605d565eefe039c1ba5845 (diff)
mm: add a basic debugging framework for memory initialisation
Boot initialisation is very complex, with significant numbers of architecture-specific routines, hooks and code ordering. While a significant amount of the initialisation is architecture-independent, it trusts the data received from the architecture layer. This is a mistake, and has resulted in a number of difficult-to-diagnose bugs. This patchset adds some validation and tracing to memory initialisation. It also introduces a few basic defensive measures. The validation code can be explicitly disabled for embedded systems. This patch: Add additional debugging and verification code for memory initialisation. Once enabled, the verification checks are always run and, when required, additional debugging information may be output via an mminit_loglevel= command-line parameter. The verification code is placed in a new file mm/mm_init.c. Ideally other mm initialisation code will be moved here over time. Signed-off-by: Mel Gorman <mel@csn.ul.ie> Cc: Christoph Lameter <cl@linux-foundation.org> Cc: Andy Whitcroft <apw@shadowen.org> Cc: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/Makefile1
-rw-r--r--mm/internal.h27
-rw-r--r--mm/mm_init.c18
-rw-r--r--mm/page_alloc.c22
4 files changed, 59 insertions, 9 deletions
diff --git a/mm/Makefile b/mm/Makefile
index 18c143b3c46c..4bbc8f094ff0 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o
 obj-$(CONFIG_TINY_SHMEM) += tiny-shmem.o
 obj-$(CONFIG_SLOB) += slob.o
 obj-$(CONFIG_SLAB) += slab.o
+obj-$(CONFIG_DEBUG_MEMORY_INIT) += mm_init.o
 obj-$(CONFIG_SLUB) += slub.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
diff --git a/mm/internal.h b/mm/internal.h
index 0034e947e4bc..a7ee05253294 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -59,4 +59,31 @@ static inline unsigned long page_order(struct page *page)
 #define __paginginit __init
 #endif
 
/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

/*
 * mminit_dprintk() - print a memory-initialisation debug message.
 *
 * Emits the message only when @level is strictly below the boot-time
 * mminit_loglevel (set via the "mminit_loglevel=" kernel parameter).
 * Warnings and below use KERN_WARNING; everything else is KERN_DEBUG.
 *
 * Note: the message is emitted with a single printk() call per branch.
 * Splitting it into printk(loglevel) + printk(KERN_CONT ...) would emit
 * a spurious empty message and is unreliable, since consecutive printk
 * calls are not guaranteed to be merged into one line.
 */
#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			printk(KERN_WARNING "mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

#else

/* Debugging disabled: compile the hook away while type-checking arguments. */
static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

#endif /* CONFIG_DEBUG_MEMORY_INIT */
 #endif
diff --git a/mm/mm_init.c b/mm/mm_init.c
new file mode 100644
index 000000000000..c01d8dfec817
--- /dev/null
+++ b/mm/mm_init.c
@@ -0,0 +1,18 @@
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>

11int __meminitdata mminit_loglevel;
12
13static __init int set_mminit_loglevel(char *str)
14{
15 get_option(&str, &mminit_loglevel);
16 return 0;
17}
18early_param("mminit_loglevel", set_mminit_loglevel);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 79ac4afc908c..0908352ba727 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2975,7 +2975,8 @@ void __init sparse_memory_present_with_active_regions(int nid)
 void __init push_node_boundaries(unsigned int nid,
 		unsigned long start_pfn, unsigned long end_pfn)
 {
-	printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
+	mminit_dprintk(MMINIT_TRACE, "zoneboundary",
+		"Entering push_node_boundaries(%u, %lu, %lu)\n",
 		nid, start_pfn, end_pfn);
 
 	/* Initialise the boundary for this node if necessary */
@@ -2993,7 +2994,8 @@ void __init push_node_boundaries(unsigned int nid,
 static void __meminit account_node_boundary(unsigned int nid,
 		unsigned long *start_pfn, unsigned long *end_pfn)
 {
-	printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
+	mminit_dprintk(MMINIT_TRACE, "zoneboundary",
+		"Entering account_node_boundary(%u, %lu, %lu)\n",
 		nid, *start_pfn, *end_pfn);
 
 	/* Return if boundary information has not been provided */
@@ -3368,8 +3370,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
 		if (realsize >= memmap_pages) {
 			realsize -= memmap_pages;
-			printk(KERN_DEBUG
-				"  %s zone: %lu pages used for memmap\n",
+			mminit_dprintk(MMINIT_TRACE, "memmap_init",
+				"%s zone: %lu pages used for memmap\n",
 				zone_names[j], memmap_pages);
 		} else
 			printk(KERN_WARNING
@@ -3379,7 +3381,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		/* Account for reserved pages */
 		if (j == 0 && realsize > dma_reserve) {
 			realsize -= dma_reserve;
-			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
+			mminit_dprintk(MMINIT_TRACE, "memmap_init",
+				"%s zone: %lu pages reserved\n",
 				zone_names[0], dma_reserve);
 		}
 
@@ -3520,10 +3523,11 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 {
 	int i;
 
-	printk(KERN_DEBUG "Entering add_active_range(%d, %#lx, %#lx) "
-			"%d entries of %d used\n",
-			nid, start_pfn, end_pfn,
-			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
+	mminit_dprintk(MMINIT_TRACE, "memory_register",
+			"Entering add_active_range(%d, %#lx, %#lx) "
+			"%d entries of %d used\n",
+			nid, start_pfn, end_pfn,
+			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
 
 	/* Merge with existing active regions if possible */
 	for (i = 0; i < nr_nodemap_entries; i++) {