author    Mel Gorman <mel@csn.ul.ie>    2008-07-24 00:26:52 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-07-24 13:47:13 -0400
commit    2dbb51c49f4fecb8330e43247a0edfbc4b2b8974 (patch)
tree      507f06335dd1e66144f176e8e863743895e43d79 /mm
parent    708614e6180f398cd307ea0048d48ba6fa274610 (diff)
mm: make defensive checks around PFN values registered for memory usage
There are a number of different views of how much memory is currently active: the arch-independent zone-sizing view, the bootmem allocator's view and the memory model's view. Architectures register this information at different times, and it is not necessarily in sync, particularly with respect to some SPARSEMEM limitations.

This patch introduces mminit_validate_memmodel_limits(), which validates and corrects PFN ranges with respect to the memory model. Only SPARSEMEM currently validates itself.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
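For reference, below is a minimal user-space sketch of the clamping behaviour that mminit_validate_memmodel_limits() adds in the mm/sparse.c hunk further down. It is not kernel code: SKETCH_MAX_PHYSMEM_BITS and SKETCH_PAGE_SHIFT are illustrative stand-ins for the architecture-defined MAX_PHYSMEM_BITS and PAGE_SHIFT, and fprintf stands in for mminit_dprintk()/WARN_ON_ONCE().

/*
 * Minimal user-space sketch of the clamping logic added to mm/sparse.c.
 * SKETCH_MAX_PHYSMEM_BITS/SKETCH_PAGE_SHIFT are assumed values, not
 * taken from any particular architecture.
 */
#include <stdio.h>

#define SKETCH_MAX_PHYSMEM_BITS 36   /* assumed for illustration */
#define SKETCH_PAGE_SHIFT       12

static void validate_memmodel_limits(unsigned long *start_pfn,
                                     unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn =
                1UL << (SKETCH_MAX_PHYSMEM_BITS - SKETCH_PAGE_SHIFT);

        /* A start beyond the model's reach collapses the range to empty. */
        if (*start_pfn > max_sparsemem_pfn) {
                fprintf(stderr, "Start of range %lu -> %lu exceeds max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        }

        /* An end beyond the model's reach is clamped to the limit. */
        if (*end_pfn > max_sparsemem_pfn) {
                fprintf(stderr, "End of range %lu -> %lu exceeds max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                *end_pfn = max_sparsemem_pfn;
        }
}

int main(void)
{
        /* Deliberately oversized end PFN: 2^28 > 2^(36-12) = 2^24. */
        unsigned long start = 0, end = 1UL << 28;

        validate_memmodel_limits(&start, &end);
        printf("validated range: %lu -> %lu\n", start, end);
        return 0;
}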
Diffstat (limited to 'mm')
-rw-r--r--  mm/bootmem.c     |  1
-rw-r--r--  mm/internal.h    | 12
-rw-r--r--  mm/page_alloc.c  |  2
-rw-r--r--  mm/sparse.c      | 37
4 files changed, 44 insertions(+), 8 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 8d9f60e06f62..9f4bbc5da73f 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -91,6 +91,7 @@ static unsigned long __init init_bootmem_core(pg_data_t *pgdat,
 	bootmem_data_t *bdata = pgdat->bdata;
 	unsigned long mapsize;
 
+	mminit_validate_memmodel_limits(&start, &end);
 	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
 	bdata->node_boot_start = PFN_PHYS(start);
 	bdata->node_low_pfn = end;
diff --git a/mm/internal.h b/mm/internal.h
index 7a4a2885dc8e..5d17f3efac41 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -98,4 +98,16 @@ static inline void mminit_verify_page_links(struct page *page,
 {
 }
 #endif /* CONFIG_DEBUG_MEMORY_INIT */
+
+/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
+#if defined(CONFIG_SPARSEMEM)
+extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
+				unsigned long *end_pfn);
+#else
+static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
+				unsigned long *end_pfn)
+{
+}
+#endif /* CONFIG_SPARSEMEM */
+
 #endif
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index acab6ad326df..0adb66e711e6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3536,6 +3536,8 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 			nid, start_pfn, end_pfn,
 			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
 
+	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
+
 	/* Merge with existing active regions if possible */
 	for (i = 0; i < nr_nodemap_entries; i++) {
 		if (early_node_map[i].nid != nid)
diff --git a/mm/sparse.c b/mm/sparse.c
index 36511c7b5e2c..7a3650923d9a 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -12,6 +12,7 @@
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
+#include "internal.h"
 
 /*
  * Permanent SPARSEMEM data:
@@ -147,22 +148,41 @@ static inline int sparse_early_nid(struct mem_section *section)
 	return (section->section_mem_map >> SECTION_NID_SHIFT);
 }
 
-/* Record a memory area against a node. */
-void __init memory_present(int nid, unsigned long start, unsigned long end)
+/* Validate the physical addressing limitations of the model */
+void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
+						unsigned long *end_pfn)
 {
-	unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
-	unsigned long pfn;
+	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
 
 	/*
 	 * Sanity checks - do not allow an architecture to pass
 	 * in larger pfns than the maximum scope of sparsemem:
 	 */
-	if (start >= max_arch_pfn)
-		return;
-	if (end >= max_arch_pfn)
-		end = max_arch_pfn;
+	if (*start_pfn > max_sparsemem_pfn) {
+		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
+			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
+			*start_pfn, *end_pfn, max_sparsemem_pfn);
+		WARN_ON_ONCE(1);
+		*start_pfn = max_sparsemem_pfn;
+		*end_pfn = max_sparsemem_pfn;
+	}
+
+	if (*end_pfn > max_sparsemem_pfn) {
+		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
+			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
+			*start_pfn, *end_pfn, max_sparsemem_pfn);
+		WARN_ON_ONCE(1);
+		*end_pfn = max_sparsemem_pfn;
+	}
+}
+
+/* Record a memory area against a node. */
+void __init memory_present(int nid, unsigned long start, unsigned long end)
+{
+	unsigned long pfn;
 
 	start &= PAGE_SECTION_MASK;
+	mminit_validate_memmodel_limits(&start, &end);
 	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
 		unsigned long section = pfn_to_section_nr(pfn);
 		struct mem_section *ms;
@@ -187,6 +207,7 @@ unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
 	unsigned long pfn;
 	unsigned long nr_pages = 0;
 
+	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
 		if (nid != early_pfn_to_nid(pfn))
 			continue;