author     Tejun Heo <tj@kernel.org>  2011-02-16 11:11:09 -0500
committer  Tejun Heo <tj@kernel.org>  2011-02-16 11:11:09 -0500
commit     56e827fbde9a3cb886a2fe138db0d99e98efbfb1 (patch)
tree       1a5eb3631d847669179ffd97b5274dd70405c931 /arch
parent     2e756be44714d0ec2f9827e4f4797c60876167a1 (diff)
x86-64, NUMA: consolidate and improve memblk sanity checks
The memblk sanity checks were scattered around and incomplete.  Consolidate
and improve them.

* Conflict detection and cutoff_node() logic are moved to
  numa_cleanup_meminfo().

* numa_cleanup_meminfo() clears the unused memblks before returning.

* Check and warn about invalid input parameters in numa_add_memblk().

* Check that the maximum number of memblks isn't exceeded in
  numa_add_memblk().

* numa_cleanup_meminfo() is now called before numa_emulation() so that
  the emulation code also uses the cleaned-up version.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Shaohui Zheng <shaohui.zheng@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: H. Peter Anvin <hpa@linux.intel.com>
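To make the consolidated checks easier to follow, here is a standalone, userspace-style sketch of the cleanup pass the changelog describes: blocks are clamped to the allowed address range, empty blocks are dropped, overlaps between different nodes are rejected, and overlapping blocks of the same node are merged.  Every identifier below (struct blk, cleanup(), remove_blk(), MAX_BLKS) is local to the example and is not the kernel's numa_meminfo API, and the kernel's extra rule that the hole between two joined blocks must not cover another node's memory is omitted for brevity.

/*
 * Illustrative sketch only -- not the kernel implementation.  It mirrors
 * the checks described above: clamp each block to [low, high), drop
 * blocks that become empty, reject overlaps between different nodes,
 * and merge overlapping blocks that belong to the same node.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_BLKS 16

struct blk { uint64_t start, end; int nid; };

static struct blk blks[MAX_BLKS];
static int nr_blks;

/* remove entry idx and close the gap */
static void remove_blk(int idx)
{
	for (int i = idx; i < nr_blks - 1; i++)
		blks[i] = blks[i + 1];
	nr_blks--;
}

static int cleanup(uint64_t low, uint64_t high)
{
	for (int i = 0; i < nr_blks; i++) {
		struct blk *bi = &blks[i];

		/* clamp to the allowed range and drop empty blocks */
		if (bi->start < low)
			bi->start = low;
		if (bi->end > high)
			bi->end = high;
		if (bi->start >= bi->end) {
			remove_blk(i--);
			continue;
		}

		for (int j = i + 1; j < nr_blks; j++) {
			struct blk *bj = &blks[j];

			if (bi->end > bj->start && bi->start < bj->end) {
				/* cross-node overlap is a hard error */
				if (bi->nid != bj->nid) {
					fprintf(stderr,
						"node %d overlaps node %d\n",
						bi->nid, bj->nid);
					return -1;
				}
				/* same node: fold bj into bi and drop bj */
				if (bj->start < bi->start)
					bi->start = bj->start;
				if (bj->end > bi->end)
					bi->end = bj->end;
				remove_blk(j--);
			}
		}
	}
	return 0;
}

int main(void)
{
	blks[nr_blks++] = (struct blk){ 0x0000, 0x8000, 0 };
	blks[nr_blks++] = (struct blk){ 0x6000, 0xa000, 0 };	/* merged into the first */
	blks[nr_blks++] = (struct blk){ 0xa000, 0xc000, 1 };

	if (cleanup(0, 0xc000) == 0)
		for (int i = 0; i < nr_blks; i++)
			printf("node %d: %#llx-%#llx\n", blks[i].nid,
			       (unsigned long long)blks[i].start,
			       (unsigned long long)blks[i].end);
	return 0;
}

Compiling and running the sketch prints the merged node 0 block followed by the untouched node 1 block.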
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/mm/numa_64.c  99
1 file changed, 49 insertions(+), 50 deletions(-)
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 4fd3368adc8f..20aa1d31e165 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -189,37 +189,23 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
 	return NULL;
 }
 
-static __init int conflicting_memblks(unsigned long start, unsigned long end)
+int __init numa_add_memblk(int nid, u64 start, u64 end)
 {
 	struct numa_meminfo *mi = &numa_meminfo;
-	int i;
 
-	for (i = 0; i < mi->nr_blks; i++) {
-		struct numa_memblk *blk = &mi->blk[i];
+	/* ignore zero length blks */
+	if (start == end)
+		return 0;
 
-		if (blk->start == blk->end)
-			continue;
-		if (blk->end > start && blk->start < end)
-			return blk->nid;
-		if (blk->end == end && blk->start == start)
-			return blk->nid;
+	/* whine about and ignore invalid blks */
+	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
+		pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
+			   nid, start, end);
+		return 0;
 	}
-	return -1;
-}
-
-int __init numa_add_memblk(int nid, u64 start, u64 end)
-{
-	struct numa_meminfo *mi = &numa_meminfo;
-	int i;
 
-	i = conflicting_memblks(start, end);
-	if (i == nid) {
-		printk(KERN_WARNING "NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
-		       nid, start, end, numa_nodes[i].start, numa_nodes[i].end);
-	} else if (i >= 0) {
-		printk(KERN_ERR "NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
-		       nid, start, end, i,
-		       numa_nodes[i].start, numa_nodes[i].end);
+	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
+		pr_err("NUMA: too many memblk ranges\n");
 		return -EINVAL;
 	}
 
@@ -237,22 +223,6 @@ static void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
 		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
 }
 
-static __init void cutoff_node(int i, unsigned long start, unsigned long end)
-{
-	struct bootnode *nd = &numa_nodes[i];
-
-	if (nd->start < start) {
-		nd->start = start;
-		if (nd->end < nd->start)
-			nd->start = nd->end;
-	}
-	if (nd->end > end) {
-		nd->end = end;
-		if (nd->start > nd->end)
-			nd->start = nd->end;
-	}
-}
-
 /* Initialize bootmem allocator for a node */
 void __init
 setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
@@ -301,24 +271,53 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 
 static int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
 {
+	const u64 low = 0;
+	const u64 high = (u64)max_pfn << PAGE_SHIFT;
 	int i, j, k;
 
 	for (i = 0; i < mi->nr_blks; i++) {
 		struct numa_memblk *bi = &mi->blk[i];
 
+		/* make sure all blocks are inside the limits */
+		bi->start = max(bi->start, low);
+		bi->end = min(bi->end, high);
+
+		/* and there's no empty block */
+		if (bi->start == bi->end) {
+			numa_remove_memblk_from(i--, mi);
+			continue;
+		}
+
 		for (j = i + 1; j < mi->nr_blks; j++) {
 			struct numa_memblk *bj = &mi->blk[j];
 			unsigned long start, end;
 
 			/*
+			 * See whether there are overlapping blocks.  Whine
+			 * about but allow overlaps of the same nid.  They
+			 * will be merged below.
+			 */
+			if (bi->end > bj->start && bi->start < bj->end) {
+				if (bi->nid != bj->nid) {
+					pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
+					       bi->nid, bi->start, bi->end,
+					       bj->nid, bj->start, bj->end);
+					return -EINVAL;
+				}
+				pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
+					   bi->nid, bi->start, bi->end,
+					   bj->start, bj->end);
+			}
+
+			/*
 			 * Join together blocks on the same node, holes
 			 * between which don't overlap with memory on other
 			 * nodes.
 			 */
 			if (bi->nid != bj->nid)
 				continue;
-			start = min(bi->start, bj->start);
-			end = max(bi->end, bj->end);
+			start = max(min(bi->start, bj->start), low);
+			end = min(max(bi->end, bj->end), high);
 			for (k = 0; k < mi->nr_blks; k++) {
 				struct numa_memblk *bk = &mi->blk[k];
 
@@ -338,6 +337,11 @@ static int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
 		}
 	}
 
+	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
+		mi->blk[i].start = mi->blk[i].end = 0;
+		mi->blk[i].nid = NUMA_NO_NODE;
+	}
+
 	return 0;
 }
 
@@ -824,10 +828,8 @@ void __init initmem_init(void)
 		if (numa_init[i]() < 0)
 			continue;
 
-		/* clean up the node list */
-		for (j = 0; j < MAX_NUMNODES; j++)
-			cutoff_node(j, 0, max_pfn << PAGE_SHIFT);
-
+		if (numa_cleanup_meminfo(&numa_meminfo) < 0)
+			continue;
 #ifdef CONFIG_NUMA_EMU
 		setup_physnodes(0, max_pfn << PAGE_SHIFT);
 		if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1))
@@ -836,9 +838,6 @@ void __init initmem_init(void)
 		nodes_clear(node_possible_map);
 		nodes_clear(node_online_map);
 #endif
-		if (numa_cleanup_meminfo(&numa_meminfo) < 0)
-			continue;
-
 		if (numa_register_memblks(&numa_meminfo) < 0)
 			continue;
 
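For completeness, the add-side validation introduced in the first hunk can be exercised with a similar standalone, userspace-style sketch.  All identifiers here (struct memblk, add_memblk(), MAX_BLKS, MAX_NODES as a stand-in for MAX_NUMNODES) are local to the example; this is not the kernel code.

/*
 * Standalone illustration of the add-side checks: silently ignore empty
 * blocks, warn about and ignore blocks with a bogus range or node id,
 * and fail once the fixed-size table is full.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_BLKS  16
#define MAX_NODES 64	/* stand-in for MAX_NUMNODES */

struct memblk { uint64_t start, end; int nid; };

static struct memblk table[MAX_BLKS];
static int nr_blks;

static int add_memblk(int nid, uint64_t start, uint64_t end)
{
	/* ignore zero length blocks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blocks */
	if (start > end || nid < 0 || nid >= MAX_NODES) {
		fprintf(stderr, "invalid memblk node %d (%llx-%llx)\n", nid,
			(unsigned long long)start, (unsigned long long)end);
		return 0;
	}

	/* refuse additions once the table is full */
	if (nr_blks >= MAX_BLKS) {
		fprintf(stderr, "too many memblk ranges\n");
		return -1;
	}

	table[nr_blks].start = start;
	table[nr_blks].end = end;
	table[nr_blks].nid = nid;
	nr_blks++;
	return 0;
}

int main(void)
{
	add_memblk(0, 0x1000, 0x1000);	/* empty block: silently ignored */
	add_memblk(99, 0x0, 0x1000);	/* invalid node id: warned about, ignored */
	add_memblk(0, 0x0, 0x1000);	/* valid: accepted */
	printf("%d block(s) accepted\n", nr_blks);
	return 0;
}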