Diffstat (limited to 'arch/x86/mm/numa_64.c'):
 arch/x86/mm/numa_64.c | 83 +++++++++++++++++++++++++-------------------
 1 file changed, 46 insertions(+), 37 deletions(-)
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index c3496e2b5a7..f2721de30a4 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -292,40 +292,8 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
         node_set_online(nodeid);
 }
 
-/*
- * Sanity check to catch more bad NUMA configurations (they are amazingly
- * common). Make sure the nodes cover all memory.
- */
-static int __init nodes_cover_memory(const struct bootnode *nodes)
+static int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
 {
-        unsigned long numaram, e820ram;
-        int i;
-
-        numaram = 0;
-        for_each_node_mask(i, mem_nodes_parsed) {
-                unsigned long s = nodes[i].start >> PAGE_SHIFT;
-                unsigned long e = nodes[i].end >> PAGE_SHIFT;
-                numaram += e - s;
-                numaram -= __absent_pages_in_range(i, s, e);
-                if ((long)numaram < 0)
-                        numaram = 0;
-        }
-
-        e820ram = max_pfn -
-                (memblock_x86_hole_size(0, max_pfn<<PAGE_SHIFT) >> PAGE_SHIFT);
-        /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
-        if ((long)(e820ram - numaram) >= (1<<(20 - PAGE_SHIFT))) {
-                printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n",
-                       (numaram << PAGE_SHIFT) >> 20,
-                       (e820ram << PAGE_SHIFT) >> 20);
-                return 0;
-        }
-        return 1;
-}
-
-static int __init numa_register_memblks(void)
-{
-        struct numa_meminfo *mi = &numa_meminfo;
         int i;
 
         /*
@@ -368,6 +336,49 @@ static int __init numa_register_memblks(void)
                 }
         }
 
+        return 0;
+}
+
+/*
+ * Sanity check to catch more bad NUMA configurations (they are amazingly
+ * common). Make sure the nodes cover all memory.
+ */
+static int __init nodes_cover_memory(const struct bootnode *nodes)
+{
+        unsigned long numaram, e820ram;
+        int i;
+
+        numaram = 0;
+        for_each_node_mask(i, mem_nodes_parsed) {
+                unsigned long s = nodes[i].start >> PAGE_SHIFT;
+                unsigned long e = nodes[i].end >> PAGE_SHIFT;
+                numaram += e - s;
+                numaram -= __absent_pages_in_range(i, s, e);
+                if ((long)numaram < 0)
+                        numaram = 0;
+        }
+
+        e820ram = max_pfn - (memblock_x86_hole_size(0,
+                                        max_pfn << PAGE_SHIFT) >> PAGE_SHIFT);
+        /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
+        if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
+                printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n",
+                       (numaram << PAGE_SHIFT) >> 20,
+                       (e820ram << PAGE_SHIFT) >> 20);
+                return 0;
+        }
+        return 1;
+}
+
+static int __init numa_register_memblks(struct numa_meminfo *mi)
+{
+        int i;
+
+        /* Account for nodes with cpus and no memory */
+        nodes_or(node_possible_map, mem_nodes_parsed, cpu_nodes_parsed);
+        if (WARN_ON(nodes_empty(node_possible_map)))
+                return -EINVAL;
+
         memnode_shift = compute_hash_shift(mi);
         if (memnode_shift < 0) {
                 printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n");
@@ -823,12 +834,10 @@ void __init initmem_init(void)
                 nodes_clear(node_possible_map);
                 nodes_clear(node_online_map);
 #endif
-                /* Account for nodes with cpus and no memory */
-                nodes_or(node_possible_map, mem_nodes_parsed, cpu_nodes_parsed);
-                if (WARN_ON(nodes_empty(node_possible_map)))
+                if (numa_cleanup_meminfo(&numa_meminfo) < 0)
                         continue;
 
-                if (numa_register_memblks() < 0)
+                if (numa_register_memblks(&numa_meminfo) < 0)
                         continue;
 
                 for (j = 0; j < nr_cpu_ids; j++) {
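
In the reworked flow, each iteration of the fallback loop in initmem_init() first sanitizes the parsed ranges with numa_cleanup_meminfo() and only then hands the same structure to numa_register_memblks(), which now takes the meminfo explicitly and absorbs the node_possible_map accounting that initmem_init() used to do inline. A minimal sketch of that ordering, using a hypothetical wrapper setup_numa_config() (no such helper exists in the tree; the two calls and their signatures are taken from the patch above):

        /* Hypothetical wrapper illustrating the post-patch call order. */
        static int __init setup_numa_config(void)
        {
                /* Step 1: sanitize the parsed NUMA memory ranges. */
                if (numa_cleanup_meminfo(&numa_meminfo) < 0)
                        return -EINVAL; /* caller falls back to the next config */

                /* Step 2: register memblks; this now also derives
                 * node_possible_map from mem_nodes_parsed and cpu_nodes_parsed. */
                return numa_register_memblks(&numa_meminfo);
        }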