Diffstat (limited to 'arch/x86/mm/numa_64.c')
 arch/x86/mm/numa_64.c | 86 ++++++--------------------
 1 file changed, 20 insertions(+), 66 deletions(-)
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index a7bcc23ef96..60f498511dd 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -7,6 +7,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/mmzone.h>
 #include <linux/ctype.h>
 #include <linux/module.h>
@@ -18,7 +19,7 @@
 #include <asm/dma.h>
 #include <asm/numa.h>
 #include <asm/acpi.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
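
(The <asm/k8.h> to <asm/amd_nb.h> change follows the rename of the K8 northbridge helpers to the generic AMD northbridge code done in the same release cycle; only the header name is adjusted here.)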
@@ -86,16 +87,16 @@ static int __init allocate_cachealigned_memnodemap(void)
 
 	addr = 0x8000;
 	nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
-	nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
+	nodemap_addr = memblock_find_in_range(addr, max_pfn<<PAGE_SHIFT,
 				      nodemap_size, L1_CACHE_BYTES);
-	if (nodemap_addr == -1UL) {
+	if (nodemap_addr == MEMBLOCK_ERROR) {
 		printk(KERN_ERR
 		       "NUMA: Unable to allocate Memory to Node hash map\n");
 		nodemap_addr = nodemap_size = 0;
 		return -1;
 	}
 	memnodemap = phys_to_virt(nodemap_addr);
-	reserve_early(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");
+	memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");
 
 	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
 	       nodemap_addr, nodemap_addr + nodemap_size);
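
This hunk is the conversion in miniature: find_e820_area() becomes memblock_find_in_range(), reserve_early() becomes memblock_x86_reserve_range(), and the failure check moves from -1UL to MEMBLOCK_ERROR. A minimal sketch of the resulting allocate-then-reserve pattern, assuming the 2.6.37-era x86 memblock interface; early_alloc_named() is a hypothetical helper for illustration, not something this patch adds:

	/* Sketch only: find free early memory, then mark it reserved so
	 * later early allocations skip it. Hypothetical helper, not in
	 * the tree. */
	static unsigned long __init early_alloc_named(unsigned long size,
						      unsigned long align,
						      const char *name)
	{
		unsigned long addr;

		/* search [0x8000, max_pfn << PAGE_SHIFT) for a free block */
		addr = memblock_find_in_range(0x8000, max_pfn << PAGE_SHIFT,
					      size, align);
		if (addr == MEMBLOCK_ERROR)
			return 0;	/* caller must handle failure */

		memblock_x86_reserve_range(addr, addr + size, name);
		return addr;
	}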
@@ -171,8 +172,8 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
 	if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
 	    end > (MAX_DMA32_PFN<<PAGE_SHIFT))
 		start = MAX_DMA32_PFN<<PAGE_SHIFT;
-	mem = find_e820_area(start, end, size, align);
-	if (mem != -1L)
+	mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
+	if (mem != MEMBLOCK_ERROR)
 		return __va(mem);
 
 	/* extend the search scope */
@@ -181,8 +182,8 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
 		start = MAX_DMA32_PFN<<PAGE_SHIFT;
 	else
 		start = MAX_DMA_PFN<<PAGE_SHIFT;
-	mem = find_e820_area(start, end, size, align);
-	if (mem != -1L)
+	mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
+	if (mem != MEMBLOCK_ERROR)
 		return __va(mem);
 
 	printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
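
These two hunks change more than a function name: find_e820_area() knew nothing about topology, while memblock_x86_find_in_range_node() takes nodeid and prefers ranges on that node, so the first probe is node-local and the second one, after the window is widened below the DMA32/DMA boundary, is the cross-node fallback. Note also that the sentinel's value changes along with its name: find_e820_area() returned -1 on failure, while MEMBLOCK_ERROR was defined as 0 at the time, which is why every comparison site has to be touched rather than just the call.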
@@ -198,10 +199,6 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 	unsigned long start_pfn, last_pfn, nodedata_phys;
 	const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
 	int nid;
-#ifndef CONFIG_NO_BOOTMEM
-	unsigned long bootmap_start, bootmap_pages, bootmap_size;
-	void *bootmap;
-#endif
 
 	if (!end)
 		return;
@@ -226,7 +223,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 	if (node_data[nodeid] == NULL)
 		return;
 	nodedata_phys = __pa(node_data[nodeid]);
-	reserve_early(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
+	memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
 	printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
 		nodedata_phys + pgdat_size - 1);
 	nid = phys_to_nid(nodedata_phys);
@@ -238,47 +235,6 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
 	NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;
 
-#ifndef CONFIG_NO_BOOTMEM
-	NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid];
-
-	/*
-	 * Find a place for the bootmem map
-	 * nodedata_phys could be on other nodes by alloc_bootmem,
-	 * so need to sure bootmap_start not to be small, otherwise
-	 * early_node_mem will get that with find_e820_area instead
-	 * of alloc_bootmem, that could clash with reserved range
-	 */
-	bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
-	bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
-	/*
-	 * SMP_CACHE_BYTES could be enough, but init_bootmem_node like
-	 * to use that to align to PAGE_SIZE
-	 */
-	bootmap = early_node_mem(nodeid, bootmap_start, end,
-				 bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
-	if (bootmap == NULL) {
-		free_early(nodedata_phys, nodedata_phys + pgdat_size);
-		node_data[nodeid] = NULL;
-		return;
-	}
-	bootmap_start = __pa(bootmap);
-	reserve_early(bootmap_start, bootmap_start+(bootmap_pages<<PAGE_SHIFT),
-		      "BOOTMAP");
-
-	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
-					 bootmap_start >> PAGE_SHIFT,
-					 start_pfn, last_pfn);
-
-	printk(KERN_INFO " bootmap [%016lx - %016lx] pages %lx\n",
-		bootmap_start, bootmap_start + bootmap_size - 1,
-		bootmap_pages);
-	nid = phys_to_nid(bootmap_start);
-	if (nid != nodeid)
-		printk(KERN_INFO " bootmap(%d) on node %d\n", nodeid, nid);
-
-	free_bootmem_with_active_regions(nodeid, end);
-#endif
-
 	node_set_online(nodeid);
 }
 
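The whole #ifndef CONFIG_NO_BOOTMEM block can simply go: with memblock underneath, x86-64 always runs in the no-bootmem configuration, so there is no per-node bootmem bitmap to place, reserve, and register through init_bootmem_node(); memblock itself tracks which ranges are free and which are reserved. For reference, the deleted path sized that bitmap at one bit per page frame, rounded up to whole pages, which is roughly what bootmem_bootmap_pages() computes. An illustration of the arithmetic, not code from the patch:

	/* Illustration only: bitmap pages needed to track 'pages' page
	 * frames at one bit per page (approximates what
	 * bootmem_bootmap_pages() returns, ignoring its long-word
	 * alignment step). */
	unsigned long bitmap_bytes  = DIV_ROUND_UP(pages, 8);
	unsigned long bootmap_pages = DIV_ROUND_UP(bitmap_bytes, PAGE_SIZE);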
@@ -416,7 +372,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
 		nr_nodes = MAX_NUMNODES;
 	}
 
-	size = (max_addr - addr - e820_hole_size(addr, max_addr)) / nr_nodes;
+	size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
 	/*
 	 * Calculate the number of big nodes that can be allocated as a result
 	 * of consolidating the remainder.
@@ -452,7 +408,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
 		 * non-reserved memory is less than the per-node size.
 		 */
 		while (end - physnodes[i].start -
-			e820_hole_size(physnodes[i].start, end) < size) {
+			memblock_x86_hole_size(physnodes[i].start, end) < size) {
 			end += FAKE_NODE_MIN_SIZE;
 			if (end > physnodes[i].end) {
 				end = physnodes[i].end;
@@ -466,7 +422,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
 		 * this one must extend to the boundary.
 		 */
 		if (end < dma32_end && dma32_end - end -
-		    e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+		    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
 			end = dma32_end;
 
 		/*
@@ -475,7 +431,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
 		 * physical node.
 		 */
 		if (physnodes[i].end - end -
-		    e820_hole_size(end, physnodes[i].end) < size)
+		    memblock_x86_hole_size(end, physnodes[i].end) < size)
 			end = physnodes[i].end;
 
 		/*
@@ -503,7 +459,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
 {
 	u64 end = start + size;
 
-	while (end - start - e820_hole_size(start, end) < size) {
+	while (end - start - memblock_x86_hole_size(start, end) < size) {
 		end += FAKE_NODE_MIN_SIZE;
 		if (end > max_addr) {
 			end = max_addr;
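
Every e820_hole_size() to memblock_x86_hole_size() substitution in this and the neighboring fake-NUMA hunks is behavior-preserving: both report how many bytes of [start, end) are not covered by usable RAM, and only the backing map changes. The loop above then grows end in FAKE_NODE_MIN_SIZE steps until the candidate range spans size bytes of actual memory. A standalone userspace illustration of the hole query, with a made-up RAM map and the assumption that ranges do not overlap (this is not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	struct range { uint64_t start, end; };	/* [start, end) */

	static uint64_t hole_size(const struct range *r, int n,
				  uint64_t start, uint64_t end)
	{
		uint64_t covered = 0;

		for (int i = 0; i < n; i++) {
			/* clip each RAM range to the query window */
			uint64_t s = r[i].start > start ? r[i].start : start;
			uint64_t e = r[i].end < end ? r[i].end : end;

			if (s < e)
				covered += e - s;
		}
		return (end - start) - covered;
	}

	int main(void)
	{
		/* toy map: RAM below 640 KiB and from 1 MiB up, i.e. the
		 * classic legacy hole at [0xa0000, 0x100000) */
		struct range ram[] = {
			{ 0x00000,  0xa0000    },
			{ 0x100000, 0xc0000000 },
		};

		/* prints 393216, the 384 KiB legacy hole */
		printf("hole bytes in first 1 MiB: %llu\n",
		       (unsigned long long)hole_size(ram, 2, 0, 0x100000));
		return 0;
	}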
@@ -532,7 +488,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
 	 * creates a uniform distribution of node sizes across the entire
 	 * machine (but not necessarily over physical nodes).
 	 */
-	min_size = (max_addr - addr - e820_hole_size(addr, max_addr)) /
+	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
 		MAX_NUMNODES;
 	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
 	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
@@ -565,7 +521,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
 		 * this one must extend to the boundary.
 		 */
 		if (end < dma32_end && dma32_end - end -
-		    e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+		    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
 			end = dma32_end;
 
 		/*
@@ -574,7 +530,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
 		 * physical node.
 		 */
 		if (physnodes[i].end - end -
-		    e820_hole_size(end, physnodes[i].end) < size)
+		    memblock_x86_hole_size(end, physnodes[i].end) < size)
 			end = physnodes[i].end;
 
 		/*
@@ -638,7 +594,7 @@ static int __init numa_emulation(unsigned long start_pfn,
 	 */
 	remove_all_active_ranges();
 	for_each_node_mask(i, node_possible_map) {
-		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
+		memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
 						nodes[i].end >> PAGE_SHIFT);
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 	}
@@ -691,7 +647,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
 	node_set(0, node_possible_map);
 	for (i = 0; i < nr_cpu_ids; i++)
 		numa_set_node(i, 0);
-	e820_register_active_regions(0, start_pfn, last_pfn);
+	memblock_x86_register_active_regions(0, start_pfn, last_pfn);
 	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
 }
 
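One unit subtlety: memblock_x86_register_active_regions() takes page frame numbers, while setup_node_bootmem() takes byte addresses, hence the << PAGE_SHIFT conversions on the last line (with 4 KiB pages PAGE_SHIFT is 12, so pfn 0x100 is physical address 0x100000).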
@@ -703,9 +659,7 @@ unsigned long __init numa_free_all_bootmem(void)
 	for_each_online_node(i)
 		pages += free_all_bootmem_node(NODE_DATA(i));
 
-#ifdef CONFIG_NO_BOOTMEM
 	pages += free_all_memory_core_early(MAX_NUMNODES);
-#endif
 
 	return pages;
 }
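
The same reasoning closes the patch: the #ifdef CONFIG_NO_BOOTMEM guard is dropped because the memblock-backed no-bootmem path is now the only one x86-64 builds, so free_all_memory_core_early() runs unconditionally to hand the remaining early memory over to the page allocator.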