about summary refs log tree commit diff stats
path: root/arch/x86/mm/numa_64.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/mm/numa_64.c')
-rw-r--r--  arch/x86/mm/numa_64.c  75
1 files changed, 0 insertions, 75 deletions
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 14664f58a75..f548fbf75f4 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -224,28 +224,6 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
224 node_set_online(nodeid); 224 node_set_online(nodeid);
225} 225}
226 226
227/*
228 * There are unfortunately some poorly designed mainboards around that
229 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
230 * mapping. To avoid this fill in the mapping for all possible CPUs,
231 * as the number of CPUs is not known yet. We round robin the existing
232 * nodes.
233 */
234void __init numa_init_array(void)
235{
236 int rr, i;
237
238 rr = first_node(node_online_map);
239 for (i = 0; i < nr_cpu_ids; i++) {
240 if (early_cpu_to_node(i) != NUMA_NO_NODE)
241 continue;
242 numa_set_node(i, rr);
243 rr = next_node(rr, node_online_map);
244 if (rr == MAX_NUMNODES)
245 rr = first_node(node_online_map);
246 }
247}
248
249#ifdef CONFIG_NUMA_EMU 227#ifdef CONFIG_NUMA_EMU
250/* Numa emulation */ 228/* Numa emulation */
251static struct bootnode nodes[MAX_NUMNODES] __initdata; 229static struct bootnode nodes[MAX_NUMNODES] __initdata;
@@ -664,59 +642,6 @@ unsigned long __init numa_free_all_bootmem(void)
664 return pages; 642 return pages;
665} 643}
666 644
667#ifdef CONFIG_NUMA
668
669static __init int find_near_online_node(int node)
670{
671 int n, val;
672 int min_val = INT_MAX;
673 int best_node = -1;
674
675 for_each_online_node(n) {
676 val = node_distance(node, n);
677
678 if (val < min_val) {
679 min_val = val;
680 best_node = n;
681 }
682 }
683
684 return best_node;
685}
686
687/*
688 * Setup early cpu_to_node.
689 *
690 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
691 * and apicid_to_node[] tables have valid entries for a CPU.
692 * This means we skip cpu_to_node[] initialisation for NUMA
693 * emulation and faking node case (when running a kernel compiled
694 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
695 * is already initialized in a round robin manner at numa_init_array,
696 * prior to this call, and this initialization is good enough
697 * for the fake NUMA cases.
698 *
699 * Called before the per_cpu areas are setup.
700 */
701void __init init_cpu_to_node(void)
702{
703 int cpu;
704 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
705
706 BUG_ON(cpu_to_apicid == NULL);
707
708 for_each_possible_cpu(cpu) {
709 int node = numa_cpu_node(cpu);
710
711 if (node == NUMA_NO_NODE)
712 continue;
713 if (!node_online(node))
714 node = find_near_online_node(node);
715 numa_set_node(cpu, node);
716 }
717}
718#endif
719
720int __cpuinit numa_cpu_node(int cpu) 645int __cpuinit numa_cpu_node(int cpu)
721{ 646{
722 int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); 647 int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);