author     David Rientjes <rientjes@google.com>    2009-09-25 18:20:00 -0400
committer  Ingo Molnar <mingo@elte.hu>             2009-10-12 16:56:45 -0400
commit     8ee2debce32412118cf8c239e0026ace56ea1425 (patch)
tree       644da3b622f5266cf61e0771c23cca33943a3614 /arch/x86
parent     1af5ba514f0c2f2e2af965a4ffa5e8ab269271b9 (diff)
x86: Export k8 physical topology
To eventually interleave emulated nodes over physical nodes, we need to know
the physical topology of the machine without actually registering it. This
does the k8 node setup in two parts: detection and registration.

NUMA emulation can then use the physical topology detected to set up the
address ranges of emulated nodes accordingly. If emulation isn't used, the
k8 nodes are registered as normal.

Two formals are added to the x86 NUMA setup functions: `acpi' and `k8'.
These represent whether ACPI or K8 NUMA has been detected; both cannot be
true at the same time. This specifies to the NUMA emulation code whether an
underlying physical NUMA topology exists and which interface to use.

This patch deals solely with separating the k8 setup path into Northbridge
detection and registration steps and leaves the ACPI changes for a
subsequent patch. The `acpi' formal is added here, however, to avoid
touching all the header files again in the next patch.

This approach also ensures emulated nodes will not span physical nodes so
the true memory latency is not misrepresented.

k8_get_nodes() may now be used to export the k8 physical topology of the
machine for NUMA emulation.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Ankita Garg <ankita@in.ibm.com>
Cc: Len Brown <len.brown@intel.com>
LKML-Reference: <alpine.DEB.1.00.0909251518400.14754@chino.kir.corp.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
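[Editor's note] The intended consumer of the exported topology is the NUMA
emulation code added in a subsequent patch. The snippet below is a minimal
user-space sketch (not kernel code, assuming a 64-bit host) of how emulated
nodes could be carved out of the physical ranges reported by a
k8_get_nodes()-style call without any emulated node spanning a physical
node. The struct bootnode field layout and the emulate_over_physnodes()
helper are illustrative assumptions, not part of this patch.

#include <stdio.h>

struct bootnode {
	unsigned long start;	/* physical address, inclusive */
	unsigned long end;	/* physical address, exclusive */
};

static int emulate_over_physnodes(const struct bootnode *phys, int nr_phys,
				  struct bootnode *emu, int nr_emu)
{
	int i, e = 0;

	for (i = 0; i < nr_phys && e < nr_emu; i++) {
		/* spread the remaining emulated nodes over the remaining
		 * physical nodes, rounding up */
		int per_phys = (nr_emu - e + (nr_phys - i) - 1) / (nr_phys - i);
		unsigned long size = (phys[i].end - phys[i].start) / per_phys;
		unsigned long addr = phys[i].start;
		int j;

		for (j = 0; j < per_phys && e < nr_emu; j++, e++) {
			emu[e].start = addr;
			/* last slice absorbs any rounding remainder */
			addr = (j == per_phys - 1) ? phys[i].end : addr + size;
			emu[e].end = addr;
		}
	}
	return e;	/* number of emulated nodes actually set up */
}

int main(void)
{
	/* pretend detection reported two 4 GiB physical nodes */
	struct bootnode phys[] = {
		{ 0x000000000UL, 0x100000000UL },
		{ 0x100000000UL, 0x200000000UL },
	};
	struct bootnode emu[4];
	int i, n = emulate_over_physnodes(phys, 2, emu, 4);

	for (i = 0; i < n; i++)
		printf("emulated node %d: %#lx-%#lx\n",
		       i, emu[i].start, emu[i].end);
	return 0;
}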
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/k8.h          |  4
-rw-r--r--  arch/x86/include/asm/page_types.h  |  3
-rw-r--r--  arch/x86/kernel/setup.c            | 10
-rw-r--r--  arch/x86/mm/init_32.c              |  4
-rw-r--r--  arch/x86/mm/init_64.c              |  3
-rw-r--r--  arch/x86/mm/k8topology_64.c        | 52
-rw-r--r--  arch/x86/mm/numa_32.c              |  4
-rw-r--r--  arch/x86/mm/numa_64.c              |  6
8 files changed, 62 insertions(+), 24 deletions(-)
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h
index c2d1f3b58e5f..c092f720bd60 100644
--- a/arch/x86/include/asm/k8.h
+++ b/arch/x86/include/asm/k8.h
@@ -10,7 +10,9 @@ extern struct pci_dev **k8_northbridges;
 extern int num_k8_northbridges;
 extern int cache_k8_northbridges(void);
 extern void k8_flush_garts(void);
-extern int k8_scan_nodes(unsigned long start, unsigned long end);
+extern int k8_get_nodes(struct bootnode *nodes);
+extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
+extern int k8_scan_nodes(void);
 
 #ifdef CONFIG_K8_NB
 static inline struct pci_dev *node_to_k8_nb_misc(int node)
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 6473f5ccff85..642fe34b36a2 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -49,7 +49,8 @@ extern unsigned long max_pfn_mapped;
 extern unsigned long init_memory_mapping(unsigned long start,
 					 unsigned long end);
 
-extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
+extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn,
+			 int acpi, int k8);
 extern void free_initmem(void);
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index e09f0e2c14b5..fda0032c25c6 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -106,6 +106,7 @@
 #include <asm/percpu.h>
 #include <asm/topology.h>
 #include <asm/apicdef.h>
+#include <asm/k8.h>
 #ifdef CONFIG_X86_64
 #include <asm/numa_64.h>
 #endif
@@ -691,6 +692,9 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
 
 void __init setup_arch(char **cmdline_p)
 {
+	int acpi = 0;
+	int k8 = 0;
+
 #ifdef CONFIG_X86_32
 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
 	visws_early_detect();
@@ -937,7 +941,11 @@ void __init setup_arch(char **cmdline_p)
 	acpi_numa_init();
 #endif
 
-	initmem_init(0, max_pfn);
+#ifdef CONFIG_K8_NUMA
+	k8 = !k8_numa_init(0, max_pfn);
+#endif
+
+	initmem_init(0, max_pfn, acpi, k8);
 
 #ifdef CONFIG_ACPI_SLEEP
 	/*
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 30938c1d8d5d..5e32b07b535d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -703,8 +703,8 @@ void __init find_low_pfn_range(void)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-void __init initmem_init(unsigned long start_pfn,
-			 unsigned long end_pfn)
+void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
+			 int acpi, int k8)
 {
 #ifdef CONFIG_HIGHMEM
 	highstart_pfn = highend_pfn = max_pfn;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 5a4398a6006b..c20d30b440de 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -568,7 +568,8 @@ kernel_physical_mapping_init(unsigned long start,
 }
 
 #ifndef CONFIG_NUMA
-void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
+void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
+			 int acpi, int k8)
 {
 	unsigned long bootmap_size, bootmap;
 
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c
index a81561acc20f..b9e2dbfe55c3 100644
--- a/arch/x86/mm/k8topology_64.c
+++ b/arch/x86/mm/k8topology_64.c
@@ -24,6 +24,9 @@
 #include <asm/apic.h>
 #include <asm/k8.h>
 
+static struct bootnode __initdata nodes[8];
+static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE;
+
 static __init int find_northbridge(void)
 {
 	int num;
@@ -76,12 +79,26 @@ static __init void early_get_boot_cpu_id(void)
 	early_init_lapic_mapping();
 }
 
-int __init k8_scan_nodes(unsigned long start, unsigned long end)
+int __init k8_get_nodes(struct bootnode *physnodes)
 {
-	unsigned numnodes, cores, bits, apicid_base;
+	int i;
+	int ret = 0;
+
+	for_each_node_mask(i, nodes_parsed) {
+		physnodes[ret].start = nodes[i].start;
+		physnodes[ret].end = nodes[i].end;
+		ret++;
+	}
+	return ret;
+}
+
+int __init k8_numa_init(unsigned long start_pfn, unsigned long end_pfn)
+{
+	unsigned long start = PFN_PHYS(start_pfn);
+	unsigned long end = PFN_PHYS(end_pfn);
+	unsigned numnodes;
 	unsigned long prevbase;
-	struct bootnode nodes[8];
-	int i, j, nb, found = 0;
+	int i, nb, found = 0;
 	u32 nodeid, reg;
 
 	if (!early_pci_allowed())
@@ -98,9 +115,8 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 	if (numnodes <= 1)
 		return -1;
 
-	pr_info("Number of nodes %d\n", numnodes);
+	pr_info("Number of physical nodes %d\n", numnodes);
 
-	memset(&nodes, 0, sizeof(nodes));
 	prevbase = 0;
 	for (i = 0; i < 8; i++) {
 		unsigned long base, limit;
@@ -130,7 +146,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 			       nodeid, (base >> 8) & 3, (limit >> 8) & 3);
 			return -1;
 		}
-		if (node_isset(nodeid, node_possible_map)) {
+		if (node_isset(nodeid, nodes_parsed)) {
 			pr_info("Node %d already present, skipping\n",
 				nodeid);
 			continue;
@@ -141,8 +157,8 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 		limit |= (1<<24)-1;
 		limit++;
 
-		if (limit > max_pfn << PAGE_SHIFT)
-			limit = max_pfn << PAGE_SHIFT;
+		if (limit > end)
+			limit = end;
 		if (limit <= base)
 			continue;
 
@@ -180,12 +196,23 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 
 		prevbase = base;
 
-		node_set(nodeid, node_possible_map);
+		node_set(nodeid, nodes_parsed);
 	}
 
 	if (!found)
 		return -1;
+	return 0;
+}
 
+int __init k8_scan_nodes(void)
+{
+	unsigned int bits;
+	unsigned int cores;
+	unsigned int apicid_base;
+	int i;
+
+	BUG_ON(nodes_empty(nodes_parsed));
+	node_possible_map = nodes_parsed;
 	memnode_shift = compute_hash_shift(nodes, 8, NULL);
 	if (memnode_shift < 0) {
 		pr_err("No NUMA node hash function found. Contact maintainer\n");
@@ -204,9 +231,8 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 		apicid_base = boot_cpu_physical_apicid;
 	}
 
-	for (i = 0; i < 8; i++) {
-		if (nodes[i].start == nodes[i].end)
-			continue;
+	for_each_node_mask(i, node_possible_map) {
+		int j;
 
 		e820_register_active_regions(i,
 						nodes[i].start >> PAGE_SHIFT,
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index d2530062fe00..b20760ca7244 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -347,8 +347,8 @@ static void init_remap_allocator(int nid)
 		(ulong) node_remap_end_vaddr[nid]);
 }
 
-void __init initmem_init(unsigned long start_pfn,
-			 unsigned long end_pfn)
+void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
+			 int acpi, int k8)
 {
 	int nid;
 	long kva_target_pfn;
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 459913beac71..dad5f42dd359 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -524,7 +524,8 @@ out:
 }
 #endif /* CONFIG_NUMA_EMU */
 
-void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn)
+void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
+			 int acpi, int k8)
 {
 	int i;
 
@@ -547,8 +548,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn)
 #endif
 
 #ifdef CONFIG_K8_NUMA
-	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT,
-					last_pfn<<PAGE_SHIFT))
+	if (!numa_off && k8 && !k8_scan_nodes())
 		return;
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);