 arch/x86/include/asm/numa.h  |  2 +-
 arch/x86/kernel/smpboot.c    | 23 -----------------------
 arch/x86/mm/numa.c           | 31 +++++++++++++------------------
 arch/x86/mm/numa_emulation.c | 20 ++++++--------------
 4 files changed, 20 insertions(+), 56 deletions(-)
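
This diff narrows the CONFIG_DEBUG_PER_CPU_MAPS helper: debug_cpumask_set_cpu()
no longer returns the node's cpumask for the caller to update, it now takes the
node explicitly and sets or clears the cpu bit itself, and the
check_cpu_siblings_on_same_node() fixup in smpboot.c is removed along with its
call sites. A minimal sketch of the call-pattern change, using only helpers
named in the hunks below (the _before/_after names are for illustration only;
in the tree both are numa_set_cpumask()):

	/* Before: the helper returned the mask and each caller
	 * repeated the set/clear logic. */
	static void numa_set_cpumask_before(int cpu, int enable)
	{
		struct cpumask *mask;

		mask = debug_cpumask_set_cpu(cpu, enable);
		if (!mask)
			return;

		if (enable)
			cpumask_set_cpu(cpu, mask);
		else
			cpumask_clear_cpu(cpu, mask);
	}

	/* After: the caller passes the node and the helper updates the
	 * mask itself, so the wrapper collapses to one call. */
	static void numa_set_cpumask_after(int cpu, bool enable)
	{
		debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
	}
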
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 3d4dab43c994..a50fc9f493b3 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -51,7 +51,7 @@ static inline void numa_remove_cpu(int cpu) { }
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
-struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable);
+void debug_cpumask_set_cpu(int cpu, int node, bool enable);
 #endif
 
 #endif /* _ASM_X86_NUMA_H */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8ed8908cc9f7..c2871d3c71b6 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -312,26 +312,6 @@ void __cpuinit smp_store_cpu_info(int id)
 	identify_secondary_cpu(c);
 }
 
-static void __cpuinit check_cpu_siblings_on_same_node(int cpu1, int cpu2)
-{
-	int node1 = early_cpu_to_node(cpu1);
-	int node2 = early_cpu_to_node(cpu2);
-
-	/*
-	 * Our CPU scheduler assumes all logical cpus in the same physical cpu
-	 * share the same node. But, buggy ACPI or NUMA emulation might assign
-	 * them to different node. Fix it.
-	 */
-	if (node1 != node2) {
-		pr_warning("CPU %d in node %d and CPU %d in node %d are in the same physical CPU. forcing same node %d\n",
-			   cpu1, node1, cpu2, node2, node2);
-
-		numa_remove_cpu(cpu1);
-		numa_set_node(cpu1, node2);
-		numa_add_cpu(cpu1);
-	}
-}
-
 static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
 {
 	cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
@@ -340,7 +320,6 @@ static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
 	cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
 	cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
 	cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
-	check_cpu_siblings_on_same_node(cpu1, cpu2);
 }
 
 
@@ -382,12 +361,10 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
 			cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
 			cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
-			check_cpu_siblings_on_same_node(cpu, i);
 		}
 		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
 			cpumask_set_cpu(i, cpu_core_mask(cpu));
 			cpumask_set_cpu(cpu, cpu_core_mask(i));
-			check_cpu_siblings_on_same_node(cpu, i);
 			/*
 			 * Does this new cpu bringup a new core?
 			 */
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 9559d360fde7..745258dfc4dc 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -213,53 +213,48 @@ int early_cpu_to_node(int cpu)
 	return per_cpu(x86_cpu_to_node_map, cpu);
 }
 
-struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
+void debug_cpumask_set_cpu(int cpu, int node, bool enable)
 {
-	int node = early_cpu_to_node(cpu);
 	struct cpumask *mask;
 	char buf[64];
 
 	if (node == NUMA_NO_NODE) {
 		/* early_cpu_to_node() already emits a warning and trace */
-		return NULL;
+		return;
 	}
 	mask = node_to_cpumask_map[node];
 	if (!mask) {
 		pr_err("node_to_cpumask_map[%i] NULL\n", node);
 		dump_stack();
-		return NULL;
+		return;
 	}
 
+	if (enable)
+		cpumask_set_cpu(cpu, mask);
+	else
+		cpumask_clear_cpu(cpu, mask);
+
 	cpulist_scnprintf(buf, sizeof(buf), mask);
 	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
 	       enable ? "numa_add_cpu" : "numa_remove_cpu",
 	       cpu, node, buf);
-	return mask;
+	return;
 }
 
 # ifndef CONFIG_NUMA_EMU
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static void __cpuinit numa_set_cpumask(int cpu, bool enable)
 {
-	struct cpumask *mask;
-
-	mask = debug_cpumask_set_cpu(cpu, enable);
-	if (!mask)
-		return;
-
-	if (enable)
-		cpumask_set_cpu(cpu, mask);
-	else
-		cpumask_clear_cpu(cpu, mask);
+	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
 }
 
 void __cpuinit numa_add_cpu(int cpu)
 {
-	numa_set_cpumask(cpu, 1);
+	numa_set_cpumask(cpu, true);
 }
 
 void __cpuinit numa_remove_cpu(int cpu)
 {
-	numa_set_cpumask(cpu, 0);
+	numa_set_cpumask(cpu, false);
 }
 # endif /* !CONFIG_NUMA_EMU */
 
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index ad091e4cff17..de84cc140379 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -454,10 +454,9 @@ void __cpuinit numa_remove_cpu(int cpu)
 		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
 }
 #else /* !CONFIG_DEBUG_PER_CPU_MAPS */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static void __cpuinit numa_set_cpumask(int cpu, bool enable)
 {
-	struct cpumask *mask;
-	int nid, physnid, i;
+	int nid, physnid;
 
 	nid = early_cpu_to_node(cpu);
 	if (nid == NUMA_NO_NODE) {
@@ -467,28 +466,21 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
 
 	physnid = emu_nid_to_phys[nid];
 
-	for_each_online_node(i) {
+	for_each_online_node(nid) {
 		if (emu_nid_to_phys[nid] != physnid)
 			continue;
 
-		mask = debug_cpumask_set_cpu(cpu, enable);
-		if (!mask)
-			return;
-
-		if (enable)
-			cpumask_set_cpu(cpu, mask);
-		else
-			cpumask_clear_cpu(cpu, mask);
+		debug_cpumask_set_cpu(cpu, nid, enable);
 	}
 }
 
 void __cpuinit numa_add_cpu(int cpu)
 {
-	numa_set_cpumask(cpu, 1);
+	numa_set_cpumask(cpu, true);
 }
 
 void __cpuinit numa_remove_cpu(int cpu)
 {
-	numa_set_cpumask(cpu, 0);
+	numa_set_cpumask(cpu, false);
 }
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
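
For the CONFIG_NUMA_EMU path above, the rewritten loop reuses nid as the
iterator and passes each emulated node that maps to the cpu's physical node to
debug_cpumask_set_cpu(). A self-contained toy illustration of that
node-matching loop, written as plain userspace C with a made-up
emu_nid_to_phys table rather than kernel code:

#include <stdio.h>
#include <stdbool.h>

#define NR_TOY_NODES 4

/* Made-up emulated-node -> physical-node table: emulated nodes 0,1 sit on
 * physical node 0 and emulated nodes 2,3 on physical node 1. */
static const int emu_nid_to_phys[NR_TOY_NODES] = { 0, 0, 1, 1 };

/* Mirrors the structure of the fixed loop: walk all emulated nodes and act
 * on every one backed by the same physical node as the cpu's node. */
static void toy_set_cpumask(int cpu, int nid, bool enable)
{
	int physnid = emu_nid_to_phys[nid];

	for (nid = 0; nid < NR_TOY_NODES; nid++) {
		if (emu_nid_to_phys[nid] != physnid)
			continue;
		printf("%s: cpu %d on emulated node %d (phys %d)\n",
		       enable ? "numa_add_cpu" : "numa_remove_cpu",
		       cpu, nid, physnid);
	}
}

int main(void)
{
	/* cpu 3 whose emulated node is 2 -> touches emulated nodes 2 and 3 */
	toy_set_cpumask(3, 2, true);
	return 0;
}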