about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2012-05-11 07:05:59 -0400
committerIngo Molnar <mingo@kernel.org>2012-05-14 09:05:25 -0400
commit316ad248307fba13be40f01e92a22b89457c32bc (patch)
tree8c775a149310e267993648021f80a3b3c2fd8c44
parentdd7d8634e619b715a537402672d1383535ff4c54 (diff)
sched/x86: Rewrite set_cpu_sibling_map()
Commit ad7687dde ("x86/numa: Check for nonsensical topologies on real hw as well") is broken in that the condition can trigger for valid setups but only changes the end result for invalid setups with no real means of discerning between those. Rewrite set_cpu_sibling_map() to make the code clearer and make sure to only warn when the check changes the end result. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Link: http://lkml.kernel.org/n/tip-klcwahu3gx467uhfiqjyhdcs@git.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/kernel/smpboot.c112
1 files changed, 66 insertions, 46 deletions
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7c53d96d44ab..e84c1bbea339 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -315,70 +315,90 @@ void __cpuinit smp_store_cpu_info(int id)
315 identify_secondary_cpu(c); 315 identify_secondary_cpu(c);
316} 316}
317 317
318static void __cpuinit link_thread_siblings(int cpu1, int cpu2) 318static bool __cpuinit
319topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
319{ 320{
320 cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); 321 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
321 cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1)); 322
322 cpumask_set_cpu(cpu1, cpu_core_mask(cpu2)); 323 return !WARN_ONCE(cpu_to_node(cpu1) != cpu_to_node(cpu2),
323 cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); 324 "sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
324 cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2)); 325 "[node: %d != %d]. Ignoring dependency.\n",
325 cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1)); 326 cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
326} 327}
327 328
/*
 * Symmetrically mark @c1 and @c2 as members of each other's
 * cpu_<_m>_mask (e.g. link_mask(sibling, ...) -> cpu_sibling_mask).
 */
#define link_mask(_m, c1, c2)						\
do {									\
	cpumask_set_cpu((c1), cpu_##_m##_mask(c2));			\
	cpumask_set_cpu((c2), cpu_##_m##_mask(c1));			\
} while (0)
328 334
329void __cpuinit set_cpu_sibling_map(int cpu) 335static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
330{ 336{
331 int i; 337 if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
332 struct cpuinfo_x86 *c = &cpu_data(cpu); 338 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
333 339
334 cpumask_set_cpu(cpu, cpu_sibling_setup_mask); 340 if (c->phys_proc_id == o->phys_proc_id &&
341 per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
342 c->compute_unit_id == o->compute_unit_id)
343 return topology_sane(c, o, "smt");
335 344
336 if (smp_num_siblings > 1) { 345 } else if (c->phys_proc_id == o->phys_proc_id &&
337 for_each_cpu(i, cpu_sibling_setup_mask) { 346 c->cpu_core_id == o->cpu_core_id) {
338 struct cpuinfo_x86 *o = &cpu_data(i); 347 return topology_sane(c, o, "smt");
348 }
339 349
340 if (cpu_to_node(cpu) != cpu_to_node(i)) { 350 return false;
341 WARN_ONCE(1, "sched: CPU #%d's thread-sibling CPU #%d not on the same node! [node %d != %d]. Ignoring sibling dependency.\n", cpu, i, cpu_to_node(cpu), cpu_to_node(i)); 351}
342 continue;
343 }
344 352
345 if (cpu_has(c, X86_FEATURE_TOPOEXT)) { 353static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
346 if (c->phys_proc_id == o->phys_proc_id && 354{
347 per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i) && 355 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
348 c->compute_unit_id == o->compute_unit_id) 356
349 link_thread_siblings(cpu, i); 357 if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID &&
350 } else if (c->phys_proc_id == o->phys_proc_id && 358 per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2))
351 c->cpu_core_id == o->cpu_core_id) { 359 return topology_sane(c, o, "llc");
352 link_thread_siblings(cpu, i); 360
353 } 361 return false;
354 } 362}
355 } else { 363
356 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); 364static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
357 } 365{
366 if (c->phys_proc_id == o->phys_proc_id)
367 return topology_sane(c, o, "mc");
368
369 return false;
370}
371
372void __cpuinit set_cpu_sibling_map(int cpu)
373{
374 bool has_mc = boot_cpu_data.x86_max_cores > 1;
375 bool has_smt = smp_num_siblings > 1;
376 struct cpuinfo_x86 *c = &cpu_data(cpu);
377 struct cpuinfo_x86 *o;
378 int i;
358 379
359 cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); 380 cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
360 381
361 if (__this_cpu_read(cpu_info.x86_max_cores) == 1) { 382 if (!has_smt && !has_mc) {
362 cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); 383 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
384 cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
385 cpumask_set_cpu(cpu, cpu_core_mask(cpu));
363 c->booted_cores = 1; 386 c->booted_cores = 1;
364 return; 387 return;
365 } 388 }
366 389
367 for_each_cpu(i, cpu_sibling_setup_mask) { 390 for_each_cpu(i, cpu_sibling_setup_mask) {
368 if (cpu_to_node(cpu) != cpu_to_node(i)) { 391 o = &cpu_data(i);
369 WARN_ONCE(1, "sched: CPU #%d's core-sibling CPU #%d not on the same node! [node %d != %d]. Ignoring sibling dependency.\n", cpu, i, cpu_to_node(cpu), cpu_to_node(i));
370 continue;
371 }
372 392
373 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && 393 if ((i == cpu) || (has_smt && match_smt(c, o)))
374 per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { 394 link_mask(sibling, cpu, i);
375 cpumask_set_cpu(i, cpu_llc_shared_mask(cpu)); 395
376 cpumask_set_cpu(cpu, cpu_llc_shared_mask(i)); 396 if ((i == cpu) || (has_mc && match_llc(c, o)))
377 } 397 link_mask(llc_shared, cpu, i);
398
399 if ((i == cpu) || (has_mc && match_mc(c, o))) {
400 link_mask(core, cpu, i);
378 401
379 if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
380 cpumask_set_cpu(i, cpu_core_mask(cpu));
381 cpumask_set_cpu(cpu, cpu_core_mask(i));
382 /* 402 /*
383 * Does this new cpu bringup a new core? 403 * Does this new cpu bringup a new core?
384 */ 404 */