Diffstat (limited to 'arch/x86/kernel/smpboot.c')
 arch/x86/kernel/smpboot.c | 38 ++++++++++----------------------------
 1 file changed, 10 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0cbe8c0b35ed..d396155f436c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -130,6 +130,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
+DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
+
 /* Per CPU bogomips and other parameters */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
@@ -355,23 +357,6 @@ notrace static void __cpuinit start_secondary(void *unused)
 	cpu_idle();
 }
 
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* In this case, llc_shared_map is a pointer to a cpumask. */
-static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
-				    const struct cpuinfo_x86 *src)
-{
-	struct cpumask *llc = dst->llc_shared_map;
-	*dst = *src;
-	dst->llc_shared_map = llc;
-}
-#else
-static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
-				    const struct cpuinfo_x86 *src)
-{
-	*dst = *src;
-}
-#endif /* CONFIG_CPUMASK_OFFSTACK */
-
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
@@ -381,7 +366,7 @@ void __cpuinit smp_store_cpu_info(int id)
 {
 	struct cpuinfo_x86 *c = &cpu_data(id);
 
-	copy_cpuinfo_x86(c, &boot_cpu_data);
+	*c = boot_cpu_data;
 	c->cpu_index = id;
 	if (id != 0)
 		identify_secondary_cpu(c);
@@ -389,15 +374,12 @@ void __cpuinit smp_store_cpu_info(int id)
 
 static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
 {
-	struct cpuinfo_x86 *c1 = &cpu_data(cpu1);
-	struct cpuinfo_x86 *c2 = &cpu_data(cpu2);
-
 	cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
 	cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1));
 	cpumask_set_cpu(cpu1, cpu_core_mask(cpu2));
 	cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
-	cpumask_set_cpu(cpu1, c2->llc_shared_map);
-	cpumask_set_cpu(cpu2, c1->llc_shared_map);
+	cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
+	cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
 }
 
 
@@ -425,7 +407,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 	}
 
-	cpumask_set_cpu(cpu, c->llc_shared_map);
+	cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
 
 	if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
 		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ -436,8 +418,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	for_each_cpu(i, cpu_sibling_setup_mask) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpumask_set_cpu(i, c->llc_shared_map);
-			cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
+			cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
+			cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
 		}
 		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
 			cpumask_set_cpu(i, cpu_core_mask(cpu));
@@ -476,7 +458,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 	    !(cpu_has(c, X86_FEATURE_AMD_DCM)))
 		return cpu_core_mask(cpu);
 	else
-		return c->llc_shared_map;
+		return cpu_llc_shared_mask(cpu);
 }
 
 static void impress_friends(void)
@@ -1103,7 +1085,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	for_each_possible_cpu(i) {
 		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
 		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
 	}
 	set_cpu_sibling_map(0);
 
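The hunks above call a cpu_llc_shared_mask() accessor that is defined outside this file. As a minimal, non-authoritative sketch of how such a helper would pair with the new per-CPU variable (the header location and exact form are assumptions, not shown by this diff):

/* Sketch only: presumed companion declarations for the per-CPU LLC mask;
 * not part of arch/x86/kernel/smpboot.c or of this diff. */
DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);

static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	/* CPUs that share a last-level cache with @cpu. */
	return per_cpu(cpu_llc_shared_map, cpu);
}

With a helper along these lines, link_thread_siblings(), set_cpu_sibling_map(), and cpu_coregroup_mask() read and update the LLC-sharing mask through per-CPU storage instead of reaching into struct cpuinfo_x86, which is why the copy_cpuinfo_x86() wrappers deleted above are no longer needed.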