Diffstat (limited to 'arch/x86/kernel/smpboot.c')
-rw-r--r--  arch/x86/kernel/smpboot.c  88
1 file changed, 88 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 40a3b56952ef..d774520a6b48 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -29,7 +29,95 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 /* Per CPU bogomips and other parameters */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
+
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
+void __cpuinit set_cpu_sibling_map(int cpu)
+{
+        int i;
+        struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+        cpu_set(cpu, cpu_sibling_setup_map);
+
+        if (smp_num_siblings > 1) {
+                for_each_cpu_mask(i, cpu_sibling_setup_map) {
+                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
+                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
+                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+                                cpu_set(i, per_cpu(cpu_core_map, cpu));
+                                cpu_set(cpu, per_cpu(cpu_core_map, i));
+                                cpu_set(i, c->llc_shared_map);
+                                cpu_set(cpu, cpu_data(i).llc_shared_map);
+                        }
+                }
+        } else {
+                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
+        }
+
+        cpu_set(cpu, c->llc_shared_map);
+
+        if (current_cpu_data.x86_max_cores == 1) {
+                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
+                c->booted_cores = 1;
+                return;
+        }
+
+        for_each_cpu_mask(i, cpu_sibling_setup_map) {
+                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
+                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
+                        cpu_set(i, c->llc_shared_map);
+                        cpu_set(cpu, cpu_data(i).llc_shared_map);
+                }
+                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
+                        cpu_set(i, per_cpu(cpu_core_map, cpu));
+                        cpu_set(cpu, per_cpu(cpu_core_map, i));
+                        /*
+                         * Does this new cpu bringup a new core?
+                         */
+                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
+                                /*
+                                 * for each core in package, increment
+                                 * the booted_cores for this new cpu
+                                 */
+                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
+                                        c->booted_cores++;
+                                /*
+                                 * increment the core count for all
+                                 * the other cpus in this package
+                                 */
+                                if (i != cpu)
+                                        cpu_data(i).booted_cores++;
+                        } else if (i != cpu && !c->booted_cores)
+                                c->booted_cores = cpu_data(i).booted_cores;
+                }
+        }
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
+void remove_siblinginfo(int cpu)
+{
+        int sibling;
+        struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+        for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+                /*
+                 * last thread sibling in this cpu core going down
+                 */
+                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
+                        cpu_data(sibling).booted_cores--;
+        }
+
+        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+        cpus_clear(per_cpu(cpu_sibling_map, cpu));
+        cpus_clear(per_cpu(cpu_core_map, cpu));
+        c->phys_proc_id = 0;
+        c->cpu_core_id = 0;
+        cpu_clear(cpu, cpu_sibling_setup_map);
+}
 
 int additional_cpus __initdata = -1;
 
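For readers tracing the hunk above, here is a minimal user-space model of its
core idea: CPUs that share a phys_proc_id and cpu_core_id are thread siblings,
and CPUs that share only a phys_proc_id sit in the same package. This is a
sketch only -- the struct layout, the plain unsigned long bitmasks standing in
for cpumask_t, and the one-package/two-core/two-thread topology are invented
for illustration, and the llc_shared_map and booted_cores bookkeeping from the
patch is omitted.

        #include <stdio.h>

        #define MAX_CPUS 8

        struct cpu {
                int phys_proc_id;           /* physical package id */
                int cpu_core_id;            /* core id within the package */
                unsigned long sibling_map;  /* hw threads sharing this core */
                unsigned long core_map;     /* cpus sharing this package */
        };

        static struct cpu cpus[MAX_CPUS];
        static unsigned long setup_map;     /* cpus whose maps are computed */

        /* Model of set_cpu_sibling_map(): pair cpu with every already
         * set-up cpu whose package/core ids match. */
        static void set_sibling_map(int cpu)
        {
                int i;

                setup_map |= 1UL << cpu;
                for (i = 0; i < MAX_CPUS; i++) {
                        if (!(setup_map & (1UL << i)))
                                continue;
                        if (cpus[cpu].phys_proc_id == cpus[i].phys_proc_id &&
                            cpus[cpu].cpu_core_id == cpus[i].cpu_core_id) {
                                cpus[cpu].sibling_map |= 1UL << i;
                                cpus[i].sibling_map |= 1UL << cpu;
                        }
                        if (cpus[cpu].phys_proc_id == cpus[i].phys_proc_id) {
                                cpus[cpu].core_map |= 1UL << i;
                                cpus[i].core_map |= 1UL << cpu;
                        }
                }
        }

        /* Model of remove_siblinginfo(): drop cpu from its peers' masks,
         * then clear its own state, as the hot-unplug path does. */
        static void remove_sibling_map(int cpu)
        {
                int i;

                for (i = 0; i < MAX_CPUS; i++) {
                        cpus[i].sibling_map &= ~(1UL << cpu);
                        cpus[i].core_map &= ~(1UL << cpu);
                }
                cpus[cpu].sibling_map = 0;
                cpus[cpu].core_map = 0;
                setup_map &= ~(1UL << cpu);
        }

        int main(void)
        {
                int cpu;

                /* one package, two cores, two threads per core */
                for (cpu = 0; cpu < 4; cpu++) {
                        cpus[cpu].phys_proc_id = 0;
                        cpus[cpu].cpu_core_id = cpu / 2;
                        set_sibling_map(cpu);
                }
                remove_sibling_map(3);  /* model unplugging cpu 3 */
                for (cpu = 0; cpu < 4; cpu++)
                        printf("cpu%d: siblings=%#lx cores=%#lx\n",
                               cpu, cpus[cpu].sibling_map, cpus[cpu].core_map);
                return 0;
        }

Built with plain gcc, this prints the masks after cpu 3 is removed: cpu 2 is
left as the lone thread in core 1 (siblings=0x4) and every remaining package
mask drops bit 3, mirroring what remove_siblinginfo() does for a departing CPU.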