 arch/x86/kernel/smpboot.c    | 88 ++++++++++++++++++++++++++++++++++++++
 arch/x86/kernel/smpboot_32.c | 88 --------------------------------------
 arch/x86/kernel/smpboot_64.c | 89 ---------------------------------------
 3 files changed, 88 insertions(+), 177 deletions(-)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 40a3b56952ef..d774520a6b48 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -29,7 +29,95 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 /* Per CPU bogomips and other parameters */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
+
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
+void __cpuinit set_cpu_sibling_map(int cpu)
+{
+	int i;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	cpu_set(cpu, cpu_sibling_setup_map);
+
+	if (smp_num_siblings > 1) {
+		for_each_cpu_mask(i, cpu_sibling_setup_map) {
+			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
+			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
+				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+				cpu_set(i, per_cpu(cpu_core_map, cpu));
+				cpu_set(cpu, per_cpu(cpu_core_map, i));
+				cpu_set(i, c->llc_shared_map);
+				cpu_set(cpu, cpu_data(i).llc_shared_map);
+			}
+		}
+	} else {
+		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
+	}
+
+	cpu_set(cpu, c->llc_shared_map);
+
+	if (current_cpu_data.x86_max_cores == 1) {
+		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
+		c->booted_cores = 1;
+		return;
+	}
+
+	for_each_cpu_mask(i, cpu_sibling_setup_map) {
+		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
+		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
+			cpu_set(i, c->llc_shared_map);
+			cpu_set(cpu, cpu_data(i).llc_shared_map);
+		}
+		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
+			cpu_set(i, per_cpu(cpu_core_map, cpu));
+			cpu_set(cpu, per_cpu(cpu_core_map, i));
+			/*
+			 * Does this new cpu bringup a new core?
+			 */
+			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
+				/*
+				 * for each core in package, increment
+				 * the booted_cores for this new cpu
+				 */
+				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
+					c->booted_cores++;
+				/*
+				 * increment the core count for all
+				 * the other cpus in this package
+				 */
+				if (i != cpu)
+					cpu_data(i).booted_cores++;
+			} else if (i != cpu && !c->booted_cores)
+				c->booted_cores = cpu_data(i).booted_cores;
+		}
+	}
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
+void remove_siblinginfo(int cpu)
+{
+	int sibling;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+		/*
+		 * last thread sibling in this cpu core going down
+		 */
+		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
+			cpu_data(sibling).booted_cores--;
+	}
+
+	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+	cpus_clear(per_cpu(cpu_sibling_map, cpu));
+	cpus_clear(per_cpu(cpu_core_map, cpu));
+	c->phys_proc_id = 0;
+	c->cpu_core_id = 0;
+	cpu_clear(cpu, cpu_sibling_setup_map);
+}
 
 int additional_cpus __initdata = -1;
 
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 0fbc98163b4e..322f46674d42 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -274,71 +274,6 @@ cpumask_t cpu_coregroup_map(int cpu)
 	return c->llc_shared_map;
 }
 
-/* representing cpus for which sibling maps can be computed */
-static cpumask_t cpu_sibling_setup_map;
-
-void __cpuinit set_cpu_sibling_map(int cpu)
-{
-	int i;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	cpu_set(cpu, cpu_sibling_setup_map);
-
-	if (smp_num_siblings > 1) {
-		for_each_cpu_mask(i, cpu_sibling_setup_map) {
-			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
-			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
-				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
-				cpu_set(i, per_cpu(cpu_core_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_core_map, i));
-				cpu_set(i, c->llc_shared_map);
-				cpu_set(cpu, cpu_data(i).llc_shared_map);
-			}
-		}
-	} else {
-		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-	}
-
-	cpu_set(cpu, c->llc_shared_map);
-
-	if (current_cpu_data.x86_max_cores == 1) {
-		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
-		c->booted_cores = 1;
-		return;
-	}
-
-	for_each_cpu_mask(i, cpu_sibling_setup_map) {
-		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
-		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpu_set(i, c->llc_shared_map);
-			cpu_set(cpu, cpu_data(i).llc_shared_map);
-		}
-		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-			cpu_set(i, per_cpu(cpu_core_map, cpu));
-			cpu_set(cpu, per_cpu(cpu_core_map, i));
-			/*
-			 * Does this new cpu bringup a new core?
-			 */
-			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
-				/*
-				 * for each core in package, increment
-				 * the booted_cores for this new cpu
-				 */
-				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
-					c->booted_cores++;
-				/*
-				 * increment the core count for all
-				 * the other cpus in this package
-				 */
-				if (i != cpu)
-					cpu_data(i).booted_cores++;
-			} else if (i != cpu && !c->booted_cores)
-				c->booted_cores = cpu_data(i).booted_cores;
-		}
-	}
-}
-
 /*
  * Activate a secondary processor.
  */
@@ -1120,29 +1055,6 @@ void __init native_smp_prepare_boot_cpu(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void remove_siblinginfo(int cpu)
-{
-	int sibling;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
-		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
-		/*
-		 * last thread sibling in this cpu core going down
-		 */
-		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-			cpu_data(sibling).booted_cores--;
-	}
-
-	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-	cpus_clear(per_cpu(cpu_sibling_map, cpu));
-	cpus_clear(per_cpu(cpu_core_map, cpu));
-	c->phys_proc_id = 0;
-	c->cpu_core_id = 0;
-	cpu_clear(cpu, cpu_sibling_setup_map);
-}
-
 int __cpu_disable(void)
 {
 	cpumask_t map = cpu_online_map;
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 20f1c7df86a3..329f9c53a335 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -225,71 +225,6 @@ cpumask_t cpu_coregroup_map(int cpu)
 	return c->llc_shared_map;
 }
 
-/* representing cpus for which sibling maps can be computed */
-static cpumask_t cpu_sibling_setup_map;
-
-void __cpuinit set_cpu_sibling_map(int cpu)
-{
-	int i;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	cpu_set(cpu, cpu_sibling_setup_map);
-
-	if (smp_num_siblings > 1) {
-		for_each_cpu_mask(i, cpu_sibling_setup_map) {
-			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
-			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
-				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
-				cpu_set(i, per_cpu(cpu_core_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_core_map, i));
-				cpu_set(i, c->llc_shared_map);
-				cpu_set(cpu, cpu_data(i).llc_shared_map);
-			}
-		}
-	} else {
-		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-	}
-
-	cpu_set(cpu, c->llc_shared_map);
-
-	if (current_cpu_data.x86_max_cores == 1) {
-		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
-		c->booted_cores = 1;
-		return;
-	}
-
-	for_each_cpu_mask(i, cpu_sibling_setup_map) {
-		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
-		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpu_set(i, c->llc_shared_map);
-			cpu_set(cpu, cpu_data(i).llc_shared_map);
-		}
-		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-			cpu_set(i, per_cpu(cpu_core_map, cpu));
-			cpu_set(cpu, per_cpu(cpu_core_map, i));
-			/*
-			 * Does this new cpu bringup a new core?
-			 */
-			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
-				/*
-				 * for each core in package, increment
-				 * the booted_cores for this new cpu
-				 */
-				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
-					c->booted_cores++;
-				/*
-				 * increment the core count for all
-				 * the other cpus in this package
-				 */
-				if (i != cpu)
-					cpu_data(i).booted_cores++;
-			} else if (i != cpu && !c->booted_cores)
-				c->booted_cores = cpu_data(i).booted_cores;
-		}
-	}
-}
-
 /*
  * Setup code on secondary processor (after coming out of the trampoline)
  */
@@ -917,30 +852,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-
-void remove_siblinginfo(int cpu)
-{
-	int sibling;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
-		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
-		/*
-		 * last thread sibling in this cpu core going down
-		 */
-		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-			cpu_data(sibling).booted_cores--;
-	}
-
-	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-	cpus_clear(per_cpu(cpu_sibling_map, cpu));
-	cpus_clear(per_cpu(cpu_core_map, cpu));
-	c->phys_proc_id = 0;
-	c->cpu_core_id = 0;
-	cpu_clear(cpu, cpu_sibling_setup_map);
-}
-
 static void __ref remove_cpu_from_maps(void)
 {
 	int cpu = smp_processor_id();