Diffstat (limited to 'arch/x86/kernel/smpboot.c')
 arch/x86/kernel/smpboot.c | 87 ++++++++++++++++++++++++++++++++++++-------------------------------------------------
 1 file changed, 38 insertions(+), 49 deletions(-)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 76b6f50978f7..8c3aca7cb343 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -334,14 +334,17 @@ static void __cpuinit start_secondary(void *unused)
	 * does not change while we are assigning vectors to cpus. Holding
	 * this lock ensures we don't half assign or remove an irq from a cpu.
	 */
-	ipi_call_lock_irq();
+	ipi_call_lock();
	lock_vector_lock();
	__setup_vector_irq(smp_processor_id());
	cpu_set(smp_processor_id(), cpu_online_map);
	unlock_vector_lock();
-	ipi_call_unlock_irq();
+	ipi_call_unlock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

+	/* enable local interrupts */
+	local_irq_enable();
+
	setup_secondary_clock();

	wmb();
@@ -596,10 +599,12 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
-	maxlvt = lapic_get_maxlvt();
-	if (maxlvt > 3)			/* Due to the Pentium erratum 3AP. */
-		apic_write(APIC_ESR, 0);
-	accept_status = (apic_read(APIC_ESR) & 0xEF);
+	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
+		maxlvt = lapic_get_maxlvt();
+		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
+			apic_write(APIC_ESR, 0);
+		accept_status = (apic_read(APIC_ESR) & 0xEF);
+	}
	pr_debug("NMI sent.\n");

	if (send_status)
@@ -1256,39 +1261,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
	check_nmi_watchdog();
 }

-#ifdef CONFIG_HOTPLUG_CPU
-
-static void remove_siblinginfo(int cpu)
-{
-	int sibling;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
-		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
-		/*/
-		 * last thread sibling in this cpu core going down
-		 */
-		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-			cpu_data(sibling).booted_cores--;
-	}
-
-	for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-	cpus_clear(per_cpu(cpu_sibling_map, cpu));
-	cpus_clear(per_cpu(cpu_core_map, cpu));
-	c->phys_proc_id = 0;
-	c->cpu_core_id = 0;
-	cpu_clear(cpu, cpu_sibling_setup_map);
-}
-
-static int additional_cpus __initdata = -1;
-
-static __init int setup_additional_cpus(char *s)
-{
-	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
-}
-early_param("additional_cpus", setup_additional_cpus);
-
 /*
  * cpu_possible_map should be static, it cannot change as cpu's
  * are onlined, or offlined. The reason is per-cpu data-structures
@@ -1308,21 +1280,13 @@ early_param("additional_cpus", setup_additional_cpus);
  */
 __init void prefill_possible_map(void)
 {
-	int i;
-	int possible;
+	int i, possible;

	/* no processor from mptable or madt */
	if (!num_processors)
		num_processors = 1;

-	if (additional_cpus == -1) {
-		if (disabled_cpus > 0)
-			additional_cpus = disabled_cpus;
-		else
-			additional_cpus = 0;
-	}
-
-	possible = num_processors + additional_cpus;
+	possible = num_processors + disabled_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

@@ -1335,6 +1299,31 @@ __init void prefill_possible_map(void)
	nr_cpu_ids = possible;
 }

+#ifdef CONFIG_HOTPLUG_CPU
+
+static void remove_siblinginfo(int cpu)
+{
+	int sibling;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
+		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+		/*/
+		 * last thread sibling in this cpu core going down
+		 */
+		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
+			cpu_data(sibling).booted_cores--;
+	}
+
+	for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+	cpus_clear(per_cpu(cpu_sibling_map, cpu));
+	cpus_clear(per_cpu(cpu_core_map, cpu));
+	c->phys_proc_id = 0;
+	c->cpu_core_id = 0;
+	cpu_clear(cpu, cpu_sibling_setup_map);
+}
+
 static void __ref remove_cpu_from_maps(int cpu)
 {
	cpu_clear(cpu, cpu_online_map);