Diffstat (limited to 'arch/x86/kernel/smpboot.c')

-rw-r--r--	arch/x86/kernel/smpboot.c | 210
1 file changed, 117 insertions(+), 93 deletions(-)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7985c5b3f916..8c3aca7cb343 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -52,6 +52,7 @@
 #include <asm/desc.h>
 #include <asm/nmi.h>
 #include <asm/irq.h>
+#include <asm/idle.h>
 #include <asm/smp.h>
 #include <asm/trampoline.h>
 #include <asm/cpu.h>
@@ -88,7 +89,7 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 #define get_idle_for_cpu(x)	(per_cpu(idle_thread_array, x))
 #define set_idle_for_cpu(x, p)	(per_cpu(idle_thread_array, x) = (p))
 #else
-struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define get_idle_for_cpu(x)	(idle_thread_array[(x)])
 #define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
 #endif
@@ -123,13 +124,12 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 static atomic_t init_deasserted;
 
-static int boot_cpu_logical_apicid;
 
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
 /* Set if we find a B stepping CPU */
-int __cpuinitdata smp_b_stepping;
+static int __cpuinitdata smp_b_stepping;
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
 
@@ -165,6 +165,8 @@ static void unmap_cpu_to_node(int cpu)
 #endif
 
 #ifdef CONFIG_X86_32
+static int boot_cpu_logical_apicid;
+
 u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
 					{ [0 ... NR_CPUS-1] = BAD_APICID };
 
@@ -210,7 +212,7 @@ static void __cpuinit smp_callin(void)
 	/*
 	 * (This works even if the APIC is not enabled.)
 	 */
-	phys_id = GET_APIC_ID(read_apic_id());
+	phys_id = read_apic_id();
 	cpuid = smp_processor_id();
 	if (cpu_isset(cpuid, cpu_callin_map)) {
 		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
@@ -257,6 +259,7 @@ static void __cpuinit smp_callin(void)
 	end_local_APIC_setup();
 	map_cpu_to_logical_apicid();
 
+	notify_cpu_starting(cpuid);
 	/*
 	 * Get our bogomips.
 	 *
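Note: the added notify_cpu_starting() fires the CPU_STARTING notifier chain on the incoming CPU before it goes online. A minimal sketch of a subscriber, assuming the 2.6.27-era notifier API; the callback and its registration below are illustrative, not part of this patch:

#include <linux/cpu.h>
#include <linux/notifier.h>

/* Illustrative subscriber; not part of this patch. */
static int __cpuinit example_starting(struct notifier_block *nb,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (action == CPU_STARTING)	/* runs on the new CPU, irqs off */
		printk(KERN_INFO "CPU%u starting\n", cpu);
	return NOTIFY_OK;
}

static struct notifier_block example_starting_nb __cpuinitdata = {
	.notifier_call = example_starting,
};

/* registered once at init time, e.g. register_cpu_notifier(&example_starting_nb); */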
@@ -331,14 +334,17 @@ static void __cpuinit start_secondary(void *unused)
 	 * does not change while we are assigning vectors to cpus. Holding
 	 * this lock ensures we don't half assign or remove an irq from a cpu.
 	 */
-	ipi_call_lock_irq();
+	ipi_call_lock();
 	lock_vector_lock();
 	__setup_vector_irq(smp_processor_id());
 	cpu_set(smp_processor_id(), cpu_online_map);
 	unlock_vector_lock();
-	ipi_call_unlock_irq();
+	ipi_call_unlock();
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 
+	/* enable local interrupts */
+	local_irq_enable();
+
 	setup_secondary_clock();
 
 	wmb();
@@ -550,8 +556,7 @@ static inline void __inquire_remote_apic(int apicid)
 			printk(KERN_CONT
 			       "a previous APIC delivery may have failed\n");
 
-		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
-		apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);
+		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
 
 		timeout = 0;
 		do {
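Note: this hunk and most of the ones below collapse the old two-write ICR sequence into a single apic_icr_write() call. For reference, a hedged sketch of what the flat-xAPIC flavor of that helper is presumed to expand to; the real definition lives in the APIC code, not in this file:

/* Presumed flat-xAPIC shape of the helper; illustrative only. */
static inline void apic_icr_write_sketch(u32 low, u32 dest)
{
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(dest));	/* destination first */
	apic_write(APIC_ICR, low);				/* low word triggers the IPI */
}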
@@ -583,11 +588,9 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
 	int maxlvt;
 
 	/* Target chip */
-	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
-
 	/* Boot on the stack */
 	/* Kick the second */
-	apic_write(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
+	apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);
 
 	pr_debug("Waiting for send to finish...\n");
 	send_status = safe_apic_wait_icr_idle();
@@ -596,10 +599,12 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
 	 * Give the other CPU some time to accept the IPI.
 	 */
 	udelay(200);
-	maxlvt = lapic_get_maxlvt();
-	if (maxlvt > 3)			/* Due to the Pentium erratum 3AP.  */
-		apic_write(APIC_ESR, 0);
-	accept_status = (apic_read(APIC_ESR) & 0xEF);
+	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
+		maxlvt = lapic_get_maxlvt();
+		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
+			apic_write(APIC_ESR, 0);
+		accept_status = (apic_read(APIC_ESR) & 0xEF);
+	}
 	pr_debug("NMI sent.\n");
 
 	if (send_status)
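Note: the new guard matters because only integrated local APICs have an error status register; the discrete 82489DX does not, so reading APIC_ESR there is bogus. APIC_INTEGRATED() is presumed to test the version nibble (integrated APICs report version 0x1x), roughly:

/* Presumed semantics (see apicdef.h); illustrative only. */
#define APIC_INTEGRATED_SKETCH(ver)	((ver) & 0xF0)	/* nonzero => integrated */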
@@ -640,13 +645,11 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 	/*
 	 * Turn INIT on target chip
 	 */
-	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
 	/*
 	 * Send IPI
 	 */
-	apic_write(APIC_ICR,
-		   APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT);
+	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
+		       phys_apicid);
 
 	pr_debug("Waiting for send to finish...\n");
 	send_status = safe_apic_wait_icr_idle();
@@ -656,10 +659,8 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 	pr_debug("Deasserting INIT.\n");
 
 	/* Target chip */
-	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
 	/* Send IPI */
-	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
+	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
 
 	pr_debug("Waiting for send to finish...\n");
 	send_status = safe_apic_wait_icr_idle();
@@ -702,11 +703,10 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 		 */
 
 		/* Target chip */
-		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
 		/* Boot on the stack */
 		/* Kick the second */
-		apic_write(APIC_ICR, APIC_DM_STARTUP | (start_eip >> 12));
+		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
+			       phys_apicid);
 
 		/*
 		 * Give the other CPU some time to accept the IPI.
@@ -1175,10 +1175,17 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	 * Setup boot CPU information
 	 */
 	smp_store_cpu_info(0); /* Final full version of the data */
+#ifdef CONFIG_X86_32
 	boot_cpu_logical_apicid = logical_smp_processor_id();
+#endif
 	current_thread_info()->cpu = 0;  /* needed? */
 	set_cpu_sibling_map(0);
 
+#ifdef CONFIG_X86_64
+	enable_IR_x2apic();
+	setup_apic_routing();
+#endif
+
 	if (smp_sanity_check(max_cpus) < 0) {
 		printk(KERN_INFO "SMP disabled\n");
 		disable_smp();
@@ -1186,9 +1193,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	}
 
 	preempt_disable();
-	if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) {
+	if (read_apic_id() != boot_cpu_physical_apicid) {
 		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
-		     GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid);
+		     read_apic_id(), boot_cpu_physical_apicid);
 		/* Or can we switch back to PIC here? */
 	}
 	preempt_enable();
@@ -1254,39 +1261,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 	check_nmi_watchdog();
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void remove_siblinginfo(int cpu)
-{
-	int sibling;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
-		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
-		/*/
-		 * last thread sibling in this cpu core going down
-		 */
-		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-			cpu_data(sibling).booted_cores--;
-	}
-
-	for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-	cpus_clear(per_cpu(cpu_sibling_map, cpu));
-	cpus_clear(per_cpu(cpu_core_map, cpu));
-	c->phys_proc_id = 0;
-	c->cpu_core_id = 0;
-	cpu_clear(cpu, cpu_sibling_setup_map);
-}
-
-static int additional_cpus __initdata = -1;
-
-static __init int setup_additional_cpus(char *s)
-{
-	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
-}
-early_param("additional_cpus", setup_additional_cpus);
-
 /*
  * cpu_possible_map should be static, it cannot change as cpu's
  * are onlined, or offlined. The reason is per-cpu data-structures
@@ -1306,24 +1280,13 @@ early_param("additional_cpus", setup_additional_cpus);
  */
 __init void prefill_possible_map(void)
 {
-	int i;
-	int possible;
+	int i, possible;
 
 	/* no processor from mptable or madt */
 	if (!num_processors)
 		num_processors = 1;
 
-#ifdef CONFIG_HOTPLUG_CPU
-	if (additional_cpus == -1) {
-		if (disabled_cpus > 0)
-			additional_cpus = disabled_cpus;
-		else
-			additional_cpus = 0;
-	}
-#else
-	additional_cpus = 0;
-#endif
-	possible = num_processors + additional_cpus;
+	possible = num_processors + disabled_cpus;
 	if (possible > NR_CPUS)
 		possible = NR_CPUS;
 
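Note: with the additional_cpus= boot parameter gone, sizing reduces to one addition: every CPU the firmware reported, enabled or disabled, becomes possible. A worked example with illustrative numbers:

/* Illustrative: MADT reports 2 enabled + 2 disabled CPUs, NR_CPUS = 8. */
possible = num_processors + disabled_cpus;	/* 2 + 2 = 4 */
if (possible > NR_CPUS)				/* 4 <= 8, so no clamping */
	possible = NR_CPUS;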
@@ -1336,6 +1299,31 @@ __init void prefill_possible_map(void)
 	nr_cpu_ids = possible;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void remove_siblinginfo(int cpu)
+{
+	int sibling;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
+		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+		/*/
+		 * last thread sibling in this cpu core going down
+		 */
+		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
+			cpu_data(sibling).booted_cores--;
+	}
+
+	for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+	cpus_clear(per_cpu(cpu_sibling_map, cpu));
+	cpus_clear(per_cpu(cpu_core_map, cpu));
+	c->phys_proc_id = 0;
+	c->cpu_core_id = 0;
+	cpu_clear(cpu, cpu_sibling_setup_map);
+}
+
 static void __ref remove_cpu_from_maps(int cpu)
 {
 	cpu_clear(cpu, cpu_online_map);
@@ -1346,25 +1334,9 @@ static void __ref remove_cpu_from_maps(int cpu)
 	numa_remove_cpu(cpu);
 }
 
-int __cpu_disable(void)
+void cpu_disable_common(void)
 {
 	int cpu = smp_processor_id();
-
-	/*
-	 * Perhaps use cpufreq to drop frequency, but that could go
-	 * into generic code.
-	 *
-	 * We won't take down the boot processor on i386 due to some
-	 * interrupts only being able to be serviced by the BSP.
-	 * Especially so if we're not using an IOAPIC	-zwane
-	 */
-	if (cpu == 0)
-		return -EBUSY;
-
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		stop_apic_nmi_watchdog(NULL);
-	clear_local_APIC();
-
 	/*
 	 * HACK:
 	 * Allow any queued timer interrupts to get serviced
@@ -1382,10 +1354,32 @@ int __cpu_disable(void)
 	remove_cpu_from_maps(cpu);
 	unlock_vector_lock();
 	fixup_irqs(cpu_online_map);
+}
+
+int native_cpu_disable(void)
+{
+	int cpu = smp_processor_id();
+
+	/*
+	 * Perhaps use cpufreq to drop frequency, but that could go
+	 * into generic code.
+	 *
+	 * We won't take down the boot processor on i386 due to some
+	 * interrupts only being able to be serviced by the BSP.
+	 * Especially so if we're not using an IOAPIC	-zwane
+	 */
+	if (cpu == 0)
+		return -EBUSY;
+
+	if (nmi_watchdog == NMI_LOCAL_APIC)
+		stop_apic_nmi_watchdog(NULL);
+	clear_local_APIC();
+
+	cpu_disable_common();
 	return 0;
 }
 
-void __cpu_die(unsigned int cpu)
+void native_cpu_die(unsigned int cpu)
 {
 	/* We don't do anything here: idle task is faking death itself. */
 	unsigned int i;
@@ -1402,15 +1396,45 @@ void __cpu_die(unsigned int cpu)
 	}
 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
+
+void play_dead_common(void)
+{
+	idle_task_exit();
+	reset_lazy_tlbstate();
+	irq_ctx_exit(raw_smp_processor_id());
+	c1e_remove_cpu(raw_smp_processor_id());
+
+	mb();
+	/* Ack it */
+	__get_cpu_var(cpu_state) = CPU_DEAD;
+
+	/*
+	 * With physical CPU hotplug, we should halt the cpu
+	 */
+	local_irq_disable();
+}
+
+void native_play_dead(void)
+{
+	play_dead_common();
+	wbinvd_halt();
+}
+
 #else /* ... !CONFIG_HOTPLUG_CPU */
-int __cpu_disable(void)
+int native_cpu_disable(void)
 {
 	return -ENOSYS;
 }
 
-void __cpu_die(unsigned int cpu)
+void native_cpu_die(unsigned int cpu)
 {
 	/* We said "no" in __cpu_disable */
 	BUG();
 }
+
+void native_play_dead(void)
+{
+	BUG();
+}
+
 #endif
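Note: the point of splitting the old __cpu_disable()/__cpu_die() into cpu_disable_common(), native_cpu_disable(), native_cpu_die() and play_dead_common()/native_play_dead() is that a non-native backend can reuse the shared teardown. A hedged sketch of how a hypothetical paravirt backend might wire this up; everything below other than the two common helpers is invented for illustration:

/* Hypothetical backend; only cpu_disable_common()/play_dead_common() are real. */
static int hypothetical_pv_cpu_disable(void)
{
	if (smp_processor_id() == 0)
		return -EBUSY;		/* keep the boot CPU, as native does */

	cpu_disable_common();		/* shared map/irq/vector teardown */
	return 0;
}

static void hypothetical_pv_play_dead(void)
{
	play_dead_common();		/* marks CPU_DEAD, irqs off */
	/* a hypervisor-specific "vcpu down" call would replace wbinvd_halt() here */
}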