Diffstat (limited to 'arch/i386/kernel/smpboot.c')
 arch/i386/kernel/smpboot.c | 349 ++++++++++++++++++++++++++++++++------
 1 file changed, 282 insertions(+), 67 deletions(-)
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index c20d96d5c15c..d66bf489a2e9 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -44,6 +44,9 @@
 #include <linux/smp_lock.h>
 #include <linux/irq.h>
 #include <linux/bootmem.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/percpu.h>
 
 #include <linux/delay.h>
 #include <linux/mc146818rtc.h>
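
The three new headers back the hotplug machinery added below: <linux/cpu.h> declares the hotplug notifier interface and lock_cpu_hotplug(), <linux/notifier.h> supplies the CPU_* state constants, and <linux/percpu.h> provides DEFINE_PER_CPU() used for cpu_state. A minimal sketch of a consumer of that interface (hypothetical callback and names, not part of this patch):

	#include <linux/cpu.h>
	#include <linux/notifier.h>

	/* Hypothetical example: react when a CPU finishes coming online. */
	static int example_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
	{
		int cpu = (long)hcpu;

		if (action == CPU_ONLINE)
			printk(KERN_INFO "cpu%d is up\n", cpu);
		return NOTIFY_OK;
	}

	static struct notifier_block example_cpu_notifier = {
		.notifier_call = example_cpu_callback,
	};

	/* registered once, e.g. from an __init function:
	 *	register_cpu_notifier(&example_cpu_notifier);
	 */
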
@@ -56,18 +59,28 @@
 #include <smpboot_hooks.h>
 
 /* Set if we find a B stepping CPU */
-static int __initdata smp_b_stepping;
+static int __devinitdata smp_b_stepping;
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 #ifdef CONFIG_X86_HT
 EXPORT_SYMBOL(smp_num_siblings);
 #endif
-int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
+
+/* Package ID of each logical CPU */
+int phys_proc_id[NR_CPUS] = {[0 ... NR_CPUS-1] = BAD_APICID};
 EXPORT_SYMBOL(phys_proc_id);
-int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
+
+/* Core ID of each logical CPU */
+int cpu_core_id[NR_CPUS] = {[0 ... NR_CPUS-1] = BAD_APICID};
 EXPORT_SYMBOL(cpu_core_id);
 
+cpumask_t cpu_sibling_map[NR_CPUS];
+EXPORT_SYMBOL(cpu_sibling_map);
+
+cpumask_t cpu_core_map[NR_CPUS];
+EXPORT_SYMBOL(cpu_core_map);
+
 /* bitmap of online cpus */
 cpumask_t cpu_online_map;
 EXPORT_SYMBOL(cpu_online_map);
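
The {[0 ... NR_CPUS-1] = BAD_APICID} initializers above use GCC's range-designated array initializer extension so every slot starts out invalid rather than zero, since an APIC ID of 0 is perfectly valid. A standalone illustration (hypothetical values):

	#define BAD_ID 0xFFu

	/* GCC extension: initialize a whole index range to one value. */
	static unsigned char ids[8] = { [0 ... 7] = BAD_ID };

	/* ids[0] == 0xFF, ..., ids[7] == 0xFF; a plain "= {0}" would have
	 * made slot 0 indistinguishable from a valid APIC ID of zero. */
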
@@ -77,6 +90,12 @@ cpumask_t cpu_callout_map;
 EXPORT_SYMBOL(cpu_callout_map);
 static cpumask_t smp_commenced_mask;
 
+/* The TSC's upper 32 bits can't be written on earlier CPUs (before
+ * Prescott), so there is no way to resync one AP against the BP.
+ * TBD: for Prescott and above, we should use IA64's algorithm.
+ */
+static int __devinitdata tsc_sync_disabled;
+
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
 EXPORT_SYMBOL(cpu_data);
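
For reference, the limitation the new comment describes: the 64-bit TSC is read with RDTSC, but on pre-Prescott parts a write to it only loads the low 32 bits (the high half is cleared), so a hot-plugged AP's counter cannot be slewed to match the BP's. A sketch of the read side, in the style of the kernel's rdtscll() (assumption: i386 inline asm):

	static inline unsigned long long read_tsc(void)
	{
		unsigned long long t;

		/* "=A" binds the 64-bit result to edx:eax on i386 */
		__asm__ __volatile__("rdtsc" : "=A" (t));
		return t;
	}
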
@@ -96,13 +115,16 @@ static int trampoline_exec;
 
 static void map_cpu_to_logical_apicid(void);
 
+/* State of each CPU. */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
 /*
  * Currently trivial. Write the real->protected mode
  * bootstrap into the page concerned. The caller
  * has made sure it's suitably aligned.
  */
 
-static unsigned long __init setup_trampoline(void)
+static unsigned long __devinit setup_trampoline(void)
 {
 	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
 	return virt_to_phys(trampoline_base);
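
The new per-CPU cpu_state variable tracks where each processor is in its hotplug lifecycle, reusing the notifier constants from <linux/notifier.h>. As used later in this patch:

	/*
	 * cpu_state transitions in this patch:
	 *
	 *	__cpu_up()              sets CPU_UP_PREPARE (BP, before the kick)
	 *	start_secondary()       sets CPU_ONLINE     (AP, once booted)
	 *	smp_prepare_boot_cpu()  sets CPU_ONLINE     (BP at boot)
	 *	play_dead()             sets CPU_DEAD       (AP; polled by __cpu_die())
	 */
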
@@ -132,7 +154,7 @@ void __init smp_alloc_memory(void)
  * a given CPU
  */
 
-static void __init smp_store_cpu_info(int id)
+static void __devinit smp_store_cpu_info(int id)
 {
 	struct cpuinfo_x86 *c = cpu_data + id;
 
@@ -326,7 +348,7 @@ extern void calibrate_delay(void);
 
 static atomic_t init_deasserted;
 
-static void __init smp_callin(void)
+static void __devinit smp_callin(void)
 {
 	int cpuid, phys_id;
 	unsigned long timeout;
@@ -411,16 +433,48 @@ static void __init smp_callin(void)
 	/*
 	 * Synchronize the TSC with the BP
 	 */
-	if (cpu_has_tsc && cpu_khz)
+	if (cpu_has_tsc && cpu_khz && !tsc_sync_disabled)
 		synchronize_tsc_ap();
 }
 
 static int cpucount;
 
+static inline void
+set_cpu_sibling_map(int cpu)
+{
+	int i;
+
+	if (smp_num_siblings > 1) {
+		for (i = 0; i < NR_CPUS; i++) {
+			if (!cpu_isset(i, cpu_callout_map))
+				continue;
+			if (cpu_core_id[cpu] == cpu_core_id[i]) {
+				cpu_set(i, cpu_sibling_map[cpu]);
+				cpu_set(cpu, cpu_sibling_map[i]);
+			}
+		}
+	} else {
+		cpu_set(cpu, cpu_sibling_map[cpu]);
+	}
+
+	if (current_cpu_data.x86_num_cores > 1) {
+		for (i = 0; i < NR_CPUS; i++) {
+			if (!cpu_isset(i, cpu_callout_map))
+				continue;
+			if (phys_proc_id[cpu] == phys_proc_id[i]) {
+				cpu_set(i, cpu_core_map[cpu]);
+				cpu_set(cpu, cpu_core_map[i]);
+			}
+		}
+	} else {
+		cpu_core_map[cpu] = cpu_sibling_map[cpu];
+	}
+}
+
 /*
  * Activate a secondary processor.
  */
-static void __init start_secondary(void *unused)
+static void __devinit start_secondary(void *unused)
 {
 	/*
 	 * Dont put anything before smp_callin(), SMP
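
To make the pairing rule above concrete: CPUs sharing a cpu_core_id become HT siblings, and CPUs sharing a phys_proc_id share a core map (the whole package). A minimal userspace sketch with hypothetical IDs for a 2-package, 2-cores-per-package, HT-enabled box:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical topology: 8 logical cpus, HT pairs share a core */
		int core_id[8] = {0, 0, 1, 1, 2, 2, 3, 3};
		int pkg_id[8]  = {0, 0, 0, 0, 1, 1, 1, 1};
		unsigned int sibling[8] = {0}, core[8] = {0};
		int cpu, i;

		for (cpu = 0; cpu < 8; cpu++)
			for (i = 0; i < 8; i++) {
				if (core_id[cpu] == core_id[i])
					sibling[cpu] |= 1u << i;  /* cpu_sibling_map */
				if (pkg_id[cpu] == pkg_id[i])
					core[cpu] |= 1u << i;     /* cpu_core_map */
			}

		for (cpu = 0; cpu < 8; cpu++)
			printf("cpu%d: sibling=0x%02x core=0x%02x\n",
			       cpu, sibling[cpu], core[cpu]);
		return 0;	/* cpu0 prints sibling=0x03 core=0x0f */
	}
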
@@ -443,7 +497,23 @@ static void __init start_secondary(void *unused)
 	 * the local TLBs too.
 	 */
 	local_flush_tlb();
+
+	/* This must be done before setting cpu_online_map */
+	set_cpu_sibling_map(raw_smp_processor_id());
+	wmb();
+
+	/*
+	 * We need to hold call_lock, so there is no inconsistency
+	 * between the time smp_call_function() determines the number of
+	 * IPI recipients and the time when the determination is made
+	 * as to which cpus receive the IPI.  Holding this lock helps
+	 * us to not include this cpu in a currently in progress
+	 * smp_call_function().
+	 */
+	lock_ipi_call_lock();
 	cpu_set(smp_processor_id(), cpu_online_map);
+	unlock_ipi_call_lock();
+	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 
 	/* We can take interrupts now: we're officially "up". */
 	local_irq_enable();
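
The call_lock comment is easiest to see against the shape of smp_call_function(): it counts recipients from the online map, sends the IPI, then spins until that many CPUs answer. Roughly (a sketch, not the exact i386 code; names like call_data_started are placeholders):

	/* sketch: why the online-map update must be inside call_lock */
	int smp_call_function_sketch(void (*func)(void *), void *info)
	{
		int cpus;

		spin_lock(&call_lock);
		cpus = num_online_cpus() - 1;              /* (a) count recipients */
		send_IPI_allbutself(CALL_FUNCTION_VECTOR); /* (b) IPI the others   */
		while (atomic_read(&call_data_started) != cpus)
			cpu_relax();                       /* (c) wait for acks    */
		spin_unlock(&call_lock);
		return 0;
	}

	/* If a secondary set itself in cpu_online_map between (a) and (c)
	 * without holding call_lock, the count at (a) and the set of CPUs
	 * acking at (c) could disagree and the caller would spin forever. */
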
@@ -458,7 +528,7 @@ static void __init start_secondary(void *unused)
  * from the task structure
  * This function must not return.
  */
-void __init initialize_secondary(void)
+void __devinit initialize_secondary(void)
 {
 	/*
 	 * We don't actually need to load the full TSS,
@@ -572,7 +642,7 @@ static inline void __inquire_remote_apic(int apicid)
  * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
  * won't ... remember to clear down the APIC, etc later.
  */
-static int __init
+static int __devinit
 wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
 {
 	unsigned long send_status = 0, accept_status = 0;
@@ -618,7 +688,7 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
 #endif	/* WAKE_SECONDARY_VIA_NMI */
 
 #ifdef WAKE_SECONDARY_VIA_INIT
-static int __init
+static int __devinit
 wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 {
 	unsigned long send_status = 0, accept_status = 0;
@@ -753,8 +823,43 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 #endif	/* WAKE_SECONDARY_VIA_INIT */
 
 extern cpumask_t cpu_initialized;
+static inline int alloc_cpu_id(void)
+{
+	cpumask_t tmp_map;
+	int cpu;
+	cpus_complement(tmp_map, cpu_present_map);
+	cpu = first_cpu(tmp_map);
+	if (cpu >= NR_CPUS)
+		return -ENODEV;
+	return cpu;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct * __devinitdata cpu_idle_tasks[NR_CPUS];
+static inline struct task_struct * alloc_idle_task(int cpu)
+{
+	struct task_struct *idle;
+
+	if ((idle = cpu_idle_tasks[cpu]) != NULL) {
+		/* initialize thread_struct; we really want to avoid
+		 * destroying the idle thread
+		 */
+		idle->thread.esp = (unsigned long)(((struct pt_regs *)
+			(THREAD_SIZE + (unsigned long) idle->thread_info)) - 1);
+		init_idle(idle, cpu);
+		return idle;
+	}
+	idle = fork_idle(cpu);
+
+	if (!IS_ERR(idle))
+		cpu_idle_tasks[cpu] = idle;
+	return idle;
+}
+#else
+#define alloc_idle_task(cpu) fork_idle(cpu)
+#endif
 
-static int __init do_boot_cpu(int apicid)
+static int __devinit do_boot_cpu(int apicid, int cpu)
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
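
The esp computation in alloc_idle_task() recreates the stack pointer a freshly forked task would have; under the 2.6-era i386 layout the thread_info sits at the bottom of the task's stack and a struct pt_regs frame is carved off the top. As a sketch:

	/* sketch of the arithmetic, assuming the i386 stack layout above */
	static unsigned long idle_initial_esp(struct task_struct *idle)
	{
		unsigned long stack_top =
			THREAD_SIZE + (unsigned long)idle->thread_info;

		/* esp points just below a full register frame at the stack top */
		return (unsigned long)(((struct pt_regs *)stack_top) - 1);
	}
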
@@ -763,16 +868,17 @@ static int __init do_boot_cpu(int apicid)
 {
 	struct task_struct *idle;
 	unsigned long boot_error;
-	int timeout, cpu;
+	int timeout;
 	unsigned long start_eip;
 	unsigned short nmi_high = 0, nmi_low = 0;
 
-	cpu = ++cpucount;
+	++cpucount;
+
 	/*
 	 * We can't use kernel_thread since we must avoid to
 	 * reschedule the child.
 	 */
-	idle = fork_idle(cpu);
+	idle = alloc_idle_task(cpu);
 	if (IS_ERR(idle))
 		panic("failed fork for CPU %d", cpu);
 	idle->thread.eip = (unsigned long) start_secondary;
@@ -839,13 +945,16 @@ static int __init do_boot_cpu(int apicid)
 			inquire_remote_apic(apicid);
 		}
 	}
-	x86_cpu_to_apicid[cpu] = apicid;
+
 	if (boot_error) {
 		/* Try to put things back the way they were before ... */
 		unmap_cpu_to_logical_apicid(cpu);
 		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
 		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
 		cpucount--;
+	} else {
+		x86_cpu_to_apicid[cpu] = apicid;
+		cpu_set(cpu, cpu_present_map);
 	}
 
 	/* mark "stuck" area as not stuck */
@@ -854,6 +963,75 @@ static int __init do_boot_cpu(int apicid)
 	return boot_error;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+void cpu_exit_clear(void)
+{
+	int cpu = raw_smp_processor_id();
+
+	idle_task_exit();
+
+	cpucount--;
+	cpu_uninit();
+	irq_ctx_exit(cpu);
+
+	cpu_clear(cpu, cpu_callout_map);
+	cpu_clear(cpu, cpu_callin_map);
+	cpu_clear(cpu, cpu_present_map);
+
+	cpu_clear(cpu, smp_commenced_mask);
+	unmap_cpu_to_logical_apicid(cpu);
+}
+
+struct warm_boot_cpu_info {
+	struct completion *complete;
+	int apicid;
+	int cpu;
+};
+
+static void __devinit do_warm_boot_cpu(void *p)
+{
+	struct warm_boot_cpu_info *info = p;
+	do_boot_cpu(info->apicid, info->cpu);
+	complete(info->complete);
+}
+
+int __devinit smp_prepare_cpu(int cpu)
+{
+	DECLARE_COMPLETION(done);
+	struct warm_boot_cpu_info info;
+	struct work_struct task;
+	int apicid, ret;
+
+	lock_cpu_hotplug();
+	apicid = x86_cpu_to_apicid[cpu];
+	if (apicid == BAD_APICID) {
+		ret = -ENODEV;
+		goto exit;
+	}
+
+	info.complete = &done;
+	info.apicid = apicid;
+	info.cpu = cpu;
+	INIT_WORK(&task, do_warm_boot_cpu, &info);
+
+	tsc_sync_disabled = 1;
+
+	/* init low mem mapping */
+	memcpy(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
+			sizeof(swapper_pg_dir[0]) * KERNEL_PGD_PTRS);
+	flush_tlb_all();
+	schedule_work(&task);
+	wait_for_completion(&done);
+
+	tsc_sync_disabled = 0;
+	zap_low_mappings();
+	ret = 0;
+exit:
+	unlock_cpu_hotplug();
+	return ret;
+}
+#endif
+
 static void smp_tune_scheduling (void)
 {
 	unsigned long cachesize;       /* kB */
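
smp_prepare_cpu() runs do_boot_cpu() from keventd via schedule_work(), so the warm boot happens in process context with low memory temporarily identity-mapped and the TSC resync suppressed. A hypothetical caller (names and call order assumed, not part of this patch) would pair it with the generic cpu_up() path:

	/* hypothetical usage sketch: re-start a CPU that was hot-removed */
	static int example_bring_cpu_back(int cpu)
	{
		int err;

		err = smp_prepare_cpu(cpu);	/* warm-boot the AP via keventd */
		if (err)
			return err;
		return cpu_up(cpu);		/* generic path -> __cpu_up()   */
	}
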
@@ -895,13 +1073,6 @@ void *xquad_portio;
 EXPORT_SYMBOL(xquad_portio);
 #endif
 
-cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
-#ifdef CONFIG_X86_HT
-EXPORT_SYMBOL(cpu_sibling_map);
-#endif
-cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_core_map);
-
 static void __init smp_boot_cpus(unsigned int max_cpus)
 {
 	int apicid, cpu, bit, kicked;
@@ -1013,7 +1184,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		if (max_cpus <= cpucount+1)
 			continue;
 
-		if (do_boot_cpu(apicid))
+		if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
 			printk("CPU #%d not responding - cannot use it.\n",
 								apicid);
 		else
@@ -1065,44 +1236,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		cpus_clear(cpu_core_map[cpu]);
 	}
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		struct cpuinfo_x86 *c = cpu_data + cpu;
-		int siblings = 0;
-		int i;
-		if (!cpu_isset(cpu, cpu_callout_map))
-			continue;
-
-		if (smp_num_siblings > 1) {
-			for (i = 0; i < NR_CPUS; i++) {
-				if (!cpu_isset(i, cpu_callout_map))
-					continue;
-				if (cpu_core_id[cpu] == cpu_core_id[i]) {
-					siblings++;
-					cpu_set(i, cpu_sibling_map[cpu]);
-				}
-			}
-		} else {
-			siblings++;
-			cpu_set(cpu, cpu_sibling_map[cpu]);
-		}
-
-		if (siblings != smp_num_siblings) {
-			printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
-			smp_num_siblings = siblings;
-		}
-
-		if (c->x86_num_cores > 1) {
-			for (i = 0; i < NR_CPUS; i++) {
-				if (!cpu_isset(i, cpu_callout_map))
-					continue;
-				if (phys_proc_id[cpu] == phys_proc_id[i]) {
-					cpu_set(i, cpu_core_map[cpu]);
-				}
-			}
-		} else {
-			cpu_core_map[cpu] = cpu_sibling_map[cpu];
-		}
-	}
+	cpu_set(0, cpu_sibling_map[0]);
+	cpu_set(0, cpu_core_map[0]);
 
 	smpboot_setup_io_apic();
 
@@ -1119,6 +1254,9 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
    who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+	smp_commenced_mask = cpumask_of_cpu(0);
+	cpu_callin_map = cpumask_of_cpu(0);
+	mb();
 	smp_boot_cpus(max_cpus);
 }
 
@@ -1126,23 +1264,98 @@ void __devinit smp_prepare_boot_cpu(void)
 {
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), cpu_callout_map);
+	cpu_set(smp_processor_id(), cpu_present_map);
+	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 }
 
-int __devinit __cpu_up(unsigned int cpu)
+#ifdef CONFIG_HOTPLUG_CPU
+static void
+remove_siblinginfo(int cpu)
 {
-	/* This only works at boot for x86.  See "rewrite" above. */
-	if (cpu_isset(cpu, smp_commenced_mask)) {
-		local_irq_enable();
-		return -ENOSYS;
+	int sibling;
+
+	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
+		cpu_clear(cpu, cpu_sibling_map[sibling]);
+	for_each_cpu_mask(sibling, cpu_core_map[cpu])
+		cpu_clear(cpu, cpu_core_map[sibling]);
+	cpus_clear(cpu_sibling_map[cpu]);
+	cpus_clear(cpu_core_map[cpu]);
+	phys_proc_id[cpu] = BAD_APICID;
+	cpu_core_id[cpu] = BAD_APICID;
+}
+
+int __cpu_disable(void)
+{
+	cpumask_t map = cpu_online_map;
+	int cpu = smp_processor_id();
+
+	/*
+	 * Perhaps use cpufreq to drop frequency, but that could go
+	 * into generic code.
+	 *
+	 * We won't take down the boot processor on i386 due to some
+	 * interrupts only being able to be serviced by the BSP.
+	 * Especially so if we're not using an IOAPIC	-zwane
+	 */
+	if (cpu == 0)
+		return -EBUSY;
+
+	/* We enable the timer again on the exit path of the death loop */
+	disable_APIC_timer();
+	/* Allow any queued timer interrupts to get serviced */
+	local_irq_enable();
+	mdelay(1);
+	local_irq_disable();
+
+	remove_siblinginfo(cpu);
+
+	cpu_clear(cpu, map);
+	fixup_irqs(map);
+	/* It's now safe to remove this processor from the online map */
+	cpu_clear(cpu, cpu_online_map);
+	return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+	/* We don't do anything here: the idle task is faking death itself. */
+	unsigned int i;
+
+	for (i = 0; i < 10; i++) {
+		/* They ack this in play_dead() by setting CPU_DEAD */
+		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+			printk("CPU %d is now offline\n", cpu);
+			return;
+		}
+		current->state = TASK_UNINTERRUPTIBLE;
+		schedule_timeout(HZ/10);
 	}
+	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int __cpu_disable(void)
+{
+	return -ENOSYS;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+	/* We said "no" in __cpu_disable() */
+	BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
 
+int __devinit __cpu_up(unsigned int cpu)
+{
 	/* In case one didn't come up */
 	if (!cpu_isset(cpu, cpu_callin_map)) {
+		printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
 		local_irq_enable();
 		return -EIO;
 	}
 
 	local_irq_enable();
+	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 	/* Unleash the CPU! */
 	cpu_set(cpu, smp_commenced_mask);
 	while (!cpu_isset(cpu, cpu_online_map))
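
__cpu_die() above only polls for the CPU_DEAD acknowledgement; the dying CPU's half of the handshake lives in the idle loop (play_dead() in arch/i386/kernel/process.c). Approximately (a sketch of that counterpart, not part of this diff):

	/* sketch: the AP side of the __cpu_die() handshake */
	static inline void play_dead_sketch(void)
	{
		/* tear down this CPU's maps; must precede the DEAD ack */
		cpu_exit_clear();
		wbinvd();			/* flush caches */
		mb();
		__get_cpu_var(cpu_state) = CPU_DEAD;	/* ack __cpu_die() */

		local_irq_disable();
		while (1)
			halt();			/* spin dead until re-woken */
	}
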
@@ -1156,10 +1369,12 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	setup_ioapic_dest();
 #endif
 	zap_low_mappings();
+#ifndef CONFIG_HOTPLUG_CPU
 	/*
 	 * Disable executability of the SMP trampoline:
 	 */
 	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
+#endif
 }
 
 void __init smp_intr_init(void)