author    Zwane Mwaikambo <zwane@linuxpower.ca>    2005-06-25 17:54:50 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-25 19:24:29 -0400
commit    f370513640492641b4046bfd9a6e4714f6ae530d
tree      46da47197fcbb3614b51c5f1fac841bf26d5e572    /arch/i386/kernel/smpboot.c
parent    d92de65cab5980c16d4a1c326c1ef9a591892883
[PATCH] i386 CPU hotplug
(The i386 CPU hotplug patch provides infrastructure for some work which Pavel is doing as well as for ACPI S3 (suspend-to-RAM) work which Li Shaohua <shaohua.li@intel.com> is doing.)

The following provides i386 architecture support for safely unregistering and registering processors during runtime, updated for the current -mm tree. In order to avoid dumping CPU hotplug code into kernel/irq/*, I dropped the cpu_online check in do_IRQ() by modifying fixup_irqs(). The difference is that on CPU offline, fixup_irqs() is called before we clear the CPU from cpu_online_map, followed by a long delay, in order to ensure that we never have any queued external interrupts on the APICs. There are additional changes to s390 and ppc64 to account for this change.

1) Add CONFIG_HOTPLUG_CPU.
2) Disable the local APIC timer on dead CPUs.
3) Disable preemption around IRQ balancing to prevent CPUs going down.
4) Print IRQ stats for all possible CPUs.
5) Add a debugging check for interrupts on offline CPUs.
6) Hacky fixup_irqs() to redirect IRQs when CPUs go offline/online.
7) Add play_dead() for offline CPUs to spin inside.
8) Handle offline CPUs set in flush_tlb_others().
9) Grab the lock earlier in smp_call_function() to prevent CPUs going down.
10) Implement __cpu_disable() and __cpu_die().
11) Enable local interrupts in cpu_enable() after fixup_irqs().
12) Don't fiddle with the NMI on a dead CPU, but leave it intact on the other CPUs.
13) Program IRQ affinity whilst the CPU is still in cpu_online_map on offline.

Signed-off-by: Zwane Mwaikambo <zwane@linuxpower.ca>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
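For context on items 2, 7 and 10 above: the dead CPU's half of the cpu_state handshake lives in play_dead() in the idle loop, added elsewhere in the patch series rather than in the file diffed below. What follows is only a rough sketch of that counterpart, reconstructed from the comments in this diff (cpu_enable() setting CPU_UP_PREPARE, __cpu_die() polling for CPU_DEAD, and __cpu_disable()'s note that the APIC timer is re-enabled on the exit path of the death loop); it is not the patch's actual code.

/*
 * Sketch only -- inferred from the cpu_state handshake visible in the
 * diff below, not copied from the patch.  Runs in the idle task of a
 * CPU that __cpu_disable() has just removed from cpu_online_map.
 */
static inline void play_dead(void)
{
        /* Ack the offline: __cpu_die() polls for CPU_DEAD. */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        /* Spin until cpu_enable() flips us back to CPU_UP_PREPARE. */
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();

        /* Exit path of the death loop: rejoin the online map and
         * re-enable the local APIC timer that __cpu_disable() stopped. */
        local_irq_disable();
        cpu_set(smp_processor_id(), cpu_online_map);
        enable_APIC_timer();
        local_irq_enable();
}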
Diffstat (limited to 'arch/i386/kernel/smpboot.c')
 arch/i386/kernel/smpboot.c | 98
 1 file changed, 93 insertions(+), 5 deletions(-)
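The generic half of the hotplug path lives in kernel/cpu.c and is not shown on this page; the hunks below only add the i386 hooks it calls. As a rough, illustrative sketch of how those hooks fit together on offline (the helpers offline_cpu_sketch, take_down_self and run_on_cpu_stopped are invented for the sketch; only __cpu_disable(), __cpu_die() and play_dead() come from the patch series):

/*
 * Illustrative sketch only: a condensed view of how the generic hotplug
 * core is expected to drive the arch hooks added below.
 * run_on_cpu_stopped() stands in for the real stop_machine-based
 * mechanism and is not an actual kernel API.
 */
static int take_down_self(void *unused)
{
        /* Runs on the CPU being removed, with the rest of the machine
         * quiesced: migrates IRQs away and drops the CPU from
         * cpu_online_map (see __cpu_disable() below). */
        return __cpu_disable();
}

static int offline_cpu_sketch(unsigned int cpu)
{
        int err = run_on_cpu_stopped(cpu, take_down_self, NULL);

        if (err)
                return err;

        /* The idle task on the dead CPU parks itself in play_dead();
         * __cpu_die() below waits for it to acknowledge with CPU_DEAD. */
        __cpu_die(cpu);
        return 0;
}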
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index c20d96d5c15c..ad74a46e9ef0 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -44,6 +44,9 @@
 #include <linux/smp_lock.h>
 #include <linux/irq.h>
 #include <linux/bootmem.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/percpu.h>
 
 #include <linux/delay.h>
 #include <linux/mc146818rtc.h>
@@ -96,6 +99,9 @@ static int trampoline_exec;
 
 static void map_cpu_to_logical_apicid(void);
 
+/* State of each CPU. */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
 /*
  * Currently trivial. Write the real->protected mode
  * bootstrap into the page concerned. The caller
@@ -1119,6 +1125,9 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
    who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+        smp_commenced_mask = cpumask_of_cpu(0);
+        cpu_callin_map = cpumask_of_cpu(0);
+        mb();
         smp_boot_cpus(max_cpus);
 }
 
@@ -1128,20 +1137,99 @@ void __devinit smp_prepare_boot_cpu(void)
         cpu_set(smp_processor_id(), cpu_callout_map);
 }
 
-int __devinit __cpu_up(unsigned int cpu)
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* must be called with the cpucontrol mutex held */
+static int __devinit cpu_enable(unsigned int cpu)
 {
-        /* This only works at boot for x86.  See "rewrite" above. */
-        if (cpu_isset(cpu, smp_commenced_mask)) {
-                local_irq_enable();
-                return -ENOSYS;
+        /* get the target out of its holding state */
+        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+        wmb();
+
+        /* wait for the processor to ack it. timeout? */
+        while (!cpu_online(cpu))
+                cpu_relax();
+
+        fixup_irqs(cpu_online_map);
+        /* counter the disable in fixup_irqs() */
+        local_irq_enable();
+        return 0;
+}
+
+int __cpu_disable(void)
+{
+        cpumask_t map = cpu_online_map;
+        int cpu = smp_processor_id();
+
+        /*
+         * Perhaps use cpufreq to drop frequency, but that could go
+         * into generic code.
+         *
+         * We won't take down the boot processor on i386 due to some
+         * interrupts only being able to be serviced by the BSP.
+         * Especially so if we're not using an IOAPIC        -zwane
+         */
+        if (cpu == 0)
+                return -EBUSY;
+
+        /* We enable the timer again on the exit path of the death loop */
+        disable_APIC_timer();
+        /* Allow any queued timer interrupts to get serviced */
+        local_irq_enable();
+        mdelay(1);
+        local_irq_disable();
+
+        cpu_clear(cpu, map);
+        fixup_irqs(map);
+        /* It's now safe to remove this processor from the online map */
+        cpu_clear(cpu, cpu_online_map);
+        return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+        /* We don't do anything here: idle task is faking death itself. */
+        unsigned int i;
+
+        for (i = 0; i < 10; i++) {
+                /* They ack this in play_dead by setting CPU_DEAD */
+                if (per_cpu(cpu_state, cpu) == CPU_DEAD)
+                        return;
+                current->state = TASK_UNINTERRUPTIBLE;
+                schedule_timeout(HZ/10);
         }
+        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int __cpu_disable(void)
+{
+        return -ENOSYS;
+}
 
+void __cpu_die(unsigned int cpu)
+{
+        /* We said "no" in __cpu_disable */
+        BUG();
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+int __devinit __cpu_up(unsigned int cpu)
+{
         /* In case one didn't come up */
         if (!cpu_isset(cpu, cpu_callin_map)) {
+                printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
                 local_irq_enable();
                 return -EIO;
         }
 
+#ifdef CONFIG_HOTPLUG_CPU
+        /* Already up, and in cpu_quiescent now? */
+        if (cpu_isset(cpu, smp_commenced_mask)) {
+                cpu_enable(cpu);
+                return 0;
+        }
+#endif
+
         local_irq_enable();
         /* Unleash the CPU! */
         cpu_set(cpu, smp_commenced_mask);