path: root/arch/i386/kernel/irq.c
author    Zwane Mwaikambo <zwane@linuxpower.ca>  2005-06-25 17:54:50 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-25 19:24:29 -0400
commit    f370513640492641b4046bfd9a6e4714f6ae530d (patch)
tree      46da47197fcbb3614b51c5f1fac841bf26d5e572 /arch/i386/kernel/irq.c
parent    d92de65cab5980c16d4a1c326c1ef9a591892883 (diff)
[PATCH] i386 CPU hotplug
(The i386 CPU hotplug patch provides infrastructure for some work which Pavel is doing as well as for ACPI S3 (suspend-to-RAM) work which Li Shaohua <shaohua.li@intel.com> is doing.)

The following provides i386 architecture support for safely unregistering and registering processors during runtime, updated for the current -mm tree. In order to avoid dumping cpu hotplug code into kernel/irq/*, I dropped the cpu_online check in do_IRQ() by modifying fixup_irqs(). The difference is that on cpu offline, fixup_irqs() is called before we clear the cpu from cpu_online_map, followed by a long delay, in order to ensure that we never have any queued external interrupts on the APICs. There are additional changes to s390 and ppc64 to account for this change.

1) Add CONFIG_HOTPLUG_CPU.
2) Disable local APIC timer on dead cpus.
3) Disable preempt around irq balancing to prevent CPUs going down.
4) Print irq stats for all possible cpus.
5) Debugging check for interrupts on offline cpus.
6) Hacky fixup_irqs() to redirect irqs when cpus go off/online.
7) play_dead() for offline cpus to spin inside.
8) Handle offline cpus set in flush_tlb_others().
9) Grab lock earlier in smp_call_function() to prevent CPUs going down.
10) Implement __cpu_disable() and __cpu_die().
11) Enable local interrupts in cpu_enable() after fixup_irqs().
12) Don't fiddle with NMI on dead cpu, but leave intact on other cpus.
13) Program IRQ affinity whilst cpu is still in cpu_online_map on offline.

Signed-off-by: Zwane Mwaikambo <zwane@linuxpower.ca>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
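For context, changelog items 2, 10 and 13 imply an offline path shaped roughly like the sketch below. This is illustrative only and not part of this diff; the actual implementation lives in arch/i386/kernel/smpboot.c, and every name and detail beyond what the changelog states is an assumption.

	/* Illustrative sketch -- NOT code from this patch.  It shows the
	 * ordering the changelog describes for taking a CPU offline:
	 * reprogram IRQ affinity while the dying CPU is still in
	 * cpu_online_map, and only then remove it from the map. */
	int __cpu_disable(void)
	{
		cpumask_t map = cpu_online_map;
		int cpu = smp_processor_id();

		if (cpu == 0)		/* the BSP must stay up on i386 */
			return -EBUSY;

		disable_APIC_timer();	/* item 2: no APIC timer on a dead cpu */

		cpu_clear(cpu, map);	/* map = CPUs that will remain online */
		fixup_irqs(map);	/* item 13: retarget IRQs while this cpu
					 * can still service stragglers */
		cpu_clear(cpu, cpu_online_map);
		return 0;
	}

The point of the ordering is that fixup_irqs() runs, and delays, while the dying CPU can still take interrupts, so nothing queued on the APICs is stranded when the CPU finally drops out of cpu_online_map.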
Diffstat (limited to 'arch/i386/kernel/irq.c')
-rw-r--r--  arch/i386/kernel/irq.c | 67
1 file changed, 54 insertions(+), 13 deletions(-)
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 73945a3c53c..af115004aec 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -15,6 +15,9 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
 
 DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_maxaligned_in_smp;
 EXPORT_PER_CPU_SYMBOL(irq_stat);
@@ -210,9 +213,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, "           ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d       ",j);
+		for_each_cpu(j)
+			seq_printf(p, "CPU%d       ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -225,9 +227,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		for_each_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 		seq_printf(p, " %14s", irq_desc[i].handler->typename);
 		seq_printf(p, "  %s", action->name);
@@ -240,16 +241,14 @@ skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", nmi_count(j));
+		for_each_cpu(j)
+			seq_printf(p, "%10u ", nmi_count(j));
 		seq_putc(p, '\n');
 #ifdef CONFIG_X86_LOCAL_APIC
 		seq_printf(p, "LOC: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ",
-					per_cpu(irq_stat,j).apic_timer_irqs);
+		for_each_cpu(j)
+			seq_printf(p, "%10u ",
+				per_cpu(irq_stat,j).apic_timer_irqs);
 		seq_putc(p, '\n');
 #endif
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
@@ -259,3 +258,45 @@ skip:
 	}
 	return 0;
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+#include <mach_apic.h>
+
+void fixup_irqs(cpumask_t map)
+{
+	unsigned int irq;
+	static int warned;
+
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		cpumask_t mask;
+		if (irq == 2)
+			continue;
+
+		cpus_and(mask, irq_affinity[irq], map);
+		if (any_online_cpu(mask) == NR_CPUS) {
+			printk("Breaking affinity for irq %i\n", irq);
+			mask = map;
+		}
+		if (irq_desc[irq].handler->set_affinity)
+			irq_desc[irq].handler->set_affinity(irq, mask);
+		else if (irq_desc[irq].action && !(warned++))
+			printk("Cannot set affinity for irq %i\n", irq);
+	}
+
+#if 0
+	barrier();
+	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
+	   [note the nop - the interrupt-enable boundary on x86 is two
+	   instructions from sti] - to flush out pending hardirqs and
+	   IPIs. After this point nothing is supposed to reach this CPU." */
+	__asm__ __volatile__("sti; nop; cli");
+	barrier();
+#else
+	/* That doesn't seem sufficient. Give it 1ms. */
+	local_irq_enable();
+	mdelay(1);
+	local_irq_disable();
+#endif
+}
+#endif
+
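A note on the show_interrupts() conversion in the earlier hunks: in this era of the kernel, for_each_cpu(j) iterates cpu_possible_map rather than cpu_online_map, which is what changelog item 4 ("Print irq stats for all possible cpus") is about. A minimal before/after sketch, assuming the 2.6.12 cpumask API:

	/* Old loop: a column exists only for CPUs online at the moment of
	 * the read, so /proc/interrupts columns shift during hotplug. */
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);

	/* New loop: every possible CPU keeps a stable column.  Here
	 * for_each_cpu(j) expands to for_each_cpu_mask(j, cpu_possible_map). */
	for_each_cpu(j)
		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);

Keeping a column per possible CPU means the output no longer changes shape while a CPU is being taken down, which matters given that fixup_irqs() above runs while the dying CPU is still visible in the maps.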