about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorSuresh Siddha <suresh.b.siddha@intel.com>2009-10-26 18:24:34 -0400
committerIngo Molnar <mingo@elte.hu>2009-11-02 09:56:36 -0500
commita5e74b841930bec78a4684ab9f208b2ddfe7c736 (patch)
tree46af59815263c8f16269a80ffb511788aee1ee74 /arch
parent23359a88e7eca3c4f402562b102f23014db3c2aa (diff)
x86: Force irq complete move during cpu offline
When a cpu goes offline, fixup_irqs() tries to move irqs currently destined to the offline cpu to a new cpu. But this attempt will fail if the irq was recently moved to this cpu and the irq still hasn't arrived at this cpu (for non intr-remapping platforms this is when we free the vector allocation at the previous destination) that is about to go offline. This will end up with the interrupt subsystem still pointing the irq to the offline cpu, causing that irq to not work any more. Fix this by forcing the irq to complete its move (it's been a long time since we moved the irq to this cpu which we are offlining now) and then moving this irq to a new cpu before this cpu goes offline. Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Acked-by: Gary Hade <garyhade@us.ibm.com> Cc: Eric W. Biederman <ebiederm@xmission.com> LKML-Reference: <20091026230001.848830905@sbs-t61.sc.intel.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/irq.h1
-rw-r--r--arch/x86/kernel/apic/io_apic.c18
-rw-r--r--arch/x86/kernel/irq.c7
3 files changed, 23 insertions, 3 deletions
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index ddda6cbed6f..ffd700ff5dc 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -34,6 +34,7 @@ static inline int irq_canonicalize(int irq)
34#ifdef CONFIG_HOTPLUG_CPU 34#ifdef CONFIG_HOTPLUG_CPU
35#include <linux/cpumask.h> 35#include <linux/cpumask.h>
36extern void fixup_irqs(void); 36extern void fixup_irqs(void);
37extern void irq_force_complete_move(int);
37#endif 38#endif
38 39
39extern void (*generic_interrupt_extension)(void); 40extern void (*generic_interrupt_extension)(void);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index e9e5b02c3af..4e886efd9a1 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2450,21 +2450,33 @@ unlock:
2450 irq_exit(); 2450 irq_exit();
2451} 2451}
2452 2452
2453static void irq_complete_move(struct irq_desc **descp) 2453static void __irq_complete_move(struct irq_desc **descp, unsigned vector)
2454{ 2454{
2455 struct irq_desc *desc = *descp; 2455 struct irq_desc *desc = *descp;
2456 struct irq_cfg *cfg = desc->chip_data; 2456 struct irq_cfg *cfg = desc->chip_data;
2457 unsigned vector, me; 2457 unsigned me;
2458 2458
2459 if (likely(!cfg->move_in_progress)) 2459 if (likely(!cfg->move_in_progress))
2460 return; 2460 return;
2461 2461
2462 vector = ~get_irq_regs()->orig_ax;
2463 me = smp_processor_id(); 2462 me = smp_processor_id();
2464 2463
2465 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2464 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2466 send_cleanup_vector(cfg); 2465 send_cleanup_vector(cfg);
2467} 2466}
2467
2468static void irq_complete_move(struct irq_desc **descp)
2469{
2470 __irq_complete_move(descp, ~get_irq_regs()->orig_ax);
2471}
2472
2473void irq_force_complete_move(int irq)
2474{
2475 struct irq_desc *desc = irq_to_desc(irq);
2476 struct irq_cfg *cfg = desc->chip_data;
2477
2478 __irq_complete_move(&desc, cfg->vector);
2479}
2468#else 2480#else
2469static inline void irq_complete_move(struct irq_desc **descp) {} 2481static inline void irq_complete_move(struct irq_desc **descp) {}
2470#endif 2482#endif
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 342bcbca19b..b10a5e1da06 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -305,6 +305,13 @@ void fixup_irqs(void)
305 continue; 305 continue;
306 } 306 }
307 307
308 /*
309 * Complete the irq move. This cpu is going down and for
310 * non intr-remapping case, we can't wait till this interrupt
311 * arrives at this cpu before completing the irq move.
312 */
313 irq_force_complete_move(irq);
314
308 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { 315 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
309 break_affinity = 1; 316 break_affinity = 1;
310 affinity = cpu_all_mask; 317 affinity = cpu_all_mask;