author	Suresh Siddha <suresh.b.siddha@intel.com>	2009-10-26 18:24:36 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-11-02 09:56:37 -0500
commit	5231a68614b94f60e8f6c56bc6e3d75955b9e75e (patch)
tree	0e7cb7aecbb0d18617d68bb85a4e9703c7299c55
parent	b3ec0a37a7907813bb4fb85a2d94102c152470b7 (diff)
x86: Remove local_irq_enable()/local_irq_disable() in fixup_irqs()
To ensure that we handle all the pending interrupts (destined for this cpu that is going down) in the interrupt subsystem before the cpu goes offline, fixup_irqs() does:

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

Enabling interrupts is not a good thing, as this cpu is already offline. So this patch replaces that logic with:

	mdelay(1);
	check APIC_IRR bits
	retrigger the irq at the new destination if any interrupt
	has arrived via IPI

For IO-APIC level triggered interrupts, this retrigger IPI will appear as an edge interrupt. ack_apic_level() will detect this condition; the IO-APIC RTE's remoteIRR is then cleared using directed EOI (via the IO-APIC EOI register) on Intel platforms, while others use the existing mask+edge logic followed by unmask+level.

We could also remove the mdelay() and simply send spurious interrupts to the new cpu targets for all the irqs that were previously handled by the cpu going offline. While that works, it produces spurious interrupt messages (nothing wrong, but still annoying during cpu offline, e.g. visible during suspend/resume).

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Gary Hade <garyhade@us.ibm.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
LKML-Reference: <20091026230002.043281924@sbs-t61.sc.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
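The "check APIC_IRR bits" step relies on how the local APIC exposes its 256-bit Interrupt Request Register: eight 32-bit registers spaced 0x10 bytes apart, starting at offset APIC_IRR (0x200). Below is a minimal standalone sketch of that offset arithmetic; the helper names are hypothetical and illustrative only, though the APIC_IRR constant matches the kernel's.

	#include <assert.h>

	#define APIC_IRR 0x200	/* base of the IRR: 8 x 32-bit regs, 16 bytes apart */

	/*
	 * Hypothetical helpers mirroring the patch's
	 * apic_read(APIC_IRR + (vector / 32 * 0x10)) expression.
	 */
	static unsigned int irr_reg_offset(unsigned int vector)
	{
		return APIC_IRR + (vector / 32) * 0x10;
	}

	static unsigned int irr_bit_mask(unsigned int vector)
	{
		return 1u << (vector % 32);
	}

	int main(void)
	{
		/* Vector 0x31 (49) lives in the second IRR register, bit 17. */
		assert(irr_reg_offset(0x31) == 0x210);
		assert(irr_bit_mask(0x31) == 1u << 17);
		return 0;
	}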
-rw-r--r--	arch/x86/kernel/irq.c	32
1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index b10a5e1da06c..8a82728d47c1 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -281,7 +281,7 @@ EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
 /* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
 void fixup_irqs(void)
 {
-	unsigned int irq;
+	unsigned int irq, vector;
 	static int warned;
 	struct irq_desc *desc;
 
@@ -336,9 +336,33 @@ void fixup_irqs(void)
 			printk("Cannot set affinity for irq %i\n", irq);
 	}
 
-	/* That doesn't seem sufficient.  Give it 1ms. */
-	local_irq_enable();
+	/*
+	 * We can remove mdelay() and then send spurious interrupts to
+	 * new cpu targets for all the irqs that were handled previously by
+	 * this cpu. While it works, I have seen spurious interrupt messages
+	 * (nothing wrong but still...).
+	 *
+	 * So for now, retain mdelay(1) and check the IRR and then send those
+	 * interrupts to new targets as this cpu is already offlined...
+	 */
 	mdelay(1);
-	local_irq_disable();
+
+	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+		unsigned int irr;
+
+		if (__get_cpu_var(vector_irq)[vector] < 0)
+			continue;
+
+		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
+		if (irr & (1 << (vector % 32))) {
+			irq = __get_cpu_var(vector_irq)[vector];
+
+			desc = irq_to_desc(irq);
+			spin_lock(&desc->lock);
+			if (desc->chip->retrigger)
+				desc->chip->retrigger(irq);
+			spin_unlock(&desc->lock);
+		}
+	}
 }
 #endif
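For readers who want to trace the new loop outside the kernel, here is a self-contained, userspace-style simulation of the scan-and-retrigger walk. Everything in it is a stand-in: fake_vector_irq and fake_irr model the per-cpu vector_irq table and the APIC IRR, and retrigger() stands in for desc->chip->retrigger(irq); only FIRST_EXTERNAL_VECTOR (0x20) and NR_VECTORS (256) match the kernel's values.

	#include <stdio.h>

	#define FIRST_EXTERNAL_VECTOR	0x20
	#define NR_VECTORS		256

	/* Stand-ins for per-cpu and APIC state; not the kernel's data structures. */
	static int fake_vector_irq[NR_VECTORS];		/* vector -> irq, -1 if unused */
	static unsigned int fake_irr[NR_VECTORS / 32];	/* simulated 256-bit IRR */

	static void retrigger(int irq)
	{
		/* In the kernel this would be desc->chip->retrigger(irq). */
		printf("retriggering irq %d at its new destination\n", irq);
	}

	int main(void)
	{
		unsigned int vector;

		for (vector = 0; vector < NR_VECTORS; vector++)
			fake_vector_irq[vector] = -1;

		/* Pretend irq 16 arrived on vector 0x31 while this cpu was going down. */
		fake_vector_irq[0x31] = 16;
		fake_irr[0x31 / 32] |= 1u << (0x31 % 32);

		/* The same walk fixup_irqs() now performs after mdelay(1). */
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
			if (fake_vector_irq[vector] < 0)
				continue;
			if (fake_irr[vector / 32] & (1u << (vector % 32)))
				retrigger(fake_vector_irq[vector]);
		}
		return 0;
	}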