diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-05 18:31:25 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-05 18:31:25 -0500 |
| commit | a77d2e081bbbccb38f42da45500dd089756efdfb (patch) | |
| tree | 8bb8d91e1c9e59143afcac8ff7d74341f7a0af80 /arch/x86/kernel/irq.c | |
| parent | 897e81bea1fcfcd2c5cdb720c9efdb25da9ff374 (diff) | |
| parent | 7d1849aff6687a135a8da3a75e32a00e3137a5e2 (diff) | |
Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (30 commits)
x86, apic: Enable lapic nmi watchdog on AMD Family 11h
x86: Remove unnecessary mdelay() from cpu_disable_common()
x86, ioapic: Document another case when level irq is seen as an edge
x86, ioapic: Fix the EOI register detection mechanism
x86, io-apic: Move the effort of clearing remoteIRR explicitly before migrating the irq
x86: SGI UV: Map low MMR ranges
x86: apic: Print out SRAT table APIC id in hex
x86: Re-get cfg_new in case reuse/move irq_desc
x86: apic: Remove not needed #ifdef
x86: io-apic: IO-APIC MMIO should not fail on resource insertion
x86: Remove asm/apicnum.h
x86: apic: Do not use stacked physid_mask_t
x86, apic: Get rid of apicid_to_cpu_present assign on 64-bit
x86, ioapic: Use snrpintf while set names for IO-APIC resourses
x86, apic: Use PAGE_SIZE instead of numbers
x86: Remove local_irq_enable()/local_irq_disable() in fixup_irqs()
x86: Use EOI register in io-apic on intel platforms
x86: Force irq complete move during cpu offline
x86: Remove move_cleanup_count from irq_cfg
x86, intr-remap: Avoid irq_chip mask/unmask in fixup_irqs() for intr-remapping
...
Diffstat (limited to 'arch/x86/kernel/irq.c')
| -rw-r--r-- | arch/x86/kernel/irq.c | 90 |
1 files changed, 90 insertions, 0 deletions
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 19212cb01558..fee6cc2b2079 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
| @@ -274,3 +274,93 @@ void smp_generic_interrupt(struct pt_regs *regs) | |||
| 274 | } | 274 | } |
| 275 | 275 | ||
| 276 | EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); | 276 | EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); |
| 277 | |||
#ifdef CONFIG_HOTPLUG_CPU
/*
 * A CPU has been removed from cpu_online_mask.  Walk every irq and move
 * its affinity off the dying CPU, then retrigger any interrupt that is
 * still pending in the local APIC so it is re-delivered to a live target.
 */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		int affinity_broken = 0;
		int affinity_set = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		/* irq 2 is the cascade and never has a real handler */
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		spin_lock(&desc->lock);

		affinity = desc->affinity;
		if (!irq_has_action(irq) ||
		    cpumask_equal(affinity, cpu_online_mask)) {
			/* nothing to migrate for this irq */
			spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete any in-progress irq move now.  This CPU is going
		 * down, and in the non intr-remapping case we cannot wait
		 * for the interrupt to arrive here before finishing the move.
		 */
		irq_force_complete_move(irq);

		/* no online CPU left in the mask: fall back to all CPUs */
		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			affinity_broken = 1;
			affinity = cpu_all_mask;
		}

		/* mask while retargeting, unless the chip can move in-context */
		if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask)
			desc->chip->mask(irq);

		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, affinity);
		else if (!(warned++))
			affinity_set = 0;

		if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
			desc->chip->unmask(irq);

		spin_unlock(&desc->lock);

		if (affinity_broken && affinity_set)
			printk("Broke affinity for irq %i\n", irq);
		else if (!affinity_set)
			printk("Cannot set affinity for irq %i\n", irq);
	}

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu.  While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		/* skip vectors with no irq assigned on this cpu */
		if (__get_cpu_var(vector_irq)[vector] < 0)
			continue;

		/* IRR is a 256-bit register split into 32-bit words, 0x10 apart */
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			irq = __get_cpu_var(vector_irq)[vector];

			/* still pending here: punt it to the new target cpu */
			desc = irq_to_desc(irq);
			spin_lock(&desc->lock);
			if (desc->chip->retrigger)
				desc->chip->retrigger(irq);
			spin_unlock(&desc->lock);
		}
	}
}
#endif
