author	Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>	2007-08-28 19:01:21 -0400
committer	Tony Luck <tony.luck@intel.com>	2007-08-28 19:01:21 -0400
commit	17764d2437b0c4440e0718205f2c26dbaa72bc27 (patch)
tree	37ddca9b39dbc184b27503d226cba86f48f8c525
parent	b07d68b5ca4d55a16fab223d63d5fb36f89ff42f (diff)
[IA64] Fix unexpected interrupt vector handling
Fix handling of spurious interrupts that are not mapped to any IRQ. Currently, such a spurious interrupt is handled as IRQ 15 (== IA64_SPURIOUS_INT_VECTOR), which is wrong because vector != irq. Spurious interrupts that are not mapped to any IRQ need special handling.

Signed-off-by: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r--	arch/ia64/kernel/irq_ia64.c	28
1 file changed, 22 insertions(+), 6 deletions(-)
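For context, here is a minimal sketch of the vector-to-IRQ lookup that this patch guards. The per-CPU vector_irq table, local_vector_to_irq(), generic_handle_irq() and the irq < 0 check all appear in the patch below; the dispatch_vector() wrapper and the exact body of local_vector_to_irq() are illustrative assumptions, not part of the patch.

/*
 * Sketch only: how a vector is translated to an IRQ and why -1 now
 * means "no IRQ mapped". dispatch_vector() is a hypothetical wrapper;
 * the lookup helper is assumed to simply index the per-CPU table.
 */
DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1	/* -1: vector not bound to any IRQ */
};

static inline int local_vector_to_irq(ia64_vector vec)
{
	/* Assumption: the helper just reads this CPU's vector_irq[] entry. */
	return __get_cpu_var(vector_irq)[vec];
}

static void dispatch_vector(ia64_vector vector)
{
	int irq = local_vector_to_irq(vector);

	if (unlikely(irq < 0))		/* unmapped spurious interrupt */
		printk(KERN_ERR "vector %d on CPU %d is not mapped to any IRQ\n",
		       vector, smp_processor_id());
	else
		generic_handle_irq(irq);
}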
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index c47c8acc96e3..00a4599e5f47 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -82,7 +82,7 @@ struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
 };
 
 DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
-	[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
+	[0 ... IA64_NUM_VECTORS - 1] = -1
 };
 
 static cpumask_t vector_table[IA64_NUM_VECTORS] = {
@@ -179,7 +179,7 @@ static void __clear_irq_vector(int irq)
 	domain = cfg->domain;
 	cpus_and(mask, cfg->domain, cpu_online_map);
 	for_each_cpu_mask(cpu, mask)
-		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
+		per_cpu(vector_irq, cpu)[vector] = -1;
 	cfg->vector = IRQ_VECTOR_UNASSIGNED;
 	cfg->domain = CPU_MASK_NONE;
 	irq_status[irq] = IRQ_UNUSED;
@@ -249,7 +249,7 @@ void __setup_vector_irq(int cpu)
 
 	/* Clear vector_irq */
 	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
-		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
+		per_cpu(vector_irq, cpu)[vector] = -1;
 	/* Mark the inuse vectors */
 	for (irq = 0; irq < NR_IRQS; ++irq) {
 		if (!cpu_isset(cpu, irq_cfg[irq].domain))
@@ -432,10 +432,18 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 		} else if (unlikely(IS_RESCHEDULE(vector)))
 			kstat_this_cpu.irqs[vector]++;
 		else {
+			int irq = local_vector_to_irq(vector);
+
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
 			ia64_srlz_d();
 
-			generic_handle_irq(local_vector_to_irq(vector));
+			if (unlikely(irq < 0)) {
+				printk(KERN_ERR "%s: Unexpected interrupt "
+				       "vector %d on CPU %d is not mapped "
+				       "to any IRQ!\n", __FUNCTION__, vector,
+				       smp_processor_id());
+			} else
+				generic_handle_irq(irq);
 
 			/*
 			 * Disable interrupts and send EOI:
@@ -483,6 +491,7 @@ void ia64_process_pending_intr(void)
 			kstat_this_cpu.irqs[vector]++;
 		else {
 			struct pt_regs *old_regs = set_irq_regs(NULL);
+			int irq = local_vector_to_irq(vector);
 
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
 			ia64_srlz_d();
@@ -493,8 +502,15 @@ void ia64_process_pending_intr(void)
 			 * it will work. I hope it works!.
 			 * Probably could shared code.
 			 */
-			vectors_in_migration[local_vector_to_irq(vector)]=0;
-			generic_handle_irq(local_vector_to_irq(vector));
+			if (unlikely(irq < 0)) {
+				printk(KERN_ERR "%s: Unexpected interrupt "
+				       "vector %d on CPU %d not being mapped "
+				       "to any IRQ!!\n", __FUNCTION__, vector,
+				       smp_processor_id());
+			} else {
+				vectors_in_migration[irq]=0;
+				generic_handle_irq(irq);
+			}
 			set_irq_regs(old_regs);
 
 			/*