author		Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>	2008-02-25 00:32:22 -0500
committer	Tony Luck <tony.luck@intel.com>			2008-03-04 17:16:20 -0500
commit		a6cd6322d594014240465210ccb290971469c6e8 (patch)
tree		4be5c34dc636fa85e7b6e39e0b3ff82510c7bb8a /arch/ia64/kernel/iosapic.c
parent		86dffa4cd1a1d61fed68ab64c674d4094f2bdfe4 (diff)
[IA64] Fix irq migration in multiple vector domain
Fix the problem that the following error message is sometimes
displayed during irq migration when multiple vector domain is enabled:

  "Unexpected interrupt vector %d on CPU %d is not mapped to any IRQ!"

The cause of this problem is that an interrupt can be delivered to the
previous target CPU after the vector-to-irq mapping table has already
been cleaned up. To clean up the vector-to-irq map on the previous
target CPU safely, change the irq migration in multiple vector domain
as follows. The original idea is from the x86 interrupt management
code.
- Delay the vector-to-irq table cleanup until interrupts are actually
  delivered to the new target CPUs. This ensures that the target CPU
  has completely changed on the interrupt controller side.

- Even after interrupts start arriving at the new target CPUs, there
  can still be pending interrupts left on the previous target CPU.
  So the vector-to-irq table cleanup must be delayed further, until
  those pending interrupts are handled. To do this, send an IPI to
  the previous target CPU on a lower priority vector and clean up the
  vector-to-irq table in its handler.
This patch affects only the irq migration code, and only when multiple
vector domain is enabled; the two-phase scheme described above is
sketched below.
Signed-off-by: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
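
To make the two-phase scheme described above concrete, here is a
minimal, self-contained userspace C sketch. Everything in it
(prepare_move, complete_move, the vector_irq layout) is a hypothetical
simplification for illustration only; the real code lives in the ia64
interrupt management code, runs under locks, and triggers phase 2 via
a lower priority IPI rather than a direct call.

	#include <stdio.h>

	#define NR_CPUS		4
	#define NR_VECTORS	256
	#define IRQ_NONE	(-1)

	/* Per-CPU vector-to-irq table, as in the commit message. */
	static int vector_irq[NR_CPUS][NR_VECTORS];

	struct irq_cfg {
		int vector, cpu;		/* current target */
		int old_vector, old_cpu;	/* previous target, valid while moving */
		int move_in_progress;
	};

	/* Phase 1 (migration time): program the new target, but leave the
	 * old vector-to-irq entry intact so a late interrupt arriving on
	 * the previous CPU still resolves to a valid irq. */
	static void prepare_move(struct irq_cfg *cfg, int irq, int cpu, int vector)
	{
		cfg->old_vector = cfg->vector;
		cfg->old_cpu = cfg->cpu;
		cfg->vector = vector;
		cfg->cpu = cpu;
		cfg->move_in_progress = 1;
		vector_irq[cpu][vector] = irq;
	}

	/* Phase 2 (first interrupt seen on the new target CPU): only now
	 * is cleanup safe. The real code sends a lower priority IPI to
	 * old_cpu and clears the stale entry in the IPI handler, after
	 * any still-pending interrupt there has been handled; here we
	 * simply clear it directly. */
	static void complete_move(struct irq_cfg *cfg)
	{
		if (!cfg->move_in_progress)
			return;
		vector_irq[cfg->old_cpu][cfg->old_vector] = IRQ_NONE;
		cfg->move_in_progress = 0;
	}

	int main(void)
	{
		struct irq_cfg cfg = { .vector = 48, .cpu = 0 };
		int irq = 16;

		for (int c = 0; c < NR_CPUS; c++)
			for (int v = 0; v < NR_VECTORS; v++)
				vector_irq[c][v] = IRQ_NONE;
		vector_irq[0][48] = irq;

		prepare_move(&cfg, irq, 1, 50);	/* migrate irq 16 to CPU 1 */
		/* A straggler on CPU 0 still maps; no "not mapped" error. */
		printf("late irq on old cpu -> %d\n", vector_irq[0][48]);
		complete_move(&cfg);		/* first interrupt on CPU 1 */
		printf("after cleanup       -> %d\n", vector_irq[0][48]);
		return 0;
	}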
Diffstat (limited to 'arch/ia64/kernel/iosapic.c')
 -rw-r--r--  arch/ia64/kernel/iosapic.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 398e2fd1cd25..7b3292282dea 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -345,7 +345,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 	if (cpus_empty(mask))
 		return;
 
-	if (reassign_irq_vector(irq, first_cpu(mask)))
+	if (irq_prepare_move(irq, first_cpu(mask)))
 		return;
 
 	dest = cpu_physical_id(first_cpu(mask));
@@ -397,6 +397,7 @@ iosapic_end_level_irq (unsigned int irq)
 	struct iosapic_rte_info *rte;
 	int do_unmask_irq = 0;
 
+	irq_complete_move(irq);
 	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
 		do_unmask_irq = 1;
 		mask_irq(irq);
@@ -450,6 +451,7 @@ iosapic_ack_edge_irq (unsigned int irq)
 {
 	irq_desc_t *idesc = irq_desc + irq;
 
+	irq_complete_move(irq);
 	move_native_irq(irq);
 	/*
 	 * Once we have recorded IRQ_PENDING already, we can mask the
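
Both handler hunks above add the same call at the very top of the
handler, before the interrupt is acknowledged or ended. A minimal
standalone C sketch of that ordering follows; the names handle_vector
and driver_handler are invented for illustration, and the kernel
handlers additionally mask/unmask the line and manage
IRQ_MOVE_PENDING.

	#include <stdio.h>

	#define NR_VECTORS	256
	#define IRQ_NONE	(-1)

	/* This CPU's vector-to-irq table. */
	static int vector_irq[NR_VECTORS];

	static void driver_handler(int irq)
	{
		printf("handled irq %d\n", irq);
	}

	static void handle_vector(int cpu, int vector)
	{
		int irq = vector_irq[vector];

		if (irq == IRQ_NONE) {
			/* This is exactly the message the patch eliminates. */
			printf("Unexpected interrupt vector %d on CPU %d "
			       "is not mapped to any IRQ!\n", vector, cpu);
			return;
		}
		/* In the hunks above, irq_complete_move(irq) runs here,
		 * first, before the edge ack / level EOI, so a finished
		 * migration is committed before the old mapping can be
		 * torn down underneath a pending interrupt. */
		driver_handler(irq);
	}

	int main(void)
	{
		for (int v = 0; v < NR_VECTORS; v++)
			vector_irq[v] = IRQ_NONE;
		vector_irq[50] = 16;

		handle_vector(1, 50);	/* mapped: runs the driver handler */
		handle_vector(1, 51);	/* unmapped: prints the error message */
		return 0;
	}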