author		Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>	2007-07-17 08:22:48 -0400
committer	Tony Luck <tony.luck@intel.com>				2007-07-17 12:57:42 -0400
commit		cd378f18cf73d92bf0b6e1e6b5759b5dd729a9f2 (patch)
tree		6960768a1ddecb74e6a5fa1dfc978e5df8635eb2 /arch/ia64/kernel/msi_ia64.c
parent		4994be1b3fe9120c88022ff5c0c33f6312b17adb (diff)
[IA64] Support irq migration across domain
Add support for migrating an IRQ to a CPU in a different vector domain:
re-assign the vector on the target CPU and rewrite the MSI address and
data registers to match the new destination and vector.
Signed-off-by: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
Signed-off-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/kernel/msi_ia64.c')
 arch/ia64/kernel/msi_ia64.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 1d22670cc88b..2fdbd5c3f213 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -13,6 +13,7 @@
 
 #define MSI_DATA_VECTOR_SHIFT		0
 #define MSI_DATA_VECTOR(v)		(((u8)v) << MSI_DATA_VECTOR_SHIFT)
+#define MSI_DATA_VECTOR_MASK		0xffffff00
 
 #define MSI_DATA_DELIVERY_SHIFT		8
 #define MSI_DATA_DELIVERY_FIXED	(0 << MSI_DATA_DELIVERY_SHIFT)
@@ -50,22 +51,29 @@ static struct irq_chip ia64_msi_chip;
 static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 {
 	struct msi_msg msg;
-	u32 addr;
+	u32 addr, data;
+	int cpu = first_cpu(cpu_mask);
 
-	/* IRQ migration across domain is not supported yet */
-	cpus_and(cpu_mask, cpu_mask, irq_to_domain(irq));
-	if (cpus_empty(cpu_mask))
+	if (!cpu_online(cpu))
+		return;
+
+	if (reassign_irq_vector(irq, cpu))
 		return;
 
 	read_msi_msg(irq, &msg);
 
 	addr = msg.address_lo;
 	addr &= MSI_ADDR_DESTID_MASK;
-	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask)));
+	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 	msg.address_lo = addr;
 
+	data = msg.data;
+	data &= MSI_DATA_VECTOR_MASK;
+	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
+	msg.data = data;
+
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpu_mask;
+	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
 }
 #endif /* CONFIG_SMP */
 
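For orientation, below is a minimal sketch of how ia64_set_msi_irq_affinity reads once this patch is applied. It is not a verbatim copy of the file, just the hunks above stitched together with explanatory comments; MSI_ADDR_DESTID_MASK, MSI_ADDR_DESTID_CPU() and MSI_DATA_VECTOR() are assumed to come from the unchanged parts of msi_ia64.c.

#ifdef CONFIG_SMP
static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
{
	struct msi_msg msg;
	u32 addr, data;
	int cpu = first_cpu(cpu_mask);	/* migrate to the first requested CPU */

	if (!cpu_online(cpu))
		return;

	/*
	 * Re-assign the vector for this IRQ on the target CPU.  This is
	 * what replaces the old "migration across domain is not supported"
	 * restriction: instead of masking the request against the current
	 * vector domain, a vector is allocated in the new CPU's domain.
	 */
	if (reassign_irq_vector(irq, cpu))
		return;

	read_msi_msg(irq, &msg);

	/* Point the MSI address at the new destination CPU. */
	addr = msg.address_lo;
	addr &= MSI_ADDR_DESTID_MASK;
	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	/*
	 * The vector may have changed along with the domain, so the
	 * vector field of the MSI data must be rewritten as well.
	 */
	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	write_msi_msg(irq, &msg);
	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
}
#endif /* CONFIG_SMP */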