author     Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>   2007-07-17 08:22:48 -0400
committer  Tony Luck <tony.luck@intel.com>                      2007-07-17 12:57:42 -0400
commit     cd378f18cf73d92bf0b6e1e6b5759b5dd729a9f2 (patch)
tree       6960768a1ddecb74e6a5fa1dfc978e5df8635eb2 /arch
parent     4994be1b3fe9120c88022ff5c0c33f6312b17adb (diff)
[IA64] Support irq migration across domain
Add support for IRQ migration across vector domains.
Signed-off-by: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
Signed-off-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/kernel/iosapic.c   | 20
-rw-r--r--  arch/ia64/kernel/irq_ia64.c  | 42
-rw-r--r--  arch/ia64/kernel/msi_ia64.c  | 20
3 files changed, 70 insertions, 12 deletions
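
Both affinity handlers previously refused to migrate an IRQ outside its current vector domain (the removed "IRQ migration across domain is not supported yet" checks below). The new reassign_irq_vector() lets them allocate a vector in the target CPU's domain first, after which the interrupt source must be reprogrammed with both the new destination and the possibly changed vector. A minimal sketch of that caller-side pattern, assuming only helpers the patch itself uses (set_affinity_sketch is a hypothetical name, not code from the patch):

```c
/* Hypothetical illustration of the post-patch affinity path; not part of the patch. */
static void set_affinity_sketch(unsigned int irq, cpumask_t mask)
{
        int cpu;

        cpus_and(mask, mask, cpu_online_map);   /* consider only online CPUs */
        if (cpus_empty(mask))
                return;
        cpu = first_cpu(mask);

        /* Rebind the IRQ into cpu's vector domain; its vector may change here. */
        if (reassign_irq_vector(irq, cpu))
                return;

        /*
         * Reprogram the source (IOSAPIC RTE low word or MSI data word) with
         * cpu_physical_id(cpu) as the destination and irq_to_vector(irq) as
         * the vector, as the hunks below do.
         */
}
```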
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index e647254c2707..c101c8bff27b 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -354,11 +354,13 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 
         irq &= (~IA64_IRQ_REDIRECTED);
 
-        /* IRQ migration across domain is not supported yet */
-        cpus_and(mask, mask, irq_to_domain(irq));
+        cpus_and(mask, mask, cpu_online_map);
         if (cpus_empty(mask))
                 return;
 
+        if (reassign_irq_vector(irq, first_cpu(mask)))
+                return;
+
         dest = cpu_physical_id(first_cpu(mask));
 
         if (list_empty(&iosapic_intr_info[irq].rtes))
@@ -376,6 +378,8 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
         else
                 /* change delivery mode to fixed */
                 low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
+        low32 &= IOSAPIC_VECTOR_MASK;
+        low32 |= irq_to_vector(irq);
 
         iosapic_intr_info[irq].low32 = low32;
         iosapic_intr_info[irq].dest = dest;
@@ -404,10 +408,20 @@ iosapic_end_level_irq (unsigned int irq)
 {
         ia64_vector vec = irq_to_vector(irq);
         struct iosapic_rte_info *rte;
+        int do_unmask_irq = 0;
+
+        if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
+                do_unmask_irq = 1;
+                mask_irq(irq);
+        }
 
-        move_native_irq(irq);
         list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
                 iosapic_eoi(rte->iosapic->addr, vec);
+
+        if (unlikely(do_unmask_irq)) {
+                move_masked_irq(irq);
+                unmask_irq(irq);
+        }
 }
 
 #define iosapic_shutdown_level_irq      mask_irq
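
The iosapic_end_level_irq() change replaces the plain move_native_irq() call with a masked migration: if a move is pending, the line is masked before the EOIs and only retargeted afterwards. A restatement of that ordering with the reasoning spelled out in comments (end_level_irq_sketch is a hypothetical name; the helpers are the ones the hunk itself uses):

```c
/* Illustrative restatement of the ordering above; not a replacement for the hunk. */
static void end_level_irq_sketch(unsigned int irq)
{
        int do_unmask_irq = 0;

        if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
                do_unmask_irq = 1;
                mask_irq(irq);          /* quiesce the level-triggered line first */
        }

        /* ... EOI every RTE wired to this irq, as in the hunk above ... */

        if (unlikely(do_unmask_irq)) {
                move_masked_irq(irq);   /* retarget while masked, so nothing can
                                         * fire on the old vector/CPU mid-update */
                unmask_irq(irq);        /* re-enable on the new destination */
        }
}
```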
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index a3667631ed80..22806b94025a 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -172,15 +172,13 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain)
         return ret;
 }
 
-static void clear_irq_vector(int irq)
+static void __clear_irq_vector(int irq)
 {
-        unsigned long flags;
         int vector, cpu, pos;
         cpumask_t mask;
         cpumask_t domain;
         struct irq_cfg *cfg = &irq_cfg[irq];
 
-        spin_lock_irqsave(&vector_lock, flags);
         BUG_ON((unsigned)irq >= NR_IRQS);
         BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
         vector = cfg->vector;
@@ -193,6 +191,14 @@ static void clear_irq_vector(int irq)
         irq_status[irq] = IRQ_UNUSED;
         pos = vector - IA64_FIRST_DEVICE_VECTOR;
         cpus_andnot(vector_table[pos], vector_table[pos], domain);
+}
+
+static void clear_irq_vector(int irq)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&vector_lock, flags);
+        __clear_irq_vector(irq);
         spin_unlock_irqrestore(&vector_lock, flags);
 }
 
@@ -275,6 +281,36 @@ void destroy_and_reserve_irq(unsigned int irq)
         reserve_irq(irq);
 }
 
+static int __reassign_irq_vector(int irq, int cpu)
+{
+        struct irq_cfg *cfg = &irq_cfg[irq];
+        int vector;
+        cpumask_t domain;
+
+        if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
+                return -EINVAL;
+        if (cpu_isset(cpu, cfg->domain))
+                return 0;
+        domain = vector_allocation_domain(cpu);
+        vector = find_unassigned_vector(domain);
+        if (vector < 0)
+                return -ENOSPC;
+        __clear_irq_vector(irq);
+        BUG_ON(__bind_irq_vector(irq, vector, domain));
+        return 0;
+}
+
+int reassign_irq_vector(int irq, int cpu)
+{
+        unsigned long flags;
+        int ret;
+
+        spin_lock_irqsave(&vector_lock, flags);
+        ret = __reassign_irq_vector(irq, cpu);
+        spin_unlock_irqrestore(&vector_lock, flags);
+        return ret;
+}
+
 /*
  * Dynamic irq allocate and deallocation for MSI
  */
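
clear_irq_vector() is split so that the lock-free body, __clear_irq_vector(), can be composed with __bind_irq_vector() inside a single vector_lock critical section in __reassign_irq_vector(); the IRQ is therefore never observable without a vector, and the helper never tries to re-take a lock it already holds. The skeleton of that locked/unlocked convention, with hypothetical names (operation, __operation, and do_work_locked stand in for the real helpers):

```c
/* Skeleton of the "__helper assumes the lock is held" convention used above. */
static void __operation(int irq)
{
        /* vector_lock held by the caller: safe to touch the vector tables. */
        do_work_locked(irq);            /* hypothetical stand-in for __clear/__bind */
}

void operation(int irq)
{
        unsigned long flags;

        spin_lock_irqsave(&vector_lock, flags);
        __operation(irq);               /* several __ helpers can be chained here
                                         * without dropping the lock in between */
        spin_unlock_irqrestore(&vector_lock, flags);
}
```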
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 1d22670cc88b..2fdbd5c3f213 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -13,6 +13,7 @@
 
 #define MSI_DATA_VECTOR_SHIFT           0
 #define MSI_DATA_VECTOR(v)              (((u8)v) << MSI_DATA_VECTOR_SHIFT)
+#define MSI_DATA_VECTOR_MASK            0xffffff00
 
 #define MSI_DATA_DELIVERY_SHIFT         8
 #define MSI_DATA_DELIVERY_FIXED         (0 << MSI_DATA_DELIVERY_SHIFT)
@@ -50,22 +51,29 @@ static struct irq_chip ia64_msi_chip;
 static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 {
         struct msi_msg msg;
-        u32 addr;
+        u32 addr, data;
+        int cpu = first_cpu(cpu_mask);
 
-        /* IRQ migration across domain is not supported yet */
-        cpus_and(cpu_mask, cpu_mask, irq_to_domain(irq));
-        if (cpus_empty(cpu_mask))
+        if (!cpu_online(cpu))
+                return;
+
+        if (reassign_irq_vector(irq, cpu))
                 return;
 
         read_msi_msg(irq, &msg);
 
         addr = msg.address_lo;
         addr &= MSI_ADDR_DESTID_MASK;
-        addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask)));
+        addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
         msg.address_lo = addr;
 
+        data = msg.data;
+        data &= MSI_DATA_VECTOR_MASK;
+        data |= MSI_DATA_VECTOR(irq_to_vector(irq));
+        msg.data = data;
+
         write_msi_msg(irq, &msg);
-        irq_desc[irq].affinity = cpu_mask;
+        irq_desc[irq].affinity = cpumask_of_cpu(cpu);
 }
 #endif /* CONFIG_SMP */
 
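
Since migration can now change the vector, the MSI handler rewrites the vector field in the low byte of msg.data (via the new MSI_DATA_VECTOR_MASK) in addition to the destination ID in msg.address_lo. A standalone check of that mask-and-insert arithmetic, compilable as ordinary user-space C; the constants are the ones the patch defines (types adapted to <stdint.h>), and the sample data word and vector are invented for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Same constants the patch adds/uses in msi_ia64.c. */
#define MSI_DATA_VECTOR_SHIFT   0
#define MSI_DATA_VECTOR(v)      (((uint8_t)(v)) << MSI_DATA_VECTOR_SHIFT)
#define MSI_DATA_VECTOR_MASK    0xffffff00

int main(void)
{
        uint32_t data = 0x0000c0b1;     /* made-up current MSI data word     */
        uint8_t new_vector = 0x62;      /* made-up result of irq_to_vector() */

        data &= MSI_DATA_VECTOR_MASK;           /* clear the old vector (low byte) */
        data |= MSI_DATA_VECTOR(new_vector);    /* insert the new vector           */

        printf("rewritten MSI data word: 0x%08x\n", (unsigned int)data); /* 0x0000c062 */
        return 0;
}
```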