author	Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>	2008-02-25 00:32:22 -0500
committer	Tony Luck <tony.luck@intel.com>	2008-03-04 17:16:20 -0500
commit	a6cd6322d594014240465210ccb290971469c6e8 (patch)
tree	4be5c34dc636fa85e7b6e39e0b3ff82510c7bb8a /arch/ia64/kernel
parent	86dffa4cd1a1d61fed68ab64c674d4094f2bdfe4 (diff)
[IA64] Fix irq migration in multiple vector domain
Fix the problem that the following error message is sometimes displayed during irq migration when vector domains are enabled:

  "Unexpected interrupt vector %d on CPU %d is not mapped to any IRQ!"

The cause of this problem is that an interrupt can be sent to the previous target CPU after the vector-to-irq mapping table has been cleaned up. To clean up the vector-to-irq map on the previous target CPU safely, change the irq migration in the multiple vector domain case as follows. The original idea is from the x86 interrupt management code.

- Delay the vector-to-irq table cleanup until the interrupts are actually sent to the new target CPUs. This ensures that the target CPU has been completely changed on the interrupt controller side.

- Even after the interrupts are sent to the new target CPUs, there can be pending interrupts remaining on the previous target CPU. So we need to delay cleaning up the vector-to-irq table until those pending interrupts are handled. For this, send an IPI to the previous target CPU with a lower priority vector and clean up the vector-to-irq table in its handler.

This patch only affects the irq migration code when multiple vector domains are enabled.

Signed-off-by: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
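To make the three-phase protocol above easier to follow before reading the diff, here is a minimal, self-contained userspace C sketch: prepare the move on set_affinity, complete it on the first interrupt taken on the new CPU, and drop the old mapping only in the cleanup IPI handler on the old CPU. The field names mirror the irq_cfg fields used by the patch, but the single-CPU "domain" ints, the printf stand-ins for the IPI and the table update, and the stubbed locking are illustrative assumptions, not kernel code; the real implementation is in the diff below.

#include <stdio.h>

struct irq_cfg {
	int vector;             /* currently programmed vector                 */
	int domain;             /* CPU the vector is bound to (simplified)     */
	int old_domain;         /* previous CPU, still to be cleaned up        */
	int move_in_progress;   /* phase 1 done, waiting for irq on new CPU    */
	int move_cleanup_count; /* outstanding cleanup IPIs                    */
};

/* Phase 1: on set_affinity, bind a new vector on the new CPU but keep
 * the old mapping alive so in-flight interrupts still resolve. */
static int irq_prepare_move(struct irq_cfg *cfg, int new_cpu, int new_vector)
{
	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -1;                      /* previous move not finished */
	cfg->old_domain = cfg->domain;
	cfg->domain = new_cpu;
	cfg->vector = new_vector;
	cfg->move_in_progress = 1;
	return 0;
}

/* Phase 2: on the first interrupt handled on the *new* CPU, the move has
 * reached the interrupt controller; kick the old CPU with a cleanup IPI. */
static void irq_complete_move(struct irq_cfg *cfg, int this_cpu)
{
	if (!cfg->move_in_progress || this_cpu == cfg->old_domain)
		return;
	cfg->move_cleanup_count = 1;            /* one old CPU in this sketch */
	printf("send IA64_IRQ_MOVE_VECTOR IPI to CPU %d\n", cfg->old_domain);
	cfg->move_in_progress = 0;
}

/* Phase 3: the cleanup IPI handler runs on the old CPU after any interrupts
 * still pending there, and only then drops the old vector-to-irq mapping. */
static void irq_move_cleanup(struct irq_cfg *cfg, int this_cpu)
{
	if (!cfg->move_cleanup_count || this_cpu != cfg->old_domain)
		return;
	printf("CPU %d: clear old vector->irq mapping\n", this_cpu);
	cfg->move_cleanup_count--;
}

int main(void)
{
	struct irq_cfg cfg = { .vector = 49, .domain = 0 };

	irq_prepare_move(&cfg, 1, 50);   /* affinity moved to CPU 1          */
	irq_complete_move(&cfg, 1);      /* first irq handled on CPU 1       */
	irq_move_cleanup(&cfg, 0);       /* cleanup IPI handled on old CPU 0 */
	return 0;
}

The property the sketch illustrates is the one the patch relies on: the old vector-to-irq entry is cleared only after the old CPU has had a chance to handle any interrupts still pending for it.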
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--	arch/ia64/kernel/iosapic.c	4
-rw-r--r--	arch/ia64/kernel/irq_ia64.c	134
-rw-r--r--	arch/ia64/kernel/msi_ia64.c	3
3 files changed, 109 insertions, 32 deletions
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 398e2fd1cd25..7b3292282dea 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -345,7 +345,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 	if (cpus_empty(mask))
 		return;
 
-	if (reassign_irq_vector(irq, first_cpu(mask)))
+	if (irq_prepare_move(irq, first_cpu(mask)))
 		return;
 
 	dest = cpu_physical_id(first_cpu(mask));
@@ -397,6 +397,7 @@ iosapic_end_level_irq (unsigned int irq)
 	struct iosapic_rte_info *rte;
 	int do_unmask_irq = 0;
 
+	irq_complete_move(irq);
 	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
 		do_unmask_irq = 1;
 		mask_irq(irq);
@@ -450,6 +451,7 @@ iosapic_ack_edge_irq (unsigned int irq)
 {
 	irq_desc_t *idesc = irq_desc + irq;
 
+	irq_complete_move(irq);
 	move_native_irq(irq);
 	/*
 	 * Once we have recorded IRQ_PENDING already, we can mask the
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 0b52f19ed046..2b8cf6e85af4 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -260,6 +260,8 @@ void __setup_vector_irq(int cpu)
 }
 
 #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+#define IA64_IRQ_MOVE_VECTOR	IA64_DEF_FIRST_DEVICE_VECTOR
+
 static enum vector_domain_type {
 	VECTOR_DOMAIN_NONE,
 	VECTOR_DOMAIN_PERCPU
@@ -272,6 +274,101 @@ static cpumask_t vector_allocation_domain(int cpu)
 	return CPU_MASK_ALL;
 }
 
+static int __irq_prepare_move(int irq, int cpu)
+{
+	struct irq_cfg *cfg = &irq_cfg[irq];
+	int vector;
+	cpumask_t domain;
+
+	if (cfg->move_in_progress || cfg->move_cleanup_count)
+		return -EBUSY;
+	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
+		return -EINVAL;
+	if (cpu_isset(cpu, cfg->domain))
+		return 0;
+	domain = vector_allocation_domain(cpu);
+	vector = find_unassigned_vector(domain);
+	if (vector < 0)
+		return -ENOSPC;
+	cfg->move_in_progress = 1;
+	cfg->old_domain = cfg->domain;
+	cfg->vector = IRQ_VECTOR_UNASSIGNED;
+	cfg->domain = CPU_MASK_NONE;
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
+	return 0;
+}
+
+int irq_prepare_move(int irq, int cpu)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	ret = __irq_prepare_move(irq, cpu);
+	spin_unlock_irqrestore(&vector_lock, flags);
+	return ret;
+}
+
+void irq_complete_move(unsigned irq)
+{
+	struct irq_cfg *cfg = &irq_cfg[irq];
+	cpumask_t cleanup_mask;
+	int i;
+
+	if (likely(!cfg->move_in_progress))
+		return;
+
+	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
+		return;
+
+	cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+	for_each_cpu_mask(i, cleanup_mask)
+		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
+	cfg->move_in_progress = 0;
+}
+
+static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
+{
+	int me = smp_processor_id();
+	ia64_vector vector;
+	unsigned long flags;
+
+	for (vector = IA64_FIRST_DEVICE_VECTOR;
+	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
+		int irq;
+		struct irq_desc *desc;
+		struct irq_cfg *cfg;
+		irq = __get_cpu_var(vector_irq)[vector];
+		if (irq < 0)
+			continue;
+
+		desc = irq_desc + irq;
+		cfg = irq_cfg + irq;
+		spin_lock(&desc->lock);
+		if (!cfg->move_cleanup_count)
+			goto unlock;
+
+		if (!cpu_isset(me, cfg->old_domain))
+			goto unlock;
+
+		spin_lock_irqsave(&vector_lock, flags);
+		__get_cpu_var(vector_irq)[vector] = -1;
+		cpu_clear(me, vector_table[vector]);
+		spin_unlock_irqrestore(&vector_lock, flags);
+		cfg->move_cleanup_count--;
+	unlock:
+		spin_unlock(&desc->lock);
+	}
+	return IRQ_HANDLED;
+}
+
+static struct irqaction irq_move_irqaction = {
+	.handler =	smp_irq_move_cleanup_interrupt,
+	.flags =	IRQF_DISABLED,
+	.name =		"irq_move"
+};
+
 static int __init parse_vector_domain(char *arg)
 {
 	if (!arg)
@@ -303,36 +400,6 @@ void destroy_and_reserve_irq(unsigned int irq)
 	spin_unlock_irqrestore(&vector_lock, flags);
 }
 
-static int __reassign_irq_vector(int irq, int cpu)
-{
-	struct irq_cfg *cfg = &irq_cfg[irq];
-	int vector;
-	cpumask_t domain;
-
-	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
-		return -EINVAL;
-	if (cpu_isset(cpu, cfg->domain))
-		return 0;
-	domain = vector_allocation_domain(cpu);
-	vector = find_unassigned_vector(domain);
-	if (vector < 0)
-		return -ENOSPC;
-	__clear_irq_vector(irq);
-	BUG_ON(__bind_irq_vector(irq, vector, domain));
-	return 0;
-}
-
-int reassign_irq_vector(int irq, int cpu)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&vector_lock, flags);
-	ret = __reassign_irq_vector(irq, cpu);
-	spin_unlock_irqrestore(&vector_lock, flags);
-	return ret;
-}
-
 /*
  * Dynamic irq allocate and deallocation for MSI
  */
@@ -578,6 +645,13 @@ init_IRQ (void)
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
 	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
 	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
+	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
+		BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
+		IA64_FIRST_DEVICE_VECTOR++;
+		register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
+	}
+#endif
 #endif
 #ifdef CONFIG_PERFMON
 	pfm_init_percpu();
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index e86d02959794..60c6ef67ebb2 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -57,7 +57,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 	if (!cpu_online(cpu))
 		return;
 
-	if (reassign_irq_vector(irq, cpu))
+	if (irq_prepare_move(irq, cpu))
 		return;
 
 	read_msi_msg(irq, &msg);
@@ -119,6 +119,7 @@ void ia64_teardown_msi_irq(unsigned int irq)
 
 static void ia64_ack_msi_irq(unsigned int irq)
 {
+	irq_complete_move(irq);
 	move_native_irq(irq);
 	ia64_eoi();
 }