author    Yinghai Lu <yinghai@kernel.org>    2009-04-27 20:58:23 -0400
committer Ingo Molnar <mingo@elte.hu>    2009-04-28 06:21:15 -0400
commit    fcef5911c7ea89b80d5bfc727f402f37c9eefd57 (patch)
tree      e37ad2cda662933b01b2ba6e28e570a18ef1edc2 /arch/x86/kernel/apic/io_apic.c
parent    9ec4fa271faf2db3b8e1419c998da1ca6b094eb6 (diff)
x86/irq: remove leftover code from NUMA_MIGRATE_IRQ_DESC
The original feature of migrating irq_desc dynamically was too fragile and was causing problems: it caused crashes on systems with lots of MSI-X capable cards when the user-space irq balancer was enabled.

We now have new patches that create irq_desc according to the device's NUMA node. This patch removes the leftover bits of the dynamic balancer.

[ Impact: remove dead code ]

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
LKML-Reference: <49F654AF.8000808@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
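[ Editorial note: the replacement approach referred to above allocates each irq_desc on the NUMA node of the device that owns the interrupt, rather than migrating descriptors after the fact. The sketch below only illustrates that general idea; the struct and function names (demo_irq_desc, demo_alloc_irq_desc) are hypothetical and this is not the actual kernel implementation. ]

#include <linux/slab.h>

/*
 * Illustrative sketch: allocate an interrupt descriptor on the NUMA node
 * of the device that will use the IRQ, so no later migration is needed.
 * Names are hypothetical; the real code lives in kernel/irq/.
 */
struct demo_irq_desc {
        unsigned int irq;
        void *chip_data;
};

static struct demo_irq_desc *demo_alloc_irq_desc(unsigned int irq, int node)
{
        struct demo_irq_desc *desc;

        /* kzalloc_node() places the allocation on the requested NUMA node */
        desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
        if (!desc)
                return NULL;

        desc->irq = irq;
        return desc;
}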
Diffstat (limited to 'arch/x86/kernel/apic/io_apic.c')
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 56
1 file changed, 4 insertions(+), 52 deletions(-)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 30da617d18e..9fbf0f7ec7e 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -148,9 +148,6 @@ struct irq_cfg {
         unsigned move_cleanup_count;
         u8 vector;
         u8 move_in_progress : 1;
-#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
-        u8 move_desc_pending : 1;
-#endif
 };
 
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
@@ -254,8 +251,7 @@ int arch_init_chip_data(struct irq_desc *desc, int cpu)
         return 0;
 }
 
-#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
-
+/* for move_irq_desc */
 static void
 init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu)
 {
@@ -356,19 +352,7 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
                 old_desc->chip_data = NULL;
         }
 }
-
-static void
-set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
-{
-        struct irq_cfg *cfg = desc->chip_data;
-
-        if (!cfg->move_in_progress) {
-                /* it means that domain is not changed */
-                if (!cpumask_intersects(desc->affinity, mask))
-                        cfg->move_desc_pending = 1;
-        }
-}
-#endif
+/* end for move_irq_desc */
 
 #else
 static struct irq_cfg *irq_cfg(unsigned int irq)
@@ -378,13 +362,6 @@ static struct irq_cfg *irq_cfg(unsigned int irq)
 
 #endif
 
-#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
-static inline void
-set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
-{
-}
-#endif
-
 struct io_apic {
         unsigned int index;
         unsigned int unused[3];
@@ -592,9 +569,6 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
         if (assign_irq_vector(irq, cfg, mask))
                 return BAD_APICID;
 
-        /* check that before desc->addinity get updated */
-        set_extra_move_desc(desc, mask);
-
         cpumask_copy(desc->affinity, mask);
 
         return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
@@ -2393,8 +2367,6 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
         if (assign_irq_vector(irq, cfg, mask))
                 return;
 
-        set_extra_move_desc(desc, mask);
-
         dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
 
         irte.vector = cfg->vector;
@@ -2491,34 +2463,14 @@ static void irq_complete_move(struct irq_desc **descp)
         struct irq_cfg *cfg = desc->chip_data;
         unsigned vector, me;
 
-        if (likely(!cfg->move_in_progress)) {
-#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
-                if (likely(!cfg->move_desc_pending))
-                        return;
-
-                /* domain has not changed, but affinity did */
-                me = smp_processor_id();
-                if (cpumask_test_cpu(me, desc->affinity)) {
-                        *descp = desc = move_irq_desc(desc, me);
-                        /* get the new one */
-                        cfg = desc->chip_data;
-                        cfg->move_desc_pending = 0;
-                }
-#endif
+        if (likely(!cfg->move_in_progress))
                 return;
-        }
 
         vector = ~get_irq_regs()->orig_ax;
         me = smp_processor_id();
 
-        if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
-#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
-                *descp = desc = move_irq_desc(desc, me);
-                /* get the new one */
-                cfg = desc->chip_data;
-#endif
+        if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
                 send_cleanup_vector(cfg);
-        }
 }
 #else
 static inline void irq_complete_move(struct irq_desc **descp) {}