author		Yinghai Lu <yhlu.kernel@gmail.com>	2008-08-19 23:50:05 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-16 10:52:29 -0400
commit		08678b0841267c1d00d771fe01548d86043d065e (patch)
tree		7debb21f9e9a768ced43077f7376797a0c46f8c0 /arch/x86/kernel/io_apic_64.c
parent		bfea1238beac9d306eeac081c67de5ca6aec4c7a (diff)
generic: sparse irqs: use irq_desc() together with dyn_array, instead of irq_desc[]
Add CONFIG_HAVE_SPARSE_IRQ to use a condensed irq_desc array, and get rid of irq_desc[] array assumptions.

Preallocate 32 irq_desc entries; irq_desc() will try to get more when needed.

( No change in functionality is expected anywhere, except the odd build failure where we missed a code site or where a crossing commit introduces new irq_desc[] usage. )

v2: according to Eric, change get_irq_desc() to irq_desc()

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
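The change is mechanical across the file: every direct irq_desc[irq] array access is replaced by a descriptor pointer obtained from irq_to_desc(irq), so the backing store can become a condensed, on-demand array under CONFIG_HAVE_SPARSE_IRQ. Below is a minimal, self-contained userspace sketch of that calling pattern; the struct layout and the grow-on-demand lookup are simplified stand-ins for illustration only, not the kernel's dyn_array implementation.

#include <stdio.h>
#include <stdlib.h>

struct irq_desc {
	unsigned int status;	/* e.g. IRQ_LEVEL, IRQ_MOVE_PENDING */
	unsigned long affinity;	/* stand-in for cpumask_t */
};

#define NR_PREALLOC 32		/* the patch preallocates 32 descriptors */

static struct irq_desc *descs;
static unsigned int nr_descs;

/* Simplified lookup: grow the backing store on demand instead of
 * indexing a fixed irq_desc[NR_IRQS] array. */
static struct irq_desc *irq_to_desc(unsigned int irq)
{
	if (irq >= nr_descs) {
		unsigned int want = (irq + 1 > NR_PREALLOC) ? irq + 1 : NR_PREALLOC;
		struct irq_desc *tmp = realloc(descs, want * sizeof(*tmp));

		if (!tmp)
			return NULL;
		for (unsigned int i = nr_descs; i < want; i++)
			tmp[i] = (struct irq_desc){ 0 };
		descs = tmp;
		nr_descs = want;
	}
	return &descs[irq];
}

int main(void)
{
	/* Old style (removed by the patch):  irq_desc[irq].affinity = mask;
	 * New style: look the descriptor up once, then use the pointer. */
	struct irq_desc *desc = irq_to_desc(40);

	if (!desc)
		return 1;
	desc->affinity = 0x3;	/* toy mask: CPUs 0 and 1 */
	printf("irq 40 affinity mask: %#lx (%u descriptors allocated)\n",
	       desc->affinity, nr_descs);
	return 0;
}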
Diffstat (limited to 'arch/x86/kernel/io_apic_64.c')
-rw-r--r--	arch/x86/kernel/io_apic_64.c	75
1 files changed, 49 insertions, 26 deletions
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 93a3ffabfe6a..cab5a25d81b1 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -345,6 +345,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 	unsigned long flags;
 	unsigned int dest;
 	cpumask_t tmp;
+	struct irq_desc *desc;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
@@ -361,9 +362,10 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 	 */
 	dest = SET_APIC_LOGICAL_ID(dest);
 
+	desc = irq_to_desc(irq);
 	spin_lock_irqsave(&ioapic_lock, flags);
 	__target_IO_APIC_irq(irq, dest, cfg->vector);
-	irq_desc[irq].affinity = mask;
+	desc->affinity = mask;
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 #endif
@@ -933,14 +935,17 @@ static struct irq_chip ir_ioapic_chip;
 
 static void ioapic_register_intr(int irq, unsigned long trigger)
 {
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
 	if (trigger)
-		irq_desc[irq].status |= IRQ_LEVEL;
+		desc->status |= IRQ_LEVEL;
 	else
-		irq_desc[irq].status &= ~IRQ_LEVEL;
+		desc->status &= ~IRQ_LEVEL;
 
 #ifdef CONFIG_INTR_REMAP
 	if (irq_remapped(irq)) {
-		irq_desc[irq].status |= IRQ_MOVE_PCNTXT;
+		desc->status |= IRQ_MOVE_PCNTXT;
 		if (trigger)
 			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
 						      handle_fasteoi_irq,
@@ -1596,10 +1601,10 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
 static void migrate_ioapic_irq(int irq, cpumask_t mask)
 {
 	struct irq_cfg *cfg = irq_cfg + irq;
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc;
 	cpumask_t tmp, cleanup_mask;
 	struct irte irte;
-	int modify_ioapic_rte = desc->status & IRQ_LEVEL;
+	int modify_ioapic_rte;
 	unsigned int dest;
 	unsigned long flags;
 
@@ -1616,6 +1621,8 @@ static void migrate_ioapic_irq(int irq, cpumask_t mask)
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
+	desc = irq_to_desc(irq);
+	modify_ioapic_rte = desc->status & IRQ_LEVEL;
 	if (modify_ioapic_rte) {
 		spin_lock_irqsave(&ioapic_lock, flags);
 		__target_IO_APIC_irq(irq, dest, cfg->vector);
@@ -1637,12 +1644,13 @@ static void migrate_ioapic_irq(int irq, cpumask_t mask)
 		cfg->move_in_progress = 0;
 	}
 
-	irq_desc[irq].affinity = mask;
+	desc->affinity = mask;
 }
 
 static int migrate_irq_remapped_level(int irq)
 {
 	int ret = -1;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	mask_IO_APIC_irq(irq);
 
@@ -1658,11 +1666,11 @@ static int migrate_irq_remapped_level(int irq)
 	}
 
 	/* everthing is clear. we have right of way */
-	migrate_ioapic_irq(irq, irq_desc[irq].pending_mask);
+	migrate_ioapic_irq(irq, desc->pending_mask);
 
 	ret = 0;
-	irq_desc[irq].status &= ~IRQ_MOVE_PENDING;
-	cpus_clear(irq_desc[irq].pending_mask);
+	desc->status &= ~IRQ_MOVE_PENDING;
+	cpus_clear(desc->pending_mask);
 
 unmask:
 	unmask_IO_APIC_irq(irq);
@@ -1674,7 +1682,7 @@ static void ir_irq_migration(struct work_struct *work)
 	int irq;
 
 	for (irq = 0; irq < nr_irqs; irq++) {
-		struct irq_desc *desc = irq_desc + irq;
+		struct irq_desc *desc = irq_to_desc(irq);
 		if (desc->status & IRQ_MOVE_PENDING) {
 			unsigned long flags;
 
@@ -1686,8 +1694,7 @@ static void ir_irq_migration(struct work_struct *work)
 				continue;
 			}
 
-			desc->chip->set_affinity(irq,
-						irq_desc[irq].pending_mask);
+			desc->chip->set_affinity(irq, desc->pending_mask);
 			spin_unlock_irqrestore(&desc->lock, flags);
 		}
 	}
@@ -1698,9 +1705,11 @@ static void ir_irq_migration(struct work_struct *work)
  */
 static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 {
-	if (irq_desc[irq].status & IRQ_LEVEL) {
-		irq_desc[irq].status |= IRQ_MOVE_PENDING;
-		irq_desc[irq].pending_mask = mask;
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc->status & IRQ_LEVEL) {
+		desc->status |= IRQ_MOVE_PENDING;
+		desc->pending_mask = mask;
 		migrate_irq_remapped_level(irq);
 		return;
 	}
@@ -1725,7 +1734,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 		if (irq >= nr_irqs)
 			continue;
 
-		desc = irq_desc + irq;
+		desc = irq_to_desc(irq);
 		cfg = irq_cfg + irq;
 		spin_lock(&desc->lock);
 		if (!cfg->move_cleanup_count)
@@ -1791,7 +1800,7 @@ static void ack_apic_level(unsigned int irq)
 	irq_complete_move(irq);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	/* If we are moving the irq we need to mask it */
-	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
+	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
 		do_unmask_irq = 1;
 		mask_IO_APIC_irq(irq);
 	}
@@ -1868,6 +1877,7 @@ static struct irq_chip ir_ioapic_chip __read_mostly = {
 static inline void init_IO_APIC_traps(void)
 {
 	int irq;
+	struct irq_desc *desc;
 
 	/*
 	 * NOTE! The local APIC isn't very good at handling
@@ -1889,9 +1899,11 @@ static inline void init_IO_APIC_traps(void)
 			 */
 			if (irq < 16)
 				make_8259A_irq(irq);
-			else
+			else {
+				desc = irq_to_desc(irq);
 				/* Strange. Oh, well.. */
-				irq_desc[irq].chip = &no_irq_chip;
+				desc->chip = &no_irq_chip;
+			}
 		}
 	}
 }
@@ -1926,7 +1938,10 @@ static struct irq_chip lapic_chip __read_mostly = {
 
 static void lapic_register_intr(int irq)
 {
-	irq_desc[irq].status &= ~IRQ_LEVEL;
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	desc->status &= ~IRQ_LEVEL;
 	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
 				      "edge");
 }
@@ -2402,6 +2417,7 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 	struct msi_msg msg;
 	unsigned int dest;
 	cpumask_t tmp;
+	struct irq_desc *desc;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
@@ -2421,7 +2437,8 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = mask;
+	desc = irq_to_desc(irq);
+	desc->affinity = mask;
 }
 
 #ifdef CONFIG_INTR_REMAP
@@ -2435,6 +2452,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 	unsigned int dest;
 	cpumask_t tmp, cleanup_mask;
 	struct irte irte;
+	struct irq_desc *desc;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
@@ -2469,7 +2487,8 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 		cfg->move_in_progress = 0;
 	}
 
-	irq_desc[irq].affinity = mask;
+	desc = irq_to_desc(irq);
+	desc->affinity = mask;
 }
 #endif
 #endif /* CONFIG_SMP */
@@ -2543,7 +2562,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
 
 #ifdef CONFIG_INTR_REMAP
 	if (irq_remapped(irq)) {
-		struct irq_desc *desc = irq_desc + irq;
+		struct irq_desc *desc = irq_to_desc(irq);
 		/*
 		 * irq migration in process context
 		 */
@@ -2655,6 +2674,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	struct msi_msg msg;
 	unsigned int dest;
 	cpumask_t tmp;
+	struct irq_desc *desc;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
@@ -2674,7 +2694,8 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = mask;
+	desc = irq_to_desc(irq);
+	desc->affinity = mask;
 }
 #endif /* CONFIG_SMP */
 
@@ -2731,6 +2752,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 	struct irq_cfg *cfg = irq_cfg + irq;
 	unsigned int dest;
 	cpumask_t tmp;
+	struct irq_desc *desc;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
@@ -2743,7 +2765,8 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 	dest = cpu_mask_to_apicid(tmp);
 
 	target_ht_irq(irq, dest, cfg->vector);
-	irq_desc[irq].affinity = mask;
+	desc = irq_to_desc(irq);
+	desc->affinity = mask;
 }
 #endif
 