author		Eric W. Biederman <ebiederm@xmission.com>	2007-02-23 06:35:05 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-26 13:34:08 -0500
commit		dfbffdd81c5d029ca8f8a754ce0eb8199c418eba (patch)
tree		31858110fa5535e9e089c52e32ba31151158ed4d /arch
parent		13a79503ab4a0f602c6806b2572b3338994b55d2 (diff)
[PATCH] x86_64 irq: Simplify assign_irq_vector's arguments.
Currently assign_irq_vector works mostly by side effect and returns
the results of its changes to the caller, which makes for a lot of
arguments to pass and return, and causes confusion about what to do
if you need the results but aren't the one calling assign_irq_vector.

This patch stops returning values from assign_irq_vector that can be
retrieved just as easily by examining irq_cfg, and modifies the
callers to retrieve those values from irq_cfg when they need them.
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
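For readers skimming the diff below, here is a minimal sketch (not part of the commit) of the calling convention this patch introduces: assign_irq_vector() now only reports success or failure, and callers read the assigned vector and CPU domain back out of irq_cfg. The caller example_retarget_irq() and the helper program_route() are hypothetical; irq_cfg, cfg->vector, cfg->domain, cpus_and(), cpu_mask_to_apicid() and the 0 / -ENOSPC return convention are taken from the patch itself.

/* Sketch of the post-patch caller pattern (hypothetical caller, not from
 * the commit).  assign_irq_vector() returns 0 on success or -ENOSPC, and
 * the chosen vector/domain are read from the irq's irq_cfg entry. */
static void example_retarget_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg = irq_cfg + irq;	/* per-irq configuration entry */
	cpumask_t tmp;
	unsigned int dest;

	if (assign_irq_vector(irq, mask))	/* non-zero means allocation failed */
		return;

	cpus_and(tmp, cfg->domain, mask);	/* CPUs the vector was actually placed on */
	dest = cpu_mask_to_apicid(tmp);		/* destination APIC id for those CPUs */

	program_route(irq, dest, cfg->vector);	/* hypothetical hardware-specific step */
}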
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86_64/kernel/io_apic.c	109
1 files changed, 56 insertions, 53 deletions
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index dd6580c66f61..79fa7b2bbee4 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -72,7 +72,7 @@ struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
 	[15] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 15 },
 };
 
-static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result);
+static int assign_irq_vector(int irq, cpumask_t mask);
 
 #define __apicdebuginit __init
 
@@ -262,19 +262,19 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
 
 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 {
+	struct irq_cfg *cfg = irq_cfg + irq;
 	unsigned long flags;
 	unsigned int dest;
 	cpumask_t tmp;
-	int vector;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
 		return;
 
-	vector = assign_irq_vector(irq, mask, &tmp);
-	if (vector < 0)
+	if (assign_irq_vector(irq, mask))
 		return;
 
+	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	/*
@@ -283,7 +283,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 	dest = SET_APIC_LOGICAL_ID(dest);
 
 	spin_lock_irqsave(&ioapic_lock, flags);
-	__target_IO_APIC_irq(irq, dest, vector);
+	__target_IO_APIC_irq(irq, dest, cfg->vector);
 	irq_desc[irq].affinity = mask;
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
@@ -638,7 +638,7 @@ static int pin_2_irq(int idx, int apic, int pin)
 	return irq;
 }
 
-static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
+static int __assign_irq_vector(int irq, cpumask_t mask)
 {
 	/*
 	 * NOTE! The local APIC isn't very good at handling
@@ -653,7 +653,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 	 */
 	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
 	cpumask_t old_mask = CPU_MASK_NONE;
-	int old_vector = -1;
+	unsigned int old_vector;
 	int cpu;
 	struct irq_cfg *cfg;
 
@@ -663,12 +663,12 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 	/* Only try and allocate irqs on cpus that are present */
 	cpus_and(mask, mask, cpu_online_map);
 
-	if (cfg->vector > 0)
-		old_vector = cfg->vector;
-	if (old_vector > 0) {
-		cpus_and(*result, cfg->domain, mask);
-		if (!cpus_empty(*result))
-			return old_vector;
+	old_vector = cfg->vector;
+	if (old_vector) {
+		cpumask_t tmp;
+		cpus_and(tmp, cfg->domain, mask);
+		if (!cpus_empty(tmp))
+			return 0;
 		cpus_and(old_mask, cfg->domain, cpu_online_map);
 	}
 
@@ -705,21 +705,20 @@ next:
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		cfg->vector = vector;
 		cfg->domain = domain;
-		cpus_and(*result, domain, mask);
-		return vector;
+		return 0;
 	}
 	return -ENOSPC;
 }
 
-static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
+static int assign_irq_vector(int irq, cpumask_t mask)
 {
-	int vector;
+	int err;
 	unsigned long flags;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	vector = __assign_irq_vector(irq, mask, result);
+	err = __assign_irq_vector(irq, mask);
 	spin_unlock_irqrestore(&vector_lock, flags);
-	return vector;
+	return err;
 }
 
 static void __clear_irq_vector(int irq)
@@ -780,22 +779,24 @@ static void ioapic_register_intr(int irq, unsigned long trigger)
 static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
 			      int trigger, int polarity)
 {
+	struct irq_cfg *cfg = irq_cfg + irq;
 	struct IO_APIC_route_entry entry;
 	cpumask_t mask;
-	int vector;
 	unsigned long flags;
 
 	if (!IO_APIC_IRQ(irq))
 		return;
 
-	vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
-	if (vector < 0)
+	mask = TARGET_CPUS;
+	if (assign_irq_vector(irq, mask))
 		return;
 
+	cpus_and(mask, cfg->domain, mask);
+
 	apic_printk(APIC_VERBOSE,KERN_DEBUG
 		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
 		    "IRQ %d Mode:%i Active:%i)\n",
-		    apic, mp_ioapics[apic].mpc_apicid, pin, vector,
+		    apic, mp_ioapics[apic].mpc_apicid, pin, cfg->vector,
 		    irq, trigger, polarity);
 
 	/*
@@ -809,7 +810,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
 	entry.mask = 0;	/* enable IRQ */
 	entry.trigger = trigger;
 	entry.polarity = polarity;
-	entry.vector = vector;
+	entry.vector = cfg->vector;
 
 	/* Mask level triggered irqs.
 	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
@@ -1558,15 +1559,14 @@ static inline void unlock_ExtINT_logic(void)
  */
 static inline void check_timer(void)
 {
+	struct irq_cfg *cfg = irq_cfg + 0;
 	int apic1, pin1, apic2, pin2;
-	int vector;
-	cpumask_t mask;
 
 	/*
 	 * get/set the timer IRQ vector:
 	 */
 	disable_8259A_irq(0);
-	vector = assign_irq_vector(0, TARGET_CPUS, &mask);
+	assign_irq_vector(0, TARGET_CPUS);
 
 	/*
 	 * Subtle, code in do_timer_interrupt() expects an AEOI
@@ -1586,7 +1586,7 @@ static inline void check_timer(void)
 	apic2 = ioapic_i8259.apic;
 
 	apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
-		cfg->vector, apic1, pin1, apic2, pin2);
+		cfg->vector, apic1, pin1, apic2, pin2);
 
 	if (pin1 != -1) {
 		/*
@@ -1617,7 +1617,7 @@ static inline void check_timer(void)
 		/*
 		 * legacy devices should be connected to IO APIC #0
 		 */
-		setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
+		setup_ExtINT_IRQ0_pin(apic2, pin2, cfg->vector);
 		if (timer_irq_works()) {
 			apic_printk(APIC_VERBOSE," works.\n");
 			nmi_watchdog_default();
@@ -1642,14 +1642,14 @@ static inline void check_timer(void)
 
 	disable_8259A_irq(0);
 	irq_desc[0].chip = &lapic_irq_type;
-	apic_write(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
+	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
 	enable_8259A_irq(0);
 
 	if (timer_irq_works()) {
 		apic_printk(APIC_VERBOSE," works.\n");
 		return;
 	}
-	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
+	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
 	apic_printk(APIC_VERBOSE," failed.\n");
 
 	apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
@@ -1796,9 +1796,7 @@ int create_irq(void)
 	/* Allocate an unused irq */
 	int irq;
 	int new;
-	int vector = 0;
 	unsigned long flags;
-	cpumask_t mask;
 
 	irq = -ENOSPC;
 	spin_lock_irqsave(&vector_lock, flags);
@@ -1807,8 +1805,7 @@ int create_irq(void)
 			continue;
 		if (irq_cfg[new].vector != 0)
 			continue;
-		vector = __assign_irq_vector(new, TARGET_CPUS, &mask);
-		if (likely(vector > 0))
+		if (__assign_irq_vector(new, TARGET_CPUS) == 0)
 			irq = new;
 		break;
 	}
@@ -1837,12 +1834,15 @@ void destroy_irq(unsigned int irq)
 #ifdef CONFIG_PCI_MSI
 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
 {
-	int vector;
+	struct irq_cfg *cfg = irq_cfg + irq;
+	int err;
 	unsigned dest;
 	cpumask_t tmp;
 
-	vector = assign_irq_vector(irq, TARGET_CPUS, &tmp);
-	if (vector >= 0) {
+	tmp = TARGET_CPUS;
+	err = assign_irq_vector(irq, tmp);
+	if (!err) {
+		cpus_and(tmp, cfg->domain, tmp);
 		dest = cpu_mask_to_apicid(tmp);
 
 		msg->address_hi = MSI_ADDR_BASE_HI;
@@ -1862,33 +1862,33 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
 			((INT_DELIVERY_MODE != dest_LowestPrio) ?
 				MSI_DATA_DELIVERY_FIXED:
 				MSI_DATA_DELIVERY_LOWPRI) |
-			MSI_DATA_VECTOR(vector);
+			MSI_DATA_VECTOR(cfg->vector);
 	}
-	return vector;
+	return err;
 }
 
 #ifdef CONFIG_SMP
 static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 {
+	struct irq_cfg *cfg = irq_cfg + irq;
 	struct msi_msg msg;
 	unsigned int dest;
 	cpumask_t tmp;
-	int vector;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
 		return;
 
-	vector = assign_irq_vector(irq, mask, &tmp);
-	if (vector < 0)
+	if (assign_irq_vector(irq, mask))
 		return;
 
+	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	read_msi_msg(irq, &msg);
 
 	msg.data &= ~MSI_DATA_VECTOR_MASK;
-	msg.data |= MSI_DATA_VECTOR(vector);
+	msg.data |= MSI_DATA_VECTOR(cfg->vector);
 	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
@@ -1964,21 +1964,21 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
 
 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 {
+	struct irq_cfg *cfg = irq_cfg + irq;
 	unsigned int dest;
 	cpumask_t tmp;
-	int vector;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
 		return;
 
-	vector = assign_irq_vector(irq, mask, &tmp);
-	if (vector < 0)
+	if (assign_irq_vector(irq, mask))
 		return;
 
+	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
-	target_ht_irq(irq, dest, vector);
+	target_ht_irq(irq, dest, cfg->vector);
 	irq_desc[irq].affinity = mask;
 }
 #endif
@@ -1996,14 +1996,17 @@ static struct irq_chip ht_irq_chip = {
 
 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 {
-	int vector;
+	struct irq_cfg *cfg = irq_cfg + irq;
+	int err;
 	cpumask_t tmp;
 
-	vector = assign_irq_vector(irq, TARGET_CPUS, &tmp);
-	if (vector >= 0) {
+	tmp = TARGET_CPUS;
+	err = assign_irq_vector(irq, tmp);
+	if (!err) {
 		struct ht_irq_msg msg;
 		unsigned dest;
 
+		cpus_and(tmp, cfg->domain, tmp);
 		dest = cpu_mask_to_apicid(tmp);
 
 		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
@@ -2011,7 +2014,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 		msg.address_lo =
 			HT_IRQ_LOW_BASE |
 			HT_IRQ_LOW_DEST_ID(dest) |
-			HT_IRQ_LOW_VECTOR(vector) |
+			HT_IRQ_LOW_VECTOR(cfg->vector) |
 			((INT_DEST_MODE == 0) ?
 				HT_IRQ_LOW_DM_PHYSICAL :
 				HT_IRQ_LOW_DM_LOGICAL) |
@@ -2026,7 +2029,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
 					      handle_edge_irq, "edge");
 	}
-	return vector;
+	return err;
 }
 #endif /* CONFIG_HT_IRQ */
 