author     Yinghai Lu <yhlu.kernel@gmail.com>        2008-08-19 23:50:05 -0400
committer  Ingo Molnar <mingo@elte.hu>               2008-10-16 10:52:29 -0400
commit     08678b0841267c1d00d771fe01548d86043d065e
tree       7debb21f9e9a768ced43077f7376797a0c46f8c0  /arch/x86
parent     bfea1238beac9d306eeac081c67de5ca6aec4c7a
generic: sparse irqs: use irq_to_desc() together with dyn_array, instead of irq_desc[]
add CONFIG_HAVE_SPARSE_IRQ for use of a condensed array.
Get rid of irq_desc[] array assumptions.
Preallocate 32 irq_desc entries, and irq_to_desc() will try to get more.
( No change in functionality is expected anywhere, except the odd build
failure where we missed a code site or where a crossing commit introduces
new irq_desc[] usage. )
v2: according to Eric, change get_irq_desc() to irq_to_desc()
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                    |  1
-rw-r--r--  arch/x86/kernel/io_apic_32.c        | 46
-rw-r--r--  arch/x86/kernel/io_apic_64.c        | 75
-rw-r--r--  arch/x86/kernel/irq_32.c            | 24
-rw-r--r--  arch/x86/kernel/irq_64.c            | 35
-rw-r--r--  arch/x86/kernel/irqinit_64.c        | 10
-rw-r--r--  arch/x86/kernel/visws_quirks.c      | 30
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c |  4
8 files changed, 142 insertions(+), 83 deletions(-)
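The conversion applied below is mechanical; for orientation, here is a minimal illustrative fragment of the before/after accessor usage (not part of the patch; the helper name example_set_affinity is made up for illustration, and kernel context such as <linux/irq.h> and a valid irq number is assumed):

    /* illustrative sketch only -- not from the patch */
    static void example_set_affinity(unsigned int irq, cpumask_t mask)
    {
            /* old style: index the static irq_desc[] array directly */
            /* irq_desc[irq].affinity = mask; */

            /* new style: look the descriptor up via irq_to_desc(), which is
             * backed by a dyn_array and can grow past the 32 preallocated
             * entries when CONFIG_HAVE_SPARSE_IRQ is set */
            struct irq_desc *desc = irq_to_desc(irq);

            desc->affinity = mask;
    }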
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 42f98009d752..1004888e9b13 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -34,6 +34,7 @@ config X86
 select HAVE_GENERIC_DMA_COHERENT if X86_32
 select HAVE_EFFICIENT_UNALIGNED_ACCESS
 select HAVE_DYN_ARRAY
+select HAVE_SPARSE_IRQ if X86_64
 
 config ARCH_DEFCONFIG
 string
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 7f2bcc3dad82..c2160cfdec9b 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -345,6 +345,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
 struct irq_pin_list *entry = irq_2_pin + irq;
 unsigned int apicid_value;
 cpumask_t tmp;
+struct irq_desc *desc;
 
 cpus_and(tmp, cpumask, cpu_online_map);
 if (cpus_empty(tmp))
@@ -365,7 +366,8 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
 break;
 entry = irq_2_pin + entry->next;
 }
-irq_desc[irq].affinity = cpumask;
+desc = irq_to_desc(irq);
+desc->affinity = cpumask;
 spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
@@ -475,10 +477,12 @@ static inline void balance_irq(int cpu, int irq)
 static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
 {
 int i, j;
+struct irq_desc *desc;
 
 for_each_online_cpu(i) {
 for (j = 0; j < nr_irqs; j++) {
-if (!irq_desc[j].action)
+desc = irq_to_desc(j);
+if (!desc->action)
 continue;
 /* Is it a significant load ? */
 if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i), j) <
@@ -505,6 +509,7 @@ static void do_irq_balance(void)
 unsigned long tmp_cpu_irq;
 unsigned long imbalance = 0;
 cpumask_t allowed_mask, target_cpu_mask, tmp;
+struct irq_desc *desc;
 
 for_each_possible_cpu(i) {
 int package_index;
@@ -515,7 +520,8 @@ static void do_irq_balance(void)
 for (j = 0; j < nr_irqs; j++) {
 unsigned long value_now, delta;
 /* Is this an active IRQ or balancing disabled ? */
-if (!irq_desc[j].action || irq_balancing_disabled(j))
+desc = irq_to_desc(j);
+if (!desc->action || irq_balancing_disabled(j))
 continue;
 if (package_index == i)
 IRQ_DELTA(package_index, j) = 0;
@@ -609,7 +615,8 @@ tryanotherirq:
 selected_irq = -1;
 for (j = 0; j < nr_irqs; j++) {
 /* Is this an active IRQ? */
-if (!irq_desc[j].action)
+desc = irq_to_desc(j);
+if (!desc->action)
 continue;
 if (imbalance <= IRQ_DELTA(max_loaded, j))
 continue;
@@ -682,10 +689,12 @@ static int balanced_irq(void *unused)
 int i;
 unsigned long prev_balance_time = jiffies;
 long time_remaining = balanced_irq_interval;
+struct irq_desc *desc;
 
 /* push everything to CPU 0 to give us a starting point. */
 for (i = 0 ; i < nr_irqs ; i++) {
-irq_desc[i].pending_mask = cpumask_of_cpu(0);
+desc = irq_to_desc(i);
+desc->pending_mask = cpumask_of_cpu(0);
 set_pending_irq(i, cpumask_of_cpu(0));
 }
 
@@ -1254,13 +1263,16 @@ static struct irq_chip ioapic_chip;
 
 static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
 {
+struct irq_desc *desc;
+
+desc = irq_to_desc(irq);
 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
 trigger == IOAPIC_LEVEL) {
-irq_desc[irq].status |= IRQ_LEVEL;
+desc->status |= IRQ_LEVEL;
 set_irq_chip_and_handler_name(irq, &ioapic_chip,
 handle_fasteoi_irq, "fasteoi");
 } else {
-irq_desc[irq].status &= ~IRQ_LEVEL;
+desc->status &= ~IRQ_LEVEL;
 set_irq_chip_and_handler_name(irq, &ioapic_chip,
 handle_edge_irq, "edge");
 }
@@ -2027,6 +2039,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
 static inline void init_IO_APIC_traps(void)
 {
 int irq;
+struct irq_desc *desc;
 
 /*
 * NOTE! The local APIC isn't very good at handling
@@ -2048,9 +2061,11 @@ static inline void init_IO_APIC_traps(void)
 */
 if (irq < 16)
 make_8259A_irq(irq);
-else
+else {
+desc = irq_to_desc(irq);
 /* Strange. Oh, well.. */
-irq_desc[irq].chip = &no_irq_chip;
+desc->chip = &no_irq_chip;
+}
 }
 }
 }
@@ -2089,7 +2104,10 @@ static struct irq_chip lapic_chip __read_mostly = {
 
 static void lapic_register_intr(int irq, int vector)
 {
-irq_desc[irq].status &= ~IRQ_LEVEL;
+struct irq_desc *desc;
+
+desc = irq_to_desc(irq);
+desc->status &= ~IRQ_LEVEL;
 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
 "edge");
 set_intr_gate(vector, interrupt[irq]);
@@ -2556,6 +2574,7 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 unsigned int dest;
 cpumask_t tmp;
 int vector;
+struct irq_desc *desc;
 
 cpus_and(tmp, mask, cpu_online_map);
 if (cpus_empty(tmp))
@@ -2575,7 +2594,8 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 write_msi_msg(irq, &msg);
-irq_desc[irq].affinity = mask;
+desc = irq_to_desc(irq);
+desc->affinity = mask;
 }
 #endif /* CONFIG_SMP */
 
@@ -2649,6 +2669,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 {
 unsigned int dest;
 cpumask_t tmp;
+struct irq_desc *desc;
 
 cpus_and(tmp, mask, cpu_online_map);
 if (cpus_empty(tmp))
@@ -2659,7 +2680,8 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 dest = cpu_mask_to_apicid(mask);
 
 target_ht_irq(irq, dest);
-irq_desc[irq].affinity = mask;
+desc = irq_to_desc(irq);
+desc->affinity = mask;
 }
 #endif
 
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 93a3ffabfe6a..cab5a25d81b1 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -345,6 +345,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 unsigned long flags;
 unsigned int dest;
 cpumask_t tmp;
+struct irq_desc *desc;
 
 cpus_and(tmp, mask, cpu_online_map);
 if (cpus_empty(tmp))
@@ -361,9 +362,10 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 */
 dest = SET_APIC_LOGICAL_ID(dest);
 
+desc = irq_to_desc(irq);
 spin_lock_irqsave(&ioapic_lock, flags);
 __target_IO_APIC_irq(irq, dest, cfg->vector);
-irq_desc[irq].affinity = mask;
+desc->affinity = mask;
 spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 #endif
@@ -933,14 +935,17 @@ static struct irq_chip ir_ioapic_chip;
 
 static void ioapic_register_intr(int irq, unsigned long trigger)
 {
+struct irq_desc *desc;
+
+desc = irq_to_desc(irq);
 if (trigger)
-irq_desc[irq].status |= IRQ_LEVEL;
+desc->status |= IRQ_LEVEL;
 else
-irq_desc[irq].status &= ~IRQ_LEVEL;
+desc->status &= ~IRQ_LEVEL;
 
 #ifdef CONFIG_INTR_REMAP
 if (irq_remapped(irq)) {
-irq_desc[irq].status |= IRQ_MOVE_PCNTXT;
+desc->status |= IRQ_MOVE_PCNTXT;
 if (trigger)
 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
 handle_fasteoi_irq,
@@ -1596,10 +1601,10 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
 static void migrate_ioapic_irq(int irq, cpumask_t mask)
 {
 struct irq_cfg *cfg = irq_cfg + irq;
-struct irq_desc *desc = irq_desc + irq;
+struct irq_desc *desc;
 cpumask_t tmp, cleanup_mask;
 struct irte irte;
-int modify_ioapic_rte = desc->status & IRQ_LEVEL;
+int modify_ioapic_rte;
 unsigned int dest;
 unsigned long flags;
 
@@ -1616,6 +1621,8 @@ static void migrate_ioapic_irq(int irq, cpumask_t mask)
 cpus_and(tmp, cfg->domain, mask);
 dest = cpu_mask_to_apicid(tmp);
 
+desc = irq_to_desc(irq);
+modify_ioapic_rte = desc->status & IRQ_LEVEL;
 if (modify_ioapic_rte) {
 spin_lock_irqsave(&ioapic_lock, flags);
 __target_IO_APIC_irq(irq, dest, cfg->vector);
@@ -1637,12 +1644,13 @@ static void migrate_ioapic_irq(int irq, cpumask_t mask)
 cfg->move_in_progress = 0;
 }
 
-irq_desc[irq].affinity = mask;
+desc->affinity = mask;
 }
 
 static int migrate_irq_remapped_level(int irq)
 {
 int ret = -1;
+struct irq_desc *desc = irq_to_desc(irq);
 
 mask_IO_APIC_irq(irq);
 
@@ -1658,11 +1666,11 @@ static int migrate_irq_remapped_level(int irq)
 }
 
 /* everthing is clear. we have right of way */
-migrate_ioapic_irq(irq, irq_desc[irq].pending_mask);
+migrate_ioapic_irq(irq, desc->pending_mask);
 
 ret = 0;
-irq_desc[irq].status &= ~IRQ_MOVE_PENDING;
-cpus_clear(irq_desc[irq].pending_mask);
+desc->status &= ~IRQ_MOVE_PENDING;
+cpus_clear(desc->pending_mask);
 
 unmask:
 unmask_IO_APIC_irq(irq);
@@ -1674,7 +1682,7 @@ static void ir_irq_migration(struct work_struct *work)
 int irq;
 
 for (irq = 0; irq < nr_irqs; irq++) {
-struct irq_desc *desc = irq_desc + irq;
+struct irq_desc *desc = irq_to_desc(irq);
 if (desc->status & IRQ_MOVE_PENDING) {
 unsigned long flags;
 
@@ -1686,8 +1694,7 @@ static void ir_irq_migration(struct work_struct *work)
 continue;
 }
 
-desc->chip->set_affinity(irq,
-irq_desc[irq].pending_mask);
+desc->chip->set_affinity(irq, desc->pending_mask);
 spin_unlock_irqrestore(&desc->lock, flags);
 }
 }
@@ -1698,9 +1705,11 @@
 */
 static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 {
-if (irq_desc[irq].status & IRQ_LEVEL) {
-irq_desc[irq].status |= IRQ_MOVE_PENDING;
-irq_desc[irq].pending_mask = mask;
+struct irq_desc *desc = irq_to_desc(irq);
+
+if (desc->status & IRQ_LEVEL) {
+desc->status |= IRQ_MOVE_PENDING;
+desc->pending_mask = mask;
 migrate_irq_remapped_level(irq);
 return;
 }
@@ -1725,7 +1734,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 if (irq >= nr_irqs)
 continue;
 
-desc = irq_desc + irq;
+desc = irq_to_desc(irq);
 cfg = irq_cfg + irq;
 spin_lock(&desc->lock);
 if (!cfg->move_cleanup_count)
@@ -1791,7 +1800,7 @@ static void ack_apic_level(unsigned int irq)
 irq_complete_move(irq);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 /* If we are moving the irq we need to mask it */
-if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
+if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
 do_unmask_irq = 1;
 mask_IO_APIC_irq(irq);
 }
@@ -1868,6 +1877,7 @@ static struct irq_chip ir_ioapic_chip __read_mostly = {
 static inline void init_IO_APIC_traps(void)
 {
 int irq;
+struct irq_desc *desc;
 
 /*
 * NOTE! The local APIC isn't very good at handling
@@ -1889,9 +1899,11 @@ static inline void init_IO_APIC_traps(void)
 */
 if (irq < 16)
 make_8259A_irq(irq);
-else
+else {
+desc = irq_to_desc(irq);
 /* Strange. Oh, well.. */
-irq_desc[irq].chip = &no_irq_chip;
+desc->chip = &no_irq_chip;
+}
 }
 }
 }
@@ -1926,7 +1938,10 @@ static struct irq_chip lapic_chip __read_mostly = {
 
 static void lapic_register_intr(int irq)
 {
-irq_desc[irq].status &= ~IRQ_LEVEL;
+struct irq_desc *desc;
+
+desc = irq_to_desc(irq);
+desc->status &= ~IRQ_LEVEL;
 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
 "edge");
 }
@@ -2402,6 +2417,7 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 struct msi_msg msg;
 unsigned int dest;
 cpumask_t tmp;
+struct irq_desc *desc;
 
 cpus_and(tmp, mask, cpu_online_map);
 if (cpus_empty(tmp))
@@ -2421,7 +2437,8 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 write_msi_msg(irq, &msg);
-irq_desc[irq].affinity = mask;
+desc = irq_to_desc(irq);
+desc->affinity = mask;
 }
 
 #ifdef CONFIG_INTR_REMAP
@@ -2435,6 +2452,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 unsigned int dest;
 cpumask_t tmp, cleanup_mask;
 struct irte irte;
+struct irq_desc *desc;
 
 cpus_and(tmp, mask, cpu_online_map);
 if (cpus_empty(tmp))
@@ -2469,7 +2487,8 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 cfg->move_in_progress = 0;
 }
 
-irq_desc[irq].affinity = mask;
+desc = irq_to_desc(irq);
+desc->affinity = mask;
 }
 #endif
 #endif /* CONFIG_SMP */
@@ -2543,7 +2562,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
 
 #ifdef CONFIG_INTR_REMAP
 if (irq_remapped(irq)) {
-struct irq_desc *desc = irq_desc + irq;
+struct irq_desc *desc = irq_to_desc(irq);
 /*
 * irq migration in process context
 */
@@ -2655,6 +2674,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 struct msi_msg msg;
 unsigned int dest;
 cpumask_t tmp;
+struct irq_desc *desc;
 
 cpus_and(tmp, mask, cpu_online_map);
 if (cpus_empty(tmp))
@@ -2674,7 +2694,8 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 dmar_msi_write(irq, &msg);
-irq_desc[irq].affinity = mask;
+desc = irq_to_desc(irq);
+desc->affinity = mask;
 }
 #endif /* CONFIG_SMP */
 
@@ -2731,6 +2752,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 struct irq_cfg *cfg = irq_cfg + irq;
 unsigned int dest;
 cpumask_t tmp;
+struct irq_desc *desc;
 
 cpus_and(tmp, mask, cpu_online_map);
 if (cpus_empty(tmp))
@@ -2743,7 +2765,8 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 dest = cpu_mask_to_apicid(tmp);
 
 target_ht_irq(irq, dest, cfg->vector);
-irq_desc[irq].affinity = mask;
+desc = irq_to_desc(irq);
+desc->affinity = mask;
 }
 #endif
 
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 4c7ffb32854c..ede513be517d 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -224,7 +224,7 @@ unsigned int do_IRQ(struct pt_regs *regs)
 struct pt_regs *old_regs;
 /* high bit used in ret_from_ code */
 int overflow, irq = ~regs->orig_ax;
-struct irq_desc *desc = irq_desc + irq;
+struct irq_desc *desc = irq_to_desc(irq);
 
 if (unlikely((unsigned)irq >= nr_irqs)) {
 printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
@@ -273,15 +273,16 @@ int show_interrupts(struct seq_file *p, void *v)
 
 if (i < nr_irqs) {
 unsigned any_count = 0;
+struct irq_desc *desc = irq_to_desc(i);
 
-spin_lock_irqsave(&irq_desc[i].lock, flags);
+spin_lock_irqsave(&desc->lock, flags);
 #ifndef CONFIG_SMP
 any_count = kstat_irqs(i);
 #else
 for_each_online_cpu(j)
 any_count |= kstat_cpu(j).irqs[i];
 #endif
-action = irq_desc[i].action;
+action = desc->action;
 if (!action && !any_count)
 goto skip;
 seq_printf(p, "%3d: ",i);
@@ -291,8 +292,8 @@ int show_interrupts(struct seq_file *p, void *v)
 for_each_online_cpu(j)
 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
-seq_printf(p, " %8s", irq_desc[i].chip->name);
-seq_printf(p, "-%-8s", irq_desc[i].name);
+seq_printf(p, " %8s", desc->chip->name);
+seq_printf(p, "-%-8s", desc->name);
 
 if (action) {
 seq_printf(p, " %s", action->name);
@@ -302,7 +303,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 seq_putc(p, '\n');
 skip:
-spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+spin_unlock_irqrestore(&desc->lock, flags);
 } else if (i == nr_irqs) {
 seq_printf(p, "NMI: ");
 for_each_online_cpu(j)
@@ -398,17 +399,20 @@ void fixup_irqs(cpumask_t map)
 
 for (irq = 0; irq < nr_irqs; irq++) {
 cpumask_t mask;
+struct irq_desc *desc;
+
 if (irq == 2)
 continue;
 
-cpus_and(mask, irq_desc[irq].affinity, map);
+desc = irq_to_desc(irq);
+cpus_and(mask, desc->affinity, map);
 if (any_online_cpu(mask) == NR_CPUS) {
 printk("Breaking affinity for irq %i\n", irq);
 mask = map;
 }
-if (irq_desc[irq].chip->set_affinity)
-irq_desc[irq].chip->set_affinity(irq, mask);
-else if (irq_desc[irq].action && !(warned++))
+if (desc->chip->set_affinity)
+desc->chip->set_affinity(irq, mask);
+else if (desc->action && !(warned++))
 printk("Cannot set affinity for irq %i\n", irq);
 }
 
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index e1f0839430d2..738eb65a924e 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -83,15 +83,16 @@ int show_interrupts(struct seq_file *p, void *v)
 
 if (i < nr_irqs) {
 unsigned any_count = 0;
+struct irq_desc *desc = irq_to_desc(i);
 
-spin_lock_irqsave(&irq_desc[i].lock, flags);
+spin_lock_irqsave(&desc->lock, flags);
 #ifndef CONFIG_SMP
 any_count = kstat_irqs(i);
 #else
 for_each_online_cpu(j)
 any_count |= kstat_cpu(j).irqs[i];
 #endif
-action = irq_desc[i].action;
+action = desc->action;
 if (!action && !any_count)
 goto skip;
 seq_printf(p, "%3d: ",i);
@@ -101,8 +102,8 @@ int show_interrupts(struct seq_file *p, void *v)
 for_each_online_cpu(j)
 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
-seq_printf(p, " %8s", irq_desc[i].chip->name);
-seq_printf(p, "-%-8s", irq_desc[i].name);
+seq_printf(p, " %8s", desc->chip->name);
+seq_printf(p, "-%-8s", desc->name);
 
 if (action) {
 seq_printf(p, " %s", action->name);
@@ -111,7 +112,7 @@ int show_interrupts(struct seq_file *p, void *v)
 }
 seq_putc(p, '\n');
 skip:
-spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+spin_unlock_irqrestore(&desc->lock, flags);
 } else if (i == nr_irqs) {
 seq_printf(p, "NMI: ");
 for_each_online_cpu(j)
@@ -228,37 +229,39 @@ void fixup_irqs(cpumask_t map)
 cpumask_t mask;
 int break_affinity = 0;
 int set_affinity = 1;
+struct irq_desc *desc;
 
 if (irq == 2)
 continue;
 
+desc = irq_to_desc(irq);
 /* interrupt's are disabled at this point */
-spin_lock(&irq_desc[irq].lock);
+spin_lock(&desc->lock);
 
 if (!irq_has_action(irq) ||
-cpus_equal(irq_desc[irq].affinity, map)) {
-spin_unlock(&irq_desc[irq].lock);
+cpus_equal(desc->affinity, map)) {
+spin_unlock(&desc->lock);
 continue;
 }
 
-cpus_and(mask, irq_desc[irq].affinity, map);
+cpus_and(mask, desc->affinity, map);
 if (cpus_empty(mask)) {
 break_affinity = 1;
 mask = map;
 }
 
-if (irq_desc[irq].chip->mask)
-irq_desc[irq].chip->mask(irq);
+if (desc->chip->mask)
+desc->chip->mask(irq);
 
-if (irq_desc[irq].chip->set_affinity)
-irq_desc[irq].chip->set_affinity(irq, mask);
+if (desc->chip->set_affinity)
+desc->chip->set_affinity(irq, mask);
 else if (!(warned++))
 set_affinity = 0;
 
-if (irq_desc[irq].chip->unmask)
-irq_desc[irq].chip->unmask(irq);
+if (desc->chip->unmask)
+desc->chip->unmask(irq);
 
-spin_unlock(&irq_desc[irq].lock);
+spin_unlock(&desc->lock);
 
 if (break_affinity && set_affinity)
 printk("Broke affinity for irq %i\n", irq);
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 165c5d9b0d1a..0744b49b4d12 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -143,9 +143,11 @@ void __init init_ISA_irqs(void)
 init_8259A(0);
 
 for (i = 0; i < nr_irqs; i++) {
-irq_desc[i].status = IRQ_DISABLED;
-irq_desc[i].action = NULL;
-irq_desc[i].depth = 1;
+struct irq_desc *desc = irq_to_desc(i);
+
+desc->status = IRQ_DISABLED;
+desc->action = NULL;
+desc->depth = 1;
 
 if (i < 16) {
 /*
@@ -157,7 +159,7 @@ void __init init_ISA_irqs(void)
 /*
 * 'high' PCI IRQs filled in on demand
 */
-irq_desc[i].chip = &no_irq_chip;
+desc->chip = &no_irq_chip;
 }
 }
 }
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 61a97e616f70..9d85ab384435 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -484,10 +484,11 @@ static void disable_cobalt_irq(unsigned int irq)
 static unsigned int startup_cobalt_irq(unsigned int irq)
 {
 unsigned long flags;
+struct irq_desc *desc = irq_to_desc(irq);
 
 spin_lock_irqsave(&cobalt_lock, flags);
-if ((irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
-irq_desc[irq].status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
+if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
+desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
 enable_cobalt_irq(irq);
 spin_unlock_irqrestore(&cobalt_lock, flags);
 return 0;
@@ -506,9 +507,10 @@ static void ack_cobalt_irq(unsigned int irq)
 static void end_cobalt_irq(unsigned int irq)
 {
 unsigned long flags;
+struct irq_desc *desc = irq_to_desc(irq);
 
 spin_lock_irqsave(&cobalt_lock, flags);
-if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
+if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS)))
 enable_cobalt_irq(irq);
 spin_unlock_irqrestore(&cobalt_lock, flags);
 }
@@ -626,7 +628,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 
 spin_unlock_irqrestore(&i8259A_lock, flags);
 
-desc = irq_desc + realirq;
+desc = irq_to_desc(realirq);
 
 /*
 * handle this 'virtual interrupt' as a Cobalt one now.
@@ -662,27 +664,29 @@ void init_VISWS_APIC_irqs(void)
 int i;
 
 for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) {
-irq_desc[i].status = IRQ_DISABLED;
-irq_desc[i].action = 0;
-irq_desc[i].depth = 1;
+struct irq_desc *desc = irq_to_desc(i);
+
+desc->status = IRQ_DISABLED;
+desc->action = 0;
+desc->depth = 1;
 
 if (i == 0) {
-irq_desc[i].chip = &cobalt_irq_type;
+desc->chip = &cobalt_irq_type;
 }
 else if (i == CO_IRQ_IDE0) {
-irq_desc[i].chip = &cobalt_irq_type;
+desc->chip = &cobalt_irq_type;
 }
 else if (i == CO_IRQ_IDE1) {
-irq_desc[i].chip = &cobalt_irq_type;
+desc->chip = &cobalt_irq_type;
 }
 else if (i == CO_IRQ_8259) {
-irq_desc[i].chip = &piix4_master_irq_type;
+desc->chip = &piix4_master_irq_type;
 }
 else if (i < CO_IRQ_APIC0) {
-irq_desc[i].chip = &piix4_virtual_irq_type;
+desc->chip = &piix4_virtual_irq_type;
 }
 else if (IS_CO_APIC(i)) {
-irq_desc[i].chip = &cobalt_irq_type;
+desc->chip = &cobalt_irq_type;
 }
 }
 
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 199a5f4a873c..0f6e8a6523ae 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -1483,7 +1483,7 @@ static void disable_local_vic_irq(unsigned int irq)
 * the interrupt off to another CPU */
 static void before_handle_vic_irq(unsigned int irq)
 {
-irq_desc_t *desc = irq_desc + irq;
+irq_desc_t *desc = irq_to_desc(irq);
 __u8 cpu = smp_processor_id();
 
 _raw_spin_lock(&vic_irq_lock);
@@ -1518,7 +1518,7 @@ static void before_handle_vic_irq(unsigned int irq)
 /* Finish the VIC interrupt: basically mask */
 static void after_handle_vic_irq(unsigned int irq)
 {
-irq_desc_t *desc = irq_desc + irq;
+irq_desc_t *desc = irq_to_desc(irq);
 
 _raw_spin_lock(&vic_irq_lock);
 {