Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/io_apic.c | 111
1 file changed, 50 insertions(+), 61 deletions(-)
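
In outline, this patch folds the two parallel per-IRQ tables, irq_vector[] and irq_domain[], into a single irq_cfg[] array of struct irq_cfg, so an IRQ's vector and its target-CPU domain are stored and updated together. A minimal before/after sketch of the data-structure change (simplified from the diff below; not the complete kernel code):

/* Before: two arrays indexed by IRQ number, kept in sync by hand. */
static u8        irq_vector[NR_IRQS] __read_mostly;	/* assigned vector */
static cpumask_t irq_domain[NR_IRQS] __read_mostly;	/* target CPU mask */

/* After: one array of combined per-IRQ configuration records. */
struct irq_cfg {
	cpumask_t domain;
	u8 vector;
};
struct irq_cfg irq_cfg[NR_IRQS] __read_mostly;

/* Call sites now take a single pointer and reach both fields through
 * it, e.g. (schematic):
 *	struct irq_cfg *cfg = &irq_cfg[irq];
 *	cfg->vector  replaces  irq_vector[irq]
 *	cfg->domain  replaces  irq_domain[irq]
 */
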
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 65d72189eb6a..dd6580c66f61 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -47,6 +47,31 @@
 #include <asm/msidef.h>
 #include <asm/hypertransport.h>
 
+struct irq_cfg {
+	cpumask_t domain;
+	u8 vector;
+};
+
+/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
+struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
+	[0] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 0 },
+	[1] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 1 },
+	[2] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 2 },
+	[3] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 3 },
+	[4] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 4 },
+	[5] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 5 },
+	[6] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 6 },
+	[7] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 7 },
+	[8] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 8 },
+	[9] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 9 },
+	[10] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 10 },
+	[11] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 11 },
+	[12] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 12 },
+	[13] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 13 },
+	[14] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 14 },
+	[15] = { .domain = CPU_MASK_ALL, .vector = FIRST_EXTERNAL_VECTOR + 15 },
+};
+
 static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result);
 
 #define __apicdebuginit __init
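
The sixteen explicit initializers above cover the legacy ISA IRQs 0-15, which keep their traditional fixed vectors (FIRST_EXTERNAL_VECTOR + irq) and may target any CPU. All higher entries are zero-initialized, so a vector of 0 doubles as the "not yet assigned" sentinel tested in the later hunks. A hypothetical helper, not part of the patch, that spells out the convention:

static inline int irq_has_vector(int irq)	/* illustrative only */
{
	return irq_cfg[irq].vector != 0;	/* 0 == never assigned */
}
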
@@ -613,46 +638,6 @@ static int pin_2_irq(int idx, int apic, int pin)
 	return irq;
 }
 
-
-/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-static u8 irq_vector[NR_IRQS] __read_mostly = {
-	[0] = FIRST_EXTERNAL_VECTOR + 0,
-	[1] = FIRST_EXTERNAL_VECTOR + 1,
-	[2] = FIRST_EXTERNAL_VECTOR + 2,
-	[3] = FIRST_EXTERNAL_VECTOR + 3,
-	[4] = FIRST_EXTERNAL_VECTOR + 4,
-	[5] = FIRST_EXTERNAL_VECTOR + 5,
-	[6] = FIRST_EXTERNAL_VECTOR + 6,
-	[7] = FIRST_EXTERNAL_VECTOR + 7,
-	[8] = FIRST_EXTERNAL_VECTOR + 8,
-	[9] = FIRST_EXTERNAL_VECTOR + 9,
-	[10] = FIRST_EXTERNAL_VECTOR + 10,
-	[11] = FIRST_EXTERNAL_VECTOR + 11,
-	[12] = FIRST_EXTERNAL_VECTOR + 12,
-	[13] = FIRST_EXTERNAL_VECTOR + 13,
-	[14] = FIRST_EXTERNAL_VECTOR + 14,
-	[15] = FIRST_EXTERNAL_VECTOR + 15,
-};
-
-static cpumask_t irq_domain[NR_IRQS] __read_mostly = {
-	[0] = CPU_MASK_ALL,
-	[1] = CPU_MASK_ALL,
-	[2] = CPU_MASK_ALL,
-	[3] = CPU_MASK_ALL,
-	[4] = CPU_MASK_ALL,
-	[5] = CPU_MASK_ALL,
-	[6] = CPU_MASK_ALL,
-	[7] = CPU_MASK_ALL,
-	[8] = CPU_MASK_ALL,
-	[9] = CPU_MASK_ALL,
-	[10] = CPU_MASK_ALL,
-	[11] = CPU_MASK_ALL,
-	[12] = CPU_MASK_ALL,
-	[13] = CPU_MASK_ALL,
-	[14] = CPU_MASK_ALL,
-	[15] = CPU_MASK_ALL,
-};
-
 static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 {
 	/*
@@ -670,19 +655,21 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 	cpumask_t old_mask = CPU_MASK_NONE;
 	int old_vector = -1;
 	int cpu;
+	struct irq_cfg *cfg;
 
 	BUG_ON((unsigned)irq >= NR_IRQS);
+	cfg = &irq_cfg[irq];
 
 	/* Only try and allocate irqs on cpus that are present */
 	cpus_and(mask, mask, cpu_online_map);
 
-	if (irq_vector[irq] > 0)
-		old_vector = irq_vector[irq];
+	if (cfg->vector > 0)
+		old_vector = cfg->vector;
 	if (old_vector > 0) {
-		cpus_and(*result, irq_domain[irq], mask);
+		cpus_and(*result, cfg->domain, mask);
 		if (!cpus_empty(*result))
 			return old_vector;
-		cpus_and(old_mask, irq_domain[irq], cpu_online_map);
+		cpus_and(old_mask, cfg->domain, cpu_online_map);
 	}
 
 	for_each_cpu_mask(cpu, mask) {
@@ -716,8 +703,8 @@ next:
 			per_cpu(vector_irq, old_cpu)[old_vector] = -1;
 		for_each_cpu_mask(new_cpu, new_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
-		irq_vector[irq] = vector;
-		irq_domain[irq] = domain;
+		cfg->vector = vector;
+		cfg->domain = domain;
 		cpus_and(*result, domain, mask);
 		return vector;
 	}
@@ -737,18 +724,21 @@ static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
 
 static void __clear_irq_vector(int irq)
 {
+	struct irq_cfg *cfg;
 	cpumask_t mask;
 	int cpu, vector;
 
-	BUG_ON(!irq_vector[irq]);
+	BUG_ON((unsigned)irq >= NR_IRQS);
+	cfg = &irq_cfg[irq];
+	BUG_ON(!cfg->vector);
 
-	vector = irq_vector[irq];
-	cpus_and(mask, irq_domain[irq], cpu_online_map);
+	vector = cfg->vector;
+	cpus_and(mask, cfg->domain, cpu_online_map);
 	for_each_cpu_mask(cpu, mask)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
-	irq_vector[irq] = 0;
-	irq_domain[irq] = CPU_MASK_NONE;
+	cfg->vector = 0;
+	cfg->domain = CPU_MASK_NONE;
 }
 
 void __setup_vector_irq(int cpu)
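
Note the hardening folded into __clear_irq_vector() above: the old code indexed irq_vector[irq] inside BUG_ON() without checking the bound first, while the new code validates (unsigned)irq >= NR_IRQS before taking the irq_cfg[] pointer, mirroring the check in __assign_irq_vector().
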
@@ -759,9 +749,9 @@ void __setup_vector_irq(int cpu)
 
 	/* Mark the inuse vectors */
 	for (irq = 0; irq < NR_IRQS; ++irq) {
-		if (!cpu_isset(cpu, irq_domain[irq]))
+		if (!cpu_isset(cpu, irq_cfg[irq].domain))
 			continue;
-		vector = irq_vector[irq];
+		vector = irq_cfg[irq].vector;
 		per_cpu(vector_irq, cpu)[vector] = irq;
 	}
 	/* Mark the free vectors */
767 /* Mark the free vectors */ 757 /* Mark the free vectors */
@@ -769,7 +759,7 @@ void __setup_vector_irq(int cpu)
 		irq = per_cpu(vector_irq, cpu)[vector];
 		if (irq < 0)
 			continue;
-		if (!cpu_isset(cpu, irq_domain[irq]))
+		if (!cpu_isset(cpu, irq_cfg[irq].domain))
 			per_cpu(vector_irq, cpu)[vector] = -1;
 	}
 }
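
Taken together, the two __setup_vector_irq() hunks keep the same invariant as before, now read from the consolidated structure: a CPU's per-CPU vector_irq[] slot names an IRQ exactly when that CPU belongs to the IRQ's domain and the slot index matches the IRQ's vector. A schematic checker for that invariant (hypothetical, not part of the patch):

static void check_vector_irq(int cpu)	/* illustrative only */
{
	int irq, vector;

	/* every IRQ routed to this CPU must own its vector slot */
	for (irq = 0; irq < NR_IRQS; irq++) {
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			continue;
		vector = irq_cfg[irq].vector;
		BUG_ON(per_cpu(vector_irq, cpu)[vector] != irq);
	}
}
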
@@ -1346,16 +1336,15 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
 
 static int ioapic_retrigger_irq(unsigned int irq)
 {
+	struct irq_cfg *cfg = &irq_cfg[irq];
 	cpumask_t mask;
-	unsigned vector;
 	unsigned long flags;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	vector = irq_vector[irq];
 	cpus_clear(mask);
-	cpu_set(first_cpu(irq_domain[irq]), mask);
+	cpu_set(first_cpu(cfg->domain), mask);
 
-	send_IPI_mask(mask, vector);
+	send_IPI_mask(mask, cfg->vector);
 	spin_unlock_irqrestore(&vector_lock, flags);
 
 	return 1;
@@ -1430,7 +1419,7 @@ static inline void init_IO_APIC_traps(void)
 	 */
 	for (irq = 0; irq < NR_IRQS ; irq++) {
 		int tmp = irq;
-		if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) {
+		if (IO_APIC_IRQ(tmp) && !irq_cfg[tmp].vector) {
 			/*
 			 * Hmm.. We don't have an entry for this,
 			 * so default to an old-fashioned 8259
@@ -1816,7 +1805,7 @@ int create_irq(void)
 	for (new = (NR_IRQS - 1); new >= 0; new--) {
 		if (platform_legacy_irq(new))
 			continue;
-		if (irq_vector[new] != 0)
+		if (irq_cfg[new].vector != 0)
 			continue;
 		vector = __assign_irq_vector(new, TARGET_CPUS, &mask);
 		if (likely(vector > 0))
@@ -2108,7 +2097,7 @@ void __init setup_ioapic_dest(void)
 			 * when you have too many devices, because at that time only boot
 			 * cpu is online.
 			 */
-			if(!irq_vector[irq])
+			if (!irq_cfg[irq].vector)
 				setup_IO_APIC_irq(ioapic, pin, irq,
 						  irq_trigger(irq_entry),
 						  irq_polarity(irq_entry));