diff options
author | Yinghai Lu <yhlu.kernel@gmail.com> | 2008-08-19 23:50:06 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-10-16 10:52:30 -0400 |
commit | 3ac2de48ed3c998df7f366e039c97eedb27e7c3d (patch) | |
tree | d9dfda60e670fc5c67cf4cabef8325602d568902 | |
parent | 3bf52a4df3ccd25d4154797977c556a2a8b3bc1e (diff) |
x86: add irq_cfg in io_apic_64.c
The preallocated size is 32; if that is not enough, more irq_cfg
entries will be allocated via alloc_bootmem() or kzalloc()
(depending on how early we are in system setup).
v2: fix typo about size of init_one_irq_cfg ... should use sizeof(struct irq_cfg)
v3: according to Eric, change get_irq_cfg() to irq_cfg()
v4: squash add irq_cfg_alloc in
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | arch/x86/kernel/io_apic_64.c | 209 |
1 files changed, 169 insertions, 40 deletions
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index cab5a25d81b1..858c37a31a2f 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c | |||
@@ -57,7 +57,11 @@ | |||
57 | 57 | ||
58 | #define __apicdebuginit(type) static type __init | 58 | #define __apicdebuginit(type) static type __init |
59 | 59 | ||
60 | struct irq_cfg; | ||
61 | |||
60 | struct irq_cfg { | 62 | struct irq_cfg { |
63 | unsigned int irq; | ||
64 | struct irq_cfg *next; | ||
61 | cpumask_t domain; | 65 | cpumask_t domain; |
62 | cpumask_t old_domain; | 66 | cpumask_t old_domain; |
63 | unsigned move_cleanup_count; | 67 | unsigned move_cleanup_count; |
@@ -67,34 +71,132 @@ struct irq_cfg { | |||
67 | 71 | ||
68 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ | 72 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ |
69 | static struct irq_cfg irq_cfg_legacy[] __initdata = { | 73 | static struct irq_cfg irq_cfg_legacy[] __initdata = { |
70 | [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, | 74 | [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, |
71 | [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, | 75 | [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, |
72 | [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, | 76 | [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, |
73 | [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, | 77 | [3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, |
74 | [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, | 78 | [4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, |
75 | [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, | 79 | [5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, |
76 | [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, | 80 | [6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, |
77 | [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, | 81 | [7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, |
78 | [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, | 82 | [8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, |
79 | [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, | 83 | [9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, |
80 | [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, | 84 | [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, |
81 | [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, | 85 | [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, |
82 | [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, | 86 | [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, |
83 | [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, | 87 | [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, |
84 | [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, | 88 | [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, |
85 | [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, | 89 | [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, |
86 | }; | 90 | }; |
87 | 91 | ||
88 | static struct irq_cfg *irq_cfg; | 92 | static struct irq_cfg irq_cfg_init = { .irq = -1U, }; |
93 | /* need to be bigger than size of irq_cfg_legacy */ ||
94 | static int nr_irq_cfg = 32; | ||
95 | |||
96 | static int __init parse_nr_irq_cfg(char *arg) | ||
97 | { | ||
98 | if (arg) { | ||
99 | nr_irq_cfg = simple_strtoul(arg, NULL, 0); | ||
100 | if (nr_irq_cfg < 32) | ||
101 | nr_irq_cfg = 32; | ||
102 | } | ||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | early_param("nr_irq_cfg", parse_nr_irq_cfg); | ||
107 | |||
108 | static void init_one_irq_cfg(struct irq_cfg *cfg) | ||
109 | { | ||
110 | memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg)); | ||
111 | } | ||
89 | 112 | ||
90 | static void __init init_work(void *data) | 113 | static void __init init_work(void *data) |
91 | { | 114 | { |
92 | struct dyn_array *da = data; | 115 | struct dyn_array *da = data; |
116 | struct irq_cfg *cfg; | ||
117 | int i; | ||
93 | 118 | ||
94 | memcpy(*da->name, irq_cfg_legacy, sizeof(irq_cfg_legacy)); | 119 | cfg = *da->name; |
120 | |||
121 | memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy)); | ||
122 | |||
123 | i = sizeof(irq_cfg_legacy)/sizeof(irq_cfg_legacy[0]); | ||
124 | for (; i < *da->nr; i++) | ||
125 | init_one_irq_cfg(&cfg[i]); | ||
126 | |||
127 | for (i = 1; i < *da->nr; i++) | ||
128 | cfg[i-1].next = &cfg[i]; | ||
95 | } | 129 | } |
96 | 130 | ||
97 | DEFINE_DYN_ARRAY(irq_cfg, sizeof(struct irq_cfg), nr_irqs, PAGE_SIZE, init_work); | 131 | static struct irq_cfg *irq_cfgx; |
132 | DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work); | ||
133 | |||
134 | static struct irq_cfg *irq_cfg(unsigned int irq) | ||
135 | { | ||
136 | struct irq_cfg *cfg; | ||
137 | |||
138 | BUG_ON(irq == -1U); | ||
139 | |||
140 | cfg = &irq_cfgx[0]; | ||
141 | while (cfg) { | ||
142 | if (cfg->irq == irq) | ||
143 | return cfg; | ||
144 | |||
145 | if (cfg->irq == -1U) | ||
146 | return NULL; | ||
147 | |||
148 | cfg = cfg->next; | ||
149 | } | ||
150 | |||
151 | return NULL; | ||
152 | } | ||
153 | |||
154 | static struct irq_cfg *irq_cfg_alloc(unsigned int irq) | ||
155 | { | ||
156 | struct irq_cfg *cfg, *cfg_pri; | ||
157 | int i; | ||
158 | int count = 0; | ||
159 | |||
160 | BUG_ON(irq == -1U); | ||
161 | |||
162 | cfg_pri = cfg = &irq_cfgx[0]; | ||
163 | while (cfg) { | ||
164 | if (cfg->irq == irq) | ||
165 | return cfg; | ||
166 | |||
167 | if (cfg->irq == -1U) { | ||
168 | cfg->irq = irq; | ||
169 | return cfg; | ||
170 | } | ||
171 | cfg_pri = cfg; | ||
172 | cfg = cfg->next; | ||
173 | count++; | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * we run out of pre-allocated ones, allocate more ||
178 | */ | ||
179 | printk(KERN_DEBUG "try to get more irq_cfg %d\n", nr_irq_cfg); | ||
180 | |||
181 | if (after_bootmem) | ||
182 | cfg = kzalloc(sizeof(struct irq_cfg)*nr_irq_cfg, GFP_ATOMIC); | ||
183 | else | ||
184 | cfg = __alloc_bootmem_nopanic(sizeof(struct irq_cfg)*nr_irq_cfg, PAGE_SIZE, 0); | ||
185 | |||
186 | if (!cfg) | ||
187 | panic("please boot with nr_irq_cfg= %d\n", count * 2); | ||
188 | |||
189 | for (i = 0; i < nr_irq_cfg; i++) | ||
190 | init_one_irq_cfg(&cfg[i]); | ||
191 | |||
192 | for (i = 1; i < nr_irq_cfg; i++) | ||
193 | cfg[i-1].next = &cfg[i]; | ||
194 | |||
195 | cfg->irq = irq; | ||
196 | cfg_pri->next = cfg; | ||
197 | |||
198 | return cfg; | ||
199 | } | ||
98 | 200 | ||
99 | static int assign_irq_vector(int irq, cpumask_t mask); | 201 | static int assign_irq_vector(int irq, cpumask_t mask); |
100 | 202 | ||
@@ -341,7 +443,7 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector) | |||
341 | 443 | ||
342 | static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | 444 | static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) |
343 | { | 445 | { |
344 | struct irq_cfg *cfg = irq_cfg + irq; | 446 | struct irq_cfg *cfg = irq_cfg(irq); |
345 | unsigned long flags; | 447 | unsigned long flags; |
346 | unsigned int dest; | 448 | unsigned int dest; |
347 | cpumask_t tmp; | 449 | cpumask_t tmp; |
@@ -381,6 +483,8 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin) | |||
381 | struct irq_pin_list *entry = irq_2_pin + irq; | 483 | struct irq_pin_list *entry = irq_2_pin + irq; |
382 | 484 | ||
383 | BUG_ON(irq >= nr_irqs); | 485 | BUG_ON(irq >= nr_irqs); |
486 | irq_cfg_alloc(irq); | ||
487 | |||
384 | while (entry->next) | 488 | while (entry->next) |
385 | entry = irq_2_pin + entry->next; | 489 | entry = irq_2_pin + entry->next; |
386 | 490 | ||
@@ -819,7 +923,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask) | |||
819 | struct irq_cfg *cfg; | 923 | struct irq_cfg *cfg; |
820 | 924 | ||
821 | BUG_ON((unsigned)irq >= nr_irqs); | 925 | BUG_ON((unsigned)irq >= nr_irqs); |
822 | cfg = &irq_cfg[irq]; | 926 | cfg = irq_cfg(irq); |
823 | 927 | ||
824 | /* Only try and allocate irqs on cpus that are present */ | 928 | /* Only try and allocate irqs on cpus that are present */ |
825 | cpus_and(mask, mask, cpu_online_map); | 929 | cpus_and(mask, mask, cpu_online_map); |
@@ -893,7 +997,7 @@ static void __clear_irq_vector(int irq) | |||
893 | int cpu, vector; | 997 | int cpu, vector; |
894 | 998 | ||
895 | BUG_ON((unsigned)irq >= nr_irqs); | 999 | BUG_ON((unsigned)irq >= nr_irqs); |
896 | cfg = &irq_cfg[irq]; | 1000 | cfg = irq_cfg(irq); |
897 | BUG_ON(!cfg->vector); | 1001 | BUG_ON(!cfg->vector); |
898 | 1002 | ||
899 | vector = cfg->vector; | 1003 | vector = cfg->vector; |
@@ -913,17 +1017,23 @@ void __setup_vector_irq(int cpu) | |||
913 | 1017 | ||
914 | /* Mark the inuse vectors */ | 1018 | /* Mark the inuse vectors */ |
915 | for (irq = 0; irq < nr_irqs; ++irq) { | 1019 | for (irq = 0; irq < nr_irqs; ++irq) { |
916 | if (!cpu_isset(cpu, irq_cfg[irq].domain)) | 1020 | struct irq_cfg *cfg = irq_cfg(irq); |
1021 | |||
1022 | if (!cpu_isset(cpu, cfg->domain)) | ||
917 | continue; | 1023 | continue; |
918 | vector = irq_cfg[irq].vector; | 1024 | vector = cfg->vector; |
919 | per_cpu(vector_irq, cpu)[vector] = irq; | 1025 | per_cpu(vector_irq, cpu)[vector] = irq; |
920 | } | 1026 | } |
921 | /* Mark the free vectors */ | 1027 | /* Mark the free vectors */ |
922 | for (vector = 0; vector < NR_VECTORS; ++vector) { | 1028 | for (vector = 0; vector < NR_VECTORS; ++vector) { |
1029 | struct irq_cfg *cfg; | ||
1030 | |||
923 | irq = per_cpu(vector_irq, cpu)[vector]; | 1031 | irq = per_cpu(vector_irq, cpu)[vector]; |
924 | if (irq < 0) | 1032 | if (irq < 0) |
925 | continue; | 1033 | continue; |
926 | if (!cpu_isset(cpu, irq_cfg[irq].domain)) | 1034 | |
1035 | cfg = irq_cfg(irq); | ||
1036 | if (!cpu_isset(cpu, cfg->domain)) | ||
927 | per_cpu(vector_irq, cpu)[vector] = -1; | 1037 | per_cpu(vector_irq, cpu)[vector] = -1; |
928 | } | 1038 | } |
929 | } | 1039 | } |
@@ -1029,13 +1139,15 @@ static int setup_ioapic_entry(int apic, int irq, | |||
1029 | static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, | 1139 | static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, |
1030 | int trigger, int polarity) | 1140 | int trigger, int polarity) |
1031 | { | 1141 | { |
1032 | struct irq_cfg *cfg = irq_cfg + irq; | 1142 | struct irq_cfg *cfg; |
1033 | struct IO_APIC_route_entry entry; | 1143 | struct IO_APIC_route_entry entry; |
1034 | cpumask_t mask; | 1144 | cpumask_t mask; |
1035 | 1145 | ||
1036 | if (!IO_APIC_IRQ(irq)) | 1146 | if (!IO_APIC_IRQ(irq)) |
1037 | return; | 1147 | return; |
1038 | 1148 | ||
1149 | cfg = irq_cfg(irq); | ||
1150 | |||
1039 | mask = TARGET_CPUS; | 1151 | mask = TARGET_CPUS; |
1040 | if (assign_irq_vector(irq, mask)) | 1152 | if (assign_irq_vector(irq, mask)) |
1041 | return; | 1153 | return; |
@@ -1553,7 +1665,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq) | |||
1553 | 1665 | ||
1554 | static int ioapic_retrigger_irq(unsigned int irq) | 1666 | static int ioapic_retrigger_irq(unsigned int irq) |
1555 | { | 1667 | { |
1556 | struct irq_cfg *cfg = &irq_cfg[irq]; | 1668 | struct irq_cfg *cfg = irq_cfg(irq); |
1557 | unsigned long flags; | 1669 | unsigned long flags; |
1558 | 1670 | ||
1559 | spin_lock_irqsave(&vector_lock, flags); | 1671 | spin_lock_irqsave(&vector_lock, flags); |
@@ -1600,7 +1712,7 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration); | |||
1600 | */ | 1712 | */ |
1601 | static void migrate_ioapic_irq(int irq, cpumask_t mask) | 1713 | static void migrate_ioapic_irq(int irq, cpumask_t mask) |
1602 | { | 1714 | { |
1603 | struct irq_cfg *cfg = irq_cfg + irq; | 1715 | struct irq_cfg *cfg; |
1604 | struct irq_desc *desc; | 1716 | struct irq_desc *desc; |
1605 | cpumask_t tmp, cleanup_mask; | 1717 | cpumask_t tmp, cleanup_mask; |
1606 | struct irte irte; | 1718 | struct irte irte; |
@@ -1618,6 +1730,7 @@ static void migrate_ioapic_irq(int irq, cpumask_t mask) | |||
1618 | if (assign_irq_vector(irq, mask)) | 1730 | if (assign_irq_vector(irq, mask)) |
1619 | return; | 1731 | return; |
1620 | 1732 | ||
1733 | cfg = irq_cfg(irq); | ||
1621 | cpus_and(tmp, cfg->domain, mask); | 1734 | cpus_and(tmp, cfg->domain, mask); |
1622 | dest = cpu_mask_to_apicid(tmp); | 1735 | dest = cpu_mask_to_apicid(tmp); |
1623 | 1736 | ||
@@ -1735,7 +1848,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
1735 | continue; | 1848 | continue; |
1736 | 1849 | ||
1737 | desc = irq_to_desc(irq); | 1850 | desc = irq_to_desc(irq); |
1738 | cfg = irq_cfg + irq; | 1851 | cfg = irq_cfg(irq); |
1739 | spin_lock(&desc->lock); | 1852 | spin_lock(&desc->lock); |
1740 | if (!cfg->move_cleanup_count) | 1853 | if (!cfg->move_cleanup_count) |
1741 | goto unlock; | 1854 | goto unlock; |
@@ -1754,7 +1867,7 @@ unlock: | |||
1754 | 1867 | ||
1755 | static void irq_complete_move(unsigned int irq) | 1868 | static void irq_complete_move(unsigned int irq) |
1756 | { | 1869 | { |
1757 | struct irq_cfg *cfg = irq_cfg + irq; | 1870 | struct irq_cfg *cfg = irq_cfg(irq); |
1758 | unsigned vector, me; | 1871 | unsigned vector, me; |
1759 | 1872 | ||
1760 | if (likely(!cfg->move_in_progress)) | 1873 | if (likely(!cfg->move_in_progress)) |
@@ -1891,7 +2004,10 @@ static inline void init_IO_APIC_traps(void) | |||
1891 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | 2004 | * 0x80, because int 0x80 is hm, kind of importantish. ;) |
1892 | */ | 2005 | */ |
1893 | for (irq = 0; irq < nr_irqs ; irq++) { | 2006 | for (irq = 0; irq < nr_irqs ; irq++) { |
1894 | if (IO_APIC_IRQ(irq) && !irq_cfg[irq].vector) { | 2007 | struct irq_cfg *cfg; |
2008 | |||
2009 | cfg = irq_cfg(irq); | ||
2010 | if (IO_APIC_IRQ(irq) && !cfg->vector) { | ||
1895 | /* | 2011 | /* |
1896 | * Hmm.. We don't have an entry for this, | 2012 | * Hmm.. We don't have an entry for this, |
1897 | * so default to an old-fashioned 8259 | 2013 | * so default to an old-fashioned 8259 |
@@ -2028,7 +2144,7 @@ static inline void __init unlock_ExtINT_logic(void) | |||
2028 | */ | 2144 | */ |
2029 | static inline void __init check_timer(void) | 2145 | static inline void __init check_timer(void) |
2030 | { | 2146 | { |
2031 | struct irq_cfg *cfg = irq_cfg + 0; | 2147 | struct irq_cfg *cfg = irq_cfg(0); |
2032 | int apic1, pin1, apic2, pin2; | 2148 | int apic1, pin1, apic2, pin2; |
2033 | unsigned long flags; | 2149 | unsigned long flags; |
2034 | int no_pin1 = 0; | 2150 | int no_pin1 = 0; |
@@ -2306,14 +2422,19 @@ int create_irq(void) | |||
2306 | int irq; | 2422 | int irq; |
2307 | int new; | 2423 | int new; |
2308 | unsigned long flags; | 2424 | unsigned long flags; |
2425 | struct irq_cfg *cfg_new; | ||
2309 | 2426 | ||
2310 | irq = -ENOSPC; | 2427 | irq = -ENOSPC; |
2311 | spin_lock_irqsave(&vector_lock, flags); | 2428 | spin_lock_irqsave(&vector_lock, flags); |
2312 | for (new = (nr_irqs - 1); new >= 0; new--) { | 2429 | for (new = (nr_irqs - 1); new >= 0; new--) { |
2313 | if (platform_legacy_irq(new)) | 2430 | if (platform_legacy_irq(new)) |
2314 | continue; | 2431 | continue; |
2315 | if (irq_cfg[new].vector != 0) | 2432 | cfg_new = irq_cfg(new); |
2433 | if (cfg_new && cfg_new->vector != 0) | ||
2316 | continue; | 2434 | continue; |
2435 | /* check if need to create one */ | ||
2436 | if (!cfg_new) | ||
2437 | cfg_new = irq_cfg_alloc(new); | ||
2317 | if (__assign_irq_vector(new, TARGET_CPUS) == 0) | 2438 | if (__assign_irq_vector(new, TARGET_CPUS) == 0) |
2318 | irq = new; | 2439 | irq = new; |
2319 | break; | 2440 | break; |
@@ -2346,7 +2467,7 @@ void destroy_irq(unsigned int irq) | |||
2346 | #ifdef CONFIG_PCI_MSI | 2467 | #ifdef CONFIG_PCI_MSI |
2347 | static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) | 2468 | static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) |
2348 | { | 2469 | { |
2349 | struct irq_cfg *cfg = irq_cfg + irq; | 2470 | struct irq_cfg *cfg; |
2350 | int err; | 2471 | int err; |
2351 | unsigned dest; | 2472 | unsigned dest; |
2352 | cpumask_t tmp; | 2473 | cpumask_t tmp; |
@@ -2356,6 +2477,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms | |||
2356 | if (err) | 2477 | if (err) |
2357 | return err; | 2478 | return err; |
2358 | 2479 | ||
2480 | cfg = irq_cfg(irq); | ||
2359 | cpus_and(tmp, cfg->domain, tmp); | 2481 | cpus_and(tmp, cfg->domain, tmp); |
2360 | dest = cpu_mask_to_apicid(tmp); | 2482 | dest = cpu_mask_to_apicid(tmp); |
2361 | 2483 | ||
@@ -2413,7 +2535,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms | |||
2413 | #ifdef CONFIG_SMP | 2535 | #ifdef CONFIG_SMP |
2414 | static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | 2536 | static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) |
2415 | { | 2537 | { |
2416 | struct irq_cfg *cfg = irq_cfg + irq; | 2538 | struct irq_cfg *cfg; |
2417 | struct msi_msg msg; | 2539 | struct msi_msg msg; |
2418 | unsigned int dest; | 2540 | unsigned int dest; |
2419 | cpumask_t tmp; | 2541 | cpumask_t tmp; |
@@ -2426,6 +2548,7 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
2426 | if (assign_irq_vector(irq, mask)) | 2548 | if (assign_irq_vector(irq, mask)) |
2427 | return; | 2549 | return; |
2428 | 2550 | ||
2551 | cfg = irq_cfg(irq); | ||
2429 | cpus_and(tmp, cfg->domain, mask); | 2552 | cpus_and(tmp, cfg->domain, mask); |
2430 | dest = cpu_mask_to_apicid(tmp); | 2553 | dest = cpu_mask_to_apicid(tmp); |
2431 | 2554 | ||
@@ -2448,7 +2571,7 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
2448 | */ | 2571 | */ |
2449 | static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | 2572 | static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) |
2450 | { | 2573 | { |
2451 | struct irq_cfg *cfg = irq_cfg + irq; | 2574 | struct irq_cfg *cfg; |
2452 | unsigned int dest; | 2575 | unsigned int dest; |
2453 | cpumask_t tmp, cleanup_mask; | 2576 | cpumask_t tmp, cleanup_mask; |
2454 | struct irte irte; | 2577 | struct irte irte; |
@@ -2464,6 +2587,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
2464 | if (assign_irq_vector(irq, mask)) | 2587 | if (assign_irq_vector(irq, mask)) |
2465 | return; | 2588 | return; |
2466 | 2589 | ||
2590 | cfg = irq_cfg(irq); | ||
2467 | cpus_and(tmp, cfg->domain, mask); | 2591 | cpus_and(tmp, cfg->domain, mask); |
2468 | dest = cpu_mask_to_apicid(tmp); | 2592 | dest = cpu_mask_to_apicid(tmp); |
2469 | 2593 | ||
@@ -2670,7 +2794,7 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
2670 | #ifdef CONFIG_SMP | 2794 | #ifdef CONFIG_SMP |
2671 | static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | 2795 | static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) |
2672 | { | 2796 | { |
2673 | struct irq_cfg *cfg = irq_cfg + irq; | 2797 | struct irq_cfg *cfg; |
2674 | struct msi_msg msg; | 2798 | struct msi_msg msg; |
2675 | unsigned int dest; | 2799 | unsigned int dest; |
2676 | cpumask_t tmp; | 2800 | cpumask_t tmp; |
@@ -2683,6 +2807,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | |||
2683 | if (assign_irq_vector(irq, mask)) | 2807 | if (assign_irq_vector(irq, mask)) |
2684 | return; | 2808 | return; |
2685 | 2809 | ||
2810 | cfg = irq_cfg(irq); | ||
2686 | cpus_and(tmp, cfg->domain, mask); | 2811 | cpus_and(tmp, cfg->domain, mask); |
2687 | dest = cpu_mask_to_apicid(tmp); | 2812 | dest = cpu_mask_to_apicid(tmp); |
2688 | 2813 | ||
@@ -2749,7 +2874,7 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) | |||
2749 | 2874 | ||
2750 | static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) | 2875 | static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) |
2751 | { | 2876 | { |
2752 | struct irq_cfg *cfg = irq_cfg + irq; | 2877 | struct irq_cfg *cfg; |
2753 | unsigned int dest; | 2878 | unsigned int dest; |
2754 | cpumask_t tmp; | 2879 | cpumask_t tmp; |
2755 | struct irq_desc *desc; | 2880 | struct irq_desc *desc; |
@@ -2761,6 +2886,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) | |||
2761 | if (assign_irq_vector(irq, mask)) | 2886 | if (assign_irq_vector(irq, mask)) |
2762 | return; | 2887 | return; |
2763 | 2888 | ||
2889 | cfg = irq_cfg(irq); | ||
2764 | cpus_and(tmp, cfg->domain, mask); | 2890 | cpus_and(tmp, cfg->domain, mask); |
2765 | dest = cpu_mask_to_apicid(tmp); | 2891 | dest = cpu_mask_to_apicid(tmp); |
2766 | 2892 | ||
@@ -2783,7 +2909,7 @@ static struct irq_chip ht_irq_chip = { | |||
2783 | 2909 | ||
2784 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | 2910 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) |
2785 | { | 2911 | { |
2786 | struct irq_cfg *cfg = irq_cfg + irq; | 2912 | struct irq_cfg *cfg; |
2787 | int err; | 2913 | int err; |
2788 | cpumask_t tmp; | 2914 | cpumask_t tmp; |
2789 | 2915 | ||
@@ -2793,6 +2919,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
2793 | struct ht_irq_msg msg; | 2919 | struct ht_irq_msg msg; |
2794 | unsigned dest; | 2920 | unsigned dest; |
2795 | 2921 | ||
2922 | cfg = irq_cfg(irq); | ||
2796 | cpus_and(tmp, cfg->domain, tmp); | 2923 | cpus_and(tmp, cfg->domain, tmp); |
2797 | dest = cpu_mask_to_apicid(tmp); | 2924 | dest = cpu_mask_to_apicid(tmp); |
2798 | 2925 | ||
@@ -2891,6 +3018,7 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) | |||
2891 | void __init setup_ioapic_dest(void) | 3018 | void __init setup_ioapic_dest(void) |
2892 | { | 3019 | { |
2893 | int pin, ioapic, irq, irq_entry; | 3020 | int pin, ioapic, irq, irq_entry; |
3021 | struct irq_cfg *cfg; | ||
2894 | 3022 | ||
2895 | if (skip_ioapic_setup == 1) | 3023 | if (skip_ioapic_setup == 1) |
2896 | return; | 3024 | return; |
@@ -2906,7 +3034,8 @@ void __init setup_ioapic_dest(void) | |||
2906 | * when you have too many devices, because at that time only boot | 3034 | * when you have too many devices, because at that time only boot |
2907 | * cpu is online. | 3035 | * cpu is online. |
2908 | */ | 3036 | */ |
2909 | if (!irq_cfg[irq].vector) | 3037 | cfg = irq_cfg(irq); |
3038 | if (!cfg->vector) | ||
2910 | setup_IO_APIC_irq(ioapic, pin, irq, | 3039 | setup_IO_APIC_irq(ioapic, pin, irq, |
2911 | irq_trigger(irq_entry), | 3040 | irq_trigger(irq_entry), |
2912 | irq_polarity(irq_entry)); | 3041 | irq_polarity(irq_entry)); |