-rw-r--r--   arch/x86/Kconfig                     |  10
-rw-r--r--   arch/x86/include/asm/irq_vectors.h   |   9
-rw-r--r--   arch/x86/kernel/io_apic.c            | 301
-rw-r--r--   arch/x86/kernel/irq.c                |   3
-rw-r--r--   arch/x86/kernel/irq_32.c             |   2
-rw-r--r--   arch/x86/kernel/irq_64.c             |   2
-rw-r--r--   arch/x86/kernel/irqinit_32.c         |   1
-rw-r--r--   arch/x86/kernel/irqinit_64.c         |   1
-rw-r--r--   drivers/char/random.c                |  22
-rw-r--r--   drivers/pci/intr_remapping.c         |  76
-rw-r--r--   drivers/xen/events.c                 |  12
-rw-r--r--   fs/proc/stat.c                       |  17
-rw-r--r--   include/linux/interrupt.h            |   2
-rw-r--r--   include/linux/irq.h                  |  54
-rw-r--r--   include/linux/irqnr.h                |  14
-rw-r--r--   include/linux/kernel_stat.h          |  14
-rw-r--r--   include/linux/random.h               |  51
-rw-r--r--   init/main.c                          |  11
-rw-r--r--   kernel/irq/autoprobe.c               |  15
-rw-r--r--   kernel/irq/chip.c                    |   3
-rw-r--r--   kernel/irq/handle.c                  | 181
-rw-r--r--   kernel/irq/proc.c                    |   6
-rw-r--r--   kernel/irq/spurious.c                |   5
23 files changed, 649 insertions, 163 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ac22bb7719f7..48ac688de3cd 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -238,6 +238,16 @@ config X86_HAS_BOOT_CPU_ID | |||
238 | def_bool y | 238 | def_bool y |
239 | depends on X86_VOYAGER | 239 | depends on X86_VOYAGER |
240 | 240 | ||
241 | config SPARSE_IRQ | ||
242 | bool "Support sparse irq numbering" | ||
243 | depends on PCI_MSI || HT_IRQ | ||
244 | default y | ||
245 | help | ||
246 | This enables support for sparse irq numbering, which is mainly useful | ||
247 | for MSI/MSI-X. You may need it if many MSI-X capable cards are installed. | ||
248 | |||
249 | If you don't know what to do here, say Y. | ||
250 | |||
241 | config X86_FIND_SMP_CONFIG | 251 | config X86_FIND_SMP_CONFIG |
242 | def_bool y | 252 | def_bool y |
243 | depends on X86_MPPARSE || X86_VOYAGER | 253 | depends on X86_MPPARSE || X86_VOYAGER |
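
In short, CONFIG_SPARSE_IRQ trades the statically sized irq_desc[] array for descriptors that are allocated on demand, close to the node of the CPU that first uses the irq. Condensed from the include/linux/irq.h hunk later in this patch (a sketch of the interface, not a complete listing):

#ifndef CONFIG_SPARSE_IRQ
/* Dense layout: every possible irq has a statically allocated descriptor. */
extern struct irq_desc irq_desc[NR_IRQS];

static inline struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
#else
/*
 * Sparse layout: a descriptor exists only once irq_to_desc_alloc_cpu()
 * has been called for that irq, so irq_to_desc() may return NULL and
 * callers throughout this patch add explicit NULL checks.
 */
extern struct irq_desc *irq_to_desc(unsigned int irq);
extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
#endif
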
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 0005adb0f941..bb6b69a6b125 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -102,11 +102,20 @@ | |||
102 | #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) | 102 | #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) |
103 | 103 | ||
104 | #if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER) | 104 | #if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER) |
105 | |||
106 | #ifndef CONFIG_SPARSE_IRQ | ||
105 | # if NR_CPUS < MAX_IO_APICS | 107 | # if NR_CPUS < MAX_IO_APICS |
106 | # define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) | 108 | # define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) |
107 | # else | 109 | # else |
108 | # define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) | 110 | # define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) |
109 | # endif | 111 | # endif |
112 | #else | ||
113 | # if (8 * NR_CPUS) > (32 * MAX_IO_APICS) | ||
114 | # define NR_IRQS (NR_VECTORS + (8 * NR_CPUS)) | ||
115 | # else | ||
116 | # define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) | ||
117 | # endif | ||
118 | #endif | ||
110 | 119 | ||
111 | #elif defined(CONFIG_X86_VOYAGER) | 120 | #elif defined(CONFIG_X86_VOYAGER) |
112 | 121 | ||
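
A quick worked example of the two NR_IRQS bounds above; the inputs (NR_VECTORS = 256, MAX_IO_APICS = 128, NR_CPUS = 4096) are illustrative assumptions, not values taken from this tree:

/*
 * Without CONFIG_SPARSE_IRQ (NR_CPUS >= MAX_IO_APICS, so the else branch):
 *         NR_IRQS = NR_VECTORS + 32 * MAX_IO_APICS = 256 + 4096  =  4352
 *
 * With CONFIG_SPARSE_IRQ (8 * NR_CPUS > 32 * MAX_IO_APICS):
 *         NR_IRQS = NR_VECTORS + 8 * NR_CPUS      = 256 + 32768 = 33024
 *
 * The larger bound is affordable because, with SPARSE_IRQ, NR_IRQS only
 * caps the irq number space; irq_desc, irq_cfg and kstat_irqs are no
 * longer static NR_IRQS-sized arrays but are allocated on demand.
 */
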
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 9043251210fb..9de17f5c1125 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -108,8 +108,33 @@ static int __init parse_noapic(char *str) | |||
108 | early_param("noapic", parse_noapic); | 108 | early_param("noapic", parse_noapic); |
109 | 109 | ||
110 | struct irq_pin_list; | 110 | struct irq_pin_list; |
111 | |||
112 | /* | ||
113 | * This is performance-critical, we want to do it O(1) | ||
114 | * | ||
115 | * the indexing order of this array favors 1:1 mappings | ||
116 | * between pins and IRQs. | ||
117 | */ | ||
118 | |||
119 | struct irq_pin_list { | ||
120 | int apic, pin; | ||
121 | struct irq_pin_list *next; | ||
122 | }; | ||
123 | |||
124 | static struct irq_pin_list *get_one_free_irq_2_pin(int cpu) | ||
125 | { | ||
126 | struct irq_pin_list *pin; | ||
127 | int node; | ||
128 | |||
129 | node = cpu_to_node(cpu); | ||
130 | |||
131 | pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); | ||
132 | printk(KERN_DEBUG " alloc irq_2_pin on cpu %d node %d\n", cpu, node); | ||
133 | |||
134 | return pin; | ||
135 | } | ||
136 | |||
111 | struct irq_cfg { | 137 | struct irq_cfg { |
112 | unsigned int irq; | ||
113 | struct irq_pin_list *irq_2_pin; | 138 | struct irq_pin_list *irq_2_pin; |
114 | cpumask_t domain; | 139 | cpumask_t domain; |
115 | cpumask_t old_domain; | 140 | cpumask_t old_domain; |
@@ -119,83 +144,93 @@ struct irq_cfg { | |||
119 | }; | 144 | }; |
120 | 145 | ||
121 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ | 146 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ |
147 | #ifdef CONFIG_SPARSE_IRQ | ||
148 | static struct irq_cfg irq_cfgx[] = { | ||
149 | #else | ||
122 | static struct irq_cfg irq_cfgx[NR_IRQS] = { | 150 | static struct irq_cfg irq_cfgx[NR_IRQS] = { |
123 | [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, | 151 | #endif |
124 | [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, | 152 | [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, |
125 | [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, | 153 | [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, |
126 | [3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, | 154 | [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, |
127 | [4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, | 155 | [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, |
128 | [5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, | 156 | [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, |
129 | [6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, | 157 | [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, |
130 | [7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, | 158 | [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, |
131 | [8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, | 159 | [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, |
132 | [9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, | 160 | [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, |
133 | [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, | 161 | [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, |
134 | [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, | 162 | [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, |
135 | [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, | 163 | [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, |
136 | [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, | 164 | [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, |
137 | [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, | 165 | [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, |
138 | [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, | 166 | [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, |
167 | [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, | ||
139 | }; | 168 | }; |
140 | 169 | ||
141 | #define for_each_irq_cfg(irq, cfg) \ | 170 | void __init arch_early_irq_init(void) |
142 | for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++) | ||
143 | |||
144 | static struct irq_cfg *irq_cfg(unsigned int irq) | ||
145 | { | 171 | { |
146 | return irq < nr_irqs ? irq_cfgx + irq : NULL; | 172 | struct irq_cfg *cfg; |
147 | } | 173 | struct irq_desc *desc; |
174 | int count; | ||
175 | int i; | ||
148 | 176 | ||
149 | static struct irq_cfg *irq_cfg_alloc(unsigned int irq) | 177 | cfg = irq_cfgx; |
150 | { | 178 | count = ARRAY_SIZE(irq_cfgx); |
151 | return irq_cfg(irq); | ||
152 | } | ||
153 | 179 | ||
154 | /* | 180 | for (i = 0; i < count; i++) { |
155 | * Rough estimation of how many shared IRQs there are, can be changed | 181 | desc = irq_to_desc(i); |
156 | * anytime. | 182 | desc->chip_data = &cfg[i]; |
157 | */ | 183 | } |
158 | #define MAX_PLUS_SHARED_IRQS NR_IRQS | 184 | } |
159 | #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS) | ||
160 | 185 | ||
161 | /* | 186 | #ifdef CONFIG_SPARSE_IRQ |
162 | * This is performance-critical, we want to do it O(1) | 187 | static struct irq_cfg *irq_cfg(unsigned int irq) |
163 | * | 188 | { |
164 | * the indexing order of this array favors 1:1 mappings | 189 | struct irq_cfg *cfg = NULL; |
165 | * between pins and IRQs. | 190 | struct irq_desc *desc; |
166 | */ | ||
167 | 191 | ||
168 | struct irq_pin_list { | 192 | desc = irq_to_desc(irq); |
169 | int apic, pin; | 193 | if (desc) |
170 | struct irq_pin_list *next; | 194 | cfg = desc->chip_data; |
171 | }; | ||
172 | 195 | ||
173 | static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE]; | 196 | return cfg; |
174 | static struct irq_pin_list *irq_2_pin_ptr; | 197 | } |
175 | 198 | ||
176 | static void __init irq_2_pin_init(void) | 199 | static struct irq_cfg *get_one_free_irq_cfg(int cpu) |
177 | { | 200 | { |
178 | struct irq_pin_list *pin = irq_2_pin_head; | 201 | struct irq_cfg *cfg; |
179 | int i; | 202 | int node; |
203 | |||
204 | node = cpu_to_node(cpu); | ||
180 | 205 | ||
181 | for (i = 1; i < PIN_MAP_SIZE; i++) | 206 | cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); |
182 | pin[i-1].next = &pin[i]; | 207 | printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node); |
183 | 208 | ||
184 | irq_2_pin_ptr = &pin[0]; | 209 | return cfg; |
185 | } | 210 | } |
186 | 211 | ||
187 | static struct irq_pin_list *get_one_free_irq_2_pin(void) | 212 | void arch_init_chip_data(struct irq_desc *desc, int cpu) |
188 | { | 213 | { |
189 | struct irq_pin_list *pin = irq_2_pin_ptr; | 214 | struct irq_cfg *cfg; |
190 | 215 | ||
191 | if (!pin) | 216 | cfg = desc->chip_data; |
192 | panic("can not get more irq_2_pin\n"); | 217 | if (!cfg) { |
218 | desc->chip_data = get_one_free_irq_cfg(cpu); | ||
219 | if (!desc->chip_data) { | ||
220 | printk(KERN_ERR "can not alloc irq_cfg\n"); | ||
221 | BUG_ON(1); | ||
222 | } | ||
223 | } | ||
224 | } | ||
193 | 225 | ||
194 | irq_2_pin_ptr = pin->next; | 226 | #else |
195 | pin->next = NULL; | 227 | static struct irq_cfg *irq_cfg(unsigned int irq) |
196 | return pin; | 228 | { |
229 | return irq < nr_irqs ? irq_cfgx + irq : NULL; | ||
197 | } | 230 | } |
198 | 231 | ||
232 | #endif | ||
233 | |||
199 | struct io_apic { | 234 | struct io_apic { |
200 | unsigned int index; | 235 | unsigned int index; |
201 | unsigned int unused[3]; | 236 | unsigned int unused[3]; |
@@ -397,16 +432,19 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | |||
397 | * shared ISA-space IRQs, so we have to support them. We are super | 432 | * shared ISA-space IRQs, so we have to support them. We are super |
398 | * fast in the common case, and fast for shared ISA-space IRQs. | 433 | * fast in the common case, and fast for shared ISA-space IRQs. |
399 | */ | 434 | */ |
400 | static void add_pin_to_irq(unsigned int irq, int apic, int pin) | 435 | static void add_pin_to_irq_cpu(unsigned int irq, int cpu, int apic, int pin) |
401 | { | 436 | { |
402 | struct irq_cfg *cfg; | ||
403 | struct irq_pin_list *entry; | 437 | struct irq_pin_list *entry; |
438 | struct irq_cfg *cfg = irq_cfg(irq); | ||
404 | 439 | ||
405 | /* first time to refer irq_cfg, so with new */ | ||
406 | cfg = irq_cfg_alloc(irq); | ||
407 | entry = cfg->irq_2_pin; | 440 | entry = cfg->irq_2_pin; |
408 | if (!entry) { | 441 | if (!entry) { |
409 | entry = get_one_free_irq_2_pin(); | 442 | entry = get_one_free_irq_2_pin(cpu); |
443 | if (!entry) { | ||
444 | printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n", | ||
445 | apic, pin); | ||
446 | return; | ||
447 | } | ||
410 | cfg->irq_2_pin = entry; | 448 | cfg->irq_2_pin = entry; |
411 | entry->apic = apic; | 449 | entry->apic = apic; |
412 | entry->pin = pin; | 450 | entry->pin = pin; |
@@ -421,7 +459,7 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin) | |||
421 | entry = entry->next; | 459 | entry = entry->next; |
422 | } | 460 | } |
423 | 461 | ||
424 | entry->next = get_one_free_irq_2_pin(); | 462 | entry->next = get_one_free_irq_2_pin(cpu); |
425 | entry = entry->next; | 463 | entry = entry->next; |
426 | entry->apic = apic; | 464 | entry->apic = apic; |
427 | entry->pin = pin; | 465 | entry->pin = pin; |
@@ -430,7 +468,7 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin) | |||
430 | /* | 468 | /* |
431 | * Reroute an IRQ to a different pin. | 469 | * Reroute an IRQ to a different pin. |
432 | */ | 470 | */ |
433 | static void __init replace_pin_at_irq(unsigned int irq, | 471 | static void __init replace_pin_at_irq(unsigned int irq, int cpu, |
434 | int oldapic, int oldpin, | 472 | int oldapic, int oldpin, |
435 | int newapic, int newpin) | 473 | int newapic, int newpin) |
436 | { | 474 | { |
@@ -451,7 +489,7 @@ static void __init replace_pin_at_irq(unsigned int irq, | |||
451 | 489 | ||
452 | /* why? call replace before add? */ | 490 | /* why? call replace before add? */ |
453 | if (!replaced) | 491 | if (!replaced) |
454 | add_pin_to_irq(irq, newapic, newpin); | 492 | add_pin_to_irq_cpu(irq, cpu, newapic, newpin); |
455 | } | 493 | } |
456 | 494 | ||
457 | static inline void io_apic_modify_irq(unsigned int irq, | 495 | static inline void io_apic_modify_irq(unsigned int irq, |
@@ -1162,9 +1200,13 @@ void __setup_vector_irq(int cpu) | |||
1162 | /* This function must be called with vector_lock held */ | 1200 | /* This function must be called with vector_lock held */ |
1163 | int irq, vector; | 1201 | int irq, vector; |
1164 | struct irq_cfg *cfg; | 1202 | struct irq_cfg *cfg; |
1203 | struct irq_desc *desc; | ||
1165 | 1204 | ||
1166 | /* Mark the inuse vectors */ | 1205 | /* Mark the inuse vectors */ |
1167 | for_each_irq_cfg(irq, cfg) { | 1206 | for_each_irq_desc(irq, desc) { |
1207 | if (!desc) | ||
1208 | continue; | ||
1209 | cfg = desc->chip_data; | ||
1168 | if (!cpu_isset(cpu, cfg->domain)) | 1210 | if (!cpu_isset(cpu, cfg->domain)) |
1169 | continue; | 1211 | continue; |
1170 | vector = cfg->vector; | 1212 | vector = cfg->vector; |
@@ -1356,6 +1398,8 @@ static void __init setup_IO_APIC_irqs(void) | |||
1356 | { | 1398 | { |
1357 | int apic, pin, idx, irq; | 1399 | int apic, pin, idx, irq; |
1358 | int notcon = 0; | 1400 | int notcon = 0; |
1401 | struct irq_desc *desc; | ||
1402 | int cpu = boot_cpu_id; | ||
1359 | 1403 | ||
1360 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); | 1404 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); |
1361 | 1405 | ||
@@ -1387,7 +1431,12 @@ static void __init setup_IO_APIC_irqs(void) | |||
1387 | if (multi_timer_check(apic, irq)) | 1431 | if (multi_timer_check(apic, irq)) |
1388 | continue; | 1432 | continue; |
1389 | #endif | 1433 | #endif |
1390 | add_pin_to_irq(irq, apic, pin); | 1434 | desc = irq_to_desc_alloc_cpu(irq, cpu); |
1435 | if (!desc) { | ||
1436 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | ||
1437 | continue; | ||
1438 | } | ||
1439 | add_pin_to_irq_cpu(irq, cpu, apic, pin); | ||
1391 | 1440 | ||
1392 | setup_IO_APIC_irq(apic, pin, irq, | 1441 | setup_IO_APIC_irq(apic, pin, irq, |
1393 | irq_trigger(idx), irq_polarity(idx)); | 1442 | irq_trigger(idx), irq_polarity(idx)); |
@@ -1448,6 +1497,7 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1448 | union IO_APIC_reg_03 reg_03; | 1497 | union IO_APIC_reg_03 reg_03; |
1449 | unsigned long flags; | 1498 | unsigned long flags; |
1450 | struct irq_cfg *cfg; | 1499 | struct irq_cfg *cfg; |
1500 | struct irq_desc *desc; | ||
1451 | unsigned int irq; | 1501 | unsigned int irq; |
1452 | 1502 | ||
1453 | if (apic_verbosity == APIC_QUIET) | 1503 | if (apic_verbosity == APIC_QUIET) |
@@ -1537,8 +1587,13 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1537 | } | 1587 | } |
1538 | } | 1588 | } |
1539 | printk(KERN_DEBUG "IRQ to pin mappings:\n"); | 1589 | printk(KERN_DEBUG "IRQ to pin mappings:\n"); |
1540 | for_each_irq_cfg(irq, cfg) { | 1590 | for_each_irq_desc(irq, desc) { |
1541 | struct irq_pin_list *entry = cfg->irq_2_pin; | 1591 | struct irq_pin_list *entry; |
1592 | |||
1593 | if (!desc) | ||
1594 | continue; | ||
1595 | cfg = desc->chip_data; | ||
1596 | entry = cfg->irq_2_pin; | ||
1542 | if (!entry) | 1597 | if (!entry) |
1543 | continue; | 1598 | continue; |
1544 | printk(KERN_DEBUG "IRQ%d ", irq); | 1599 | printk(KERN_DEBUG "IRQ%d ", irq); |
@@ -2022,6 +2077,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq) | |||
2022 | { | 2077 | { |
2023 | int was_pending = 0; | 2078 | int was_pending = 0; |
2024 | unsigned long flags; | 2079 | unsigned long flags; |
2080 | struct irq_cfg *cfg; | ||
2025 | 2081 | ||
2026 | spin_lock_irqsave(&ioapic_lock, flags); | 2082 | spin_lock_irqsave(&ioapic_lock, flags); |
2027 | if (irq < 16) { | 2083 | if (irq < 16) { |
@@ -2029,6 +2085,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq) | |||
2029 | if (i8259A_irq_pending(irq)) | 2085 | if (i8259A_irq_pending(irq)) |
2030 | was_pending = 1; | 2086 | was_pending = 1; |
2031 | } | 2087 | } |
2088 | cfg = irq_cfg(irq); | ||
2032 | __unmask_IO_APIC_irq(irq); | 2089 | __unmask_IO_APIC_irq(irq); |
2033 | spin_unlock_irqrestore(&ioapic_lock, flags); | 2090 | spin_unlock_irqrestore(&ioapic_lock, flags); |
2034 | 2091 | ||
@@ -2178,6 +2235,9 @@ static void ir_irq_migration(struct work_struct *work) | |||
2178 | struct irq_desc *desc; | 2235 | struct irq_desc *desc; |
2179 | 2236 | ||
2180 | for_each_irq_desc(irq, desc) { | 2237 | for_each_irq_desc(irq, desc) { |
2238 | if (!desc) | ||
2239 | continue; | ||
2240 | |||
2181 | if (desc->status & IRQ_MOVE_PENDING) { | 2241 | if (desc->status & IRQ_MOVE_PENDING) { |
2182 | unsigned long flags; | 2242 | unsigned long flags; |
2183 | 2243 | ||
@@ -2229,6 +2289,9 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
2229 | struct irq_cfg *cfg; | 2289 | struct irq_cfg *cfg; |
2230 | irq = __get_cpu_var(vector_irq)[vector]; | 2290 | irq = __get_cpu_var(vector_irq)[vector]; |
2231 | 2291 | ||
2292 | if (irq == -1) | ||
2293 | continue; | ||
2294 | |||
2232 | desc = irq_to_desc(irq); | 2295 | desc = irq_to_desc(irq); |
2233 | if (!desc) | 2296 | if (!desc) |
2234 | continue; | 2297 | continue; |
@@ -2430,8 +2493,12 @@ static inline void init_IO_APIC_traps(void) | |||
2430 | * Also, we've got to be careful not to trash gate | 2493 | * Also, we've got to be careful not to trash gate |
2431 | * 0x80, because int 0x80 is hm, kind of importantish. ;) | 2494 | * 0x80, because int 0x80 is hm, kind of importantish. ;) |
2432 | */ | 2495 | */ |
2433 | for_each_irq_cfg(irq, cfg) { | 2496 | for_each_irq_desc(irq, desc) { |
2434 | if (IO_APIC_IRQ(irq) && !cfg->vector) { | 2497 | if (!desc) |
2498 | continue; | ||
2499 | |||
2500 | cfg = desc->chip_data; | ||
2501 | if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { | ||
2435 | /* | 2502 | /* |
2436 | * Hmm.. We don't have an entry for this, | 2503 | * Hmm.. We don't have an entry for this, |
2437 | * so default to an old-fashioned 8259 | 2504 | * so default to an old-fashioned 8259 |
@@ -2439,11 +2506,9 @@ static inline void init_IO_APIC_traps(void) | |||
2439 | */ | 2506 | */ |
2440 | if (irq < 16) | 2507 | if (irq < 16) |
2441 | make_8259A_irq(irq); | 2508 | make_8259A_irq(irq); |
2442 | else { | 2509 | else |
2443 | desc = irq_to_desc(irq); | ||
2444 | /* Strange. Oh, well.. */ | 2510 | /* Strange. Oh, well.. */ |
2445 | desc->chip = &no_irq_chip; | 2511 | desc->chip = &no_irq_chip; |
2446 | } | ||
2447 | } | 2512 | } |
2448 | } | 2513 | } |
2449 | } | 2514 | } |
@@ -2654,7 +2719,7 @@ static inline void __init check_timer(void) | |||
2654 | * Ok, does IRQ0 through the IOAPIC work? | 2719 | * Ok, does IRQ0 through the IOAPIC work? |
2655 | */ | 2720 | */ |
2656 | if (no_pin1) { | 2721 | if (no_pin1) { |
2657 | add_pin_to_irq(0, apic1, pin1); | 2722 | add_pin_to_irq_cpu(0, boot_cpu_id, apic1, pin1); |
2658 | setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); | 2723 | setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); |
2659 | } | 2724 | } |
2660 | unmask_IO_APIC_irq(0); | 2725 | unmask_IO_APIC_irq(0); |
@@ -2683,7 +2748,7 @@ static inline void __init check_timer(void) | |||
2683 | /* | 2748 | /* |
2684 | * legacy devices should be connected to IO APIC #0 | 2749 | * legacy devices should be connected to IO APIC #0 |
2685 | */ | 2750 | */ |
2686 | replace_pin_at_irq(0, apic1, pin1, apic2, pin2); | 2751 | replace_pin_at_irq(0, boot_cpu_id, apic1, pin1, apic2, pin2); |
2687 | setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); | 2752 | setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); |
2688 | unmask_IO_APIC_irq(0); | 2753 | unmask_IO_APIC_irq(0); |
2689 | enable_8259A_irq(0); | 2754 | enable_8259A_irq(0); |
@@ -2902,21 +2967,25 @@ unsigned int create_irq_nr(unsigned int irq_want) | |||
2902 | unsigned int irq; | 2967 | unsigned int irq; |
2903 | unsigned int new; | 2968 | unsigned int new; |
2904 | unsigned long flags; | 2969 | unsigned long flags; |
2905 | struct irq_cfg *cfg_new; | 2970 | struct irq_cfg *cfg_new = NULL; |
2906 | 2971 | int cpu = boot_cpu_id; | |
2907 | irq_want = nr_irqs - 1; | 2972 | struct irq_desc *desc_new = NULL; |
2908 | 2973 | ||
2909 | irq = 0; | 2974 | irq = 0; |
2910 | spin_lock_irqsave(&vector_lock, flags); | 2975 | spin_lock_irqsave(&vector_lock, flags); |
2911 | for (new = irq_want; new > 0; new--) { | 2976 | for (new = irq_want; new > 0; new--) { |
2912 | if (platform_legacy_irq(new)) | 2977 | if (platform_legacy_irq(new)) |
2913 | continue; | 2978 | continue; |
2914 | cfg_new = irq_cfg(new); | 2979 | |
2915 | if (cfg_new && cfg_new->vector != 0) | 2980 | desc_new = irq_to_desc_alloc_cpu(new, cpu); |
2981 | if (!desc_new) { | ||
2982 | printk(KERN_INFO "can not get irq_desc for %d\n", new); | ||
2983 | continue; | ||
2984 | } | ||
2985 | cfg_new = desc_new->chip_data; | ||
2986 | |||
2987 | if (cfg_new->vector != 0) | ||
2916 | continue; | 2988 | continue; |
2917 | /* check if need to create one */ | ||
2918 | if (!cfg_new) | ||
2919 | cfg_new = irq_cfg_alloc(new); | ||
2920 | if (__assign_irq_vector(new, TARGET_CPUS) == 0) | 2989 | if (__assign_irq_vector(new, TARGET_CPUS) == 0) |
2921 | irq = new; | 2990 | irq = new; |
2922 | break; | 2991 | break; |
@@ -2925,6 +2994,9 @@ unsigned int create_irq_nr(unsigned int irq_want) | |||
2925 | 2994 | ||
2926 | if (irq > 0) { | 2995 | if (irq > 0) { |
2927 | dynamic_irq_init(irq); | 2996 | dynamic_irq_init(irq); |
2997 | /* restore it, in case dynamic_irq_init cleared it */ | ||
2998 | if (desc_new) | ||
2999 | desc_new->chip_data = cfg_new; | ||
2928 | } | 3000 | } |
2929 | return irq; | 3001 | return irq; |
2930 | } | 3002 | } |
@@ -2944,8 +3016,16 @@ int create_irq(void) | |||
2944 | void destroy_irq(unsigned int irq) | 3016 | void destroy_irq(unsigned int irq) |
2945 | { | 3017 | { |
2946 | unsigned long flags; | 3018 | unsigned long flags; |
3019 | struct irq_cfg *cfg; | ||
3020 | struct irq_desc *desc; | ||
2947 | 3021 | ||
3022 | /* store it, in case dynamic_irq_cleanup clears it */ | ||
3023 | desc = irq_to_desc(irq); | ||
3024 | cfg = desc->chip_data; | ||
2948 | dynamic_irq_cleanup(irq); | 3025 | dynamic_irq_cleanup(irq); |
3026 | /* connect back irq_cfg */ | ||
3027 | if (desc) | ||
3028 | desc->chip_data = cfg; | ||
2949 | 3029 | ||
2950 | #ifdef CONFIG_INTR_REMAP | 3030 | #ifdef CONFIG_INTR_REMAP |
2951 | free_irte(irq); | 3031 | free_irte(irq); |
@@ -3195,26 +3275,13 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq) | |||
3195 | return 0; | 3275 | return 0; |
3196 | } | 3276 | } |
3197 | 3277 | ||
3198 | static unsigned int build_irq_for_pci_dev(struct pci_dev *dev) | 3278 | int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc) |
3199 | { | ||
3200 | unsigned int irq; | ||
3201 | |||
3202 | irq = dev->bus->number; | ||
3203 | irq <<= 8; | ||
3204 | irq |= dev->devfn; | ||
3205 | irq <<= 12; | ||
3206 | |||
3207 | return irq; | ||
3208 | } | ||
3209 | |||
3210 | int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) | ||
3211 | { | 3279 | { |
3212 | unsigned int irq; | 3280 | unsigned int irq; |
3213 | int ret; | 3281 | int ret; |
3214 | unsigned int irq_want; | 3282 | unsigned int irq_want; |
3215 | 3283 | ||
3216 | irq_want = build_irq_for_pci_dev(dev) + 0x100; | 3284 | irq_want = nr_irqs - 1; |
3217 | |||
3218 | irq = create_irq_nr(irq_want); | 3285 | irq = create_irq_nr(irq_want); |
3219 | if (irq == 0) | 3286 | if (irq == 0) |
3220 | return -1; | 3287 | return -1; |
@@ -3228,7 +3295,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) | |||
3228 | goto error; | 3295 | goto error; |
3229 | no_ir: | 3296 | no_ir: |
3230 | #endif | 3297 | #endif |
3231 | ret = setup_msi_irq(dev, desc, irq); | 3298 | ret = setup_msi_irq(dev, msidesc, irq); |
3232 | if (ret < 0) { | 3299 | if (ret < 0) { |
3233 | destroy_irq(irq); | 3300 | destroy_irq(irq); |
3234 | return ret; | 3301 | return ret; |
@@ -3246,7 +3313,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
3246 | { | 3313 | { |
3247 | unsigned int irq; | 3314 | unsigned int irq; |
3248 | int ret, sub_handle; | 3315 | int ret, sub_handle; |
3249 | struct msi_desc *desc; | 3316 | struct msi_desc *msidesc; |
3250 | unsigned int irq_want; | 3317 | unsigned int irq_want; |
3251 | 3318 | ||
3252 | #ifdef CONFIG_INTR_REMAP | 3319 | #ifdef CONFIG_INTR_REMAP |
@@ -3254,10 +3321,11 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
3254 | int index = 0; | 3321 | int index = 0; |
3255 | #endif | 3322 | #endif |
3256 | 3323 | ||
3257 | irq_want = build_irq_for_pci_dev(dev) + 0x100; | 3324 | irq_want = nr_irqs - 1; |
3258 | sub_handle = 0; | 3325 | sub_handle = 0; |
3259 | list_for_each_entry(desc, &dev->msi_list, list) { | 3326 | list_for_each_entry(msidesc, &dev->msi_list, list) { |
3260 | irq = create_irq_nr(irq_want--); | 3327 | irq = create_irq_nr(irq_want); |
3328 | irq_want--; | ||
3261 | if (irq == 0) | 3329 | if (irq == 0) |
3262 | return -1; | 3330 | return -1; |
3263 | #ifdef CONFIG_INTR_REMAP | 3331 | #ifdef CONFIG_INTR_REMAP |
@@ -3289,7 +3357,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
3289 | } | 3357 | } |
3290 | no_ir: | 3358 | no_ir: |
3291 | #endif | 3359 | #endif |
3292 | ret = setup_msi_irq(dev, desc, irq); | 3360 | ret = setup_msi_irq(dev, msidesc, irq); |
3293 | if (ret < 0) | 3361 | if (ret < 0) |
3294 | goto error; | 3362 | goto error; |
3295 | sub_handle++; | 3363 | sub_handle++; |
@@ -3707,17 +3775,29 @@ int __init io_apic_get_version(int ioapic) | |||
3707 | 3775 | ||
3708 | int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity) | 3776 | int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity) |
3709 | { | 3777 | { |
3778 | struct irq_desc *desc; | ||
3779 | struct irq_cfg *cfg; | ||
3780 | int cpu = boot_cpu_id; | ||
3781 | |||
3710 | if (!IO_APIC_IRQ(irq)) { | 3782 | if (!IO_APIC_IRQ(irq)) { |
3711 | apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", | 3783 | apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", |
3712 | ioapic); | 3784 | ioapic); |
3713 | return -EINVAL; | 3785 | return -EINVAL; |
3714 | } | 3786 | } |
3715 | 3787 | ||
3788 | desc = irq_to_desc_alloc_cpu(irq, cpu); | ||
3789 | if (!desc) { | ||
3790 | printk(KERN_INFO "can not get irq_desc %d\n", irq); | ||
3791 | return 0; | ||
3792 | } | ||
3793 | |||
3716 | /* | 3794 | /* |
3717 | * IRQs < 16 are already in the irq_2_pin[] map | 3795 | * IRQs < 16 are already in the irq_2_pin[] map |
3718 | */ | 3796 | */ |
3719 | if (irq >= 16) | 3797 | if (irq >= 16) { |
3720 | add_pin_to_irq(irq, ioapic, pin); | 3798 | cfg = desc->chip_data; |
3799 | add_pin_to_irq_cpu(irq, cpu, ioapic, pin); | ||
3800 | } | ||
3721 | 3801 | ||
3722 | setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity); | 3802 | setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity); |
3723 | 3803 | ||
@@ -3773,7 +3853,8 @@ void __init setup_ioapic_dest(void) | |||
3773 | * when you have too many devices, because at that time only boot | 3853 | * when you have too many devices, because at that time only boot |
3774 | * cpu is online. | 3854 | * cpu is online. |
3775 | */ | 3855 | */ |
3776 | cfg = irq_cfg(irq); | 3856 | desc = irq_to_desc(irq); |
3857 | cfg = desc->chip_data; | ||
3777 | if (!cfg->vector) { | 3858 | if (!cfg->vector) { |
3778 | setup_IO_APIC_irq(ioapic, pin, irq, | 3859 | setup_IO_APIC_irq(ioapic, pin, irq, |
3779 | irq_trigger(irq_entry), | 3860 | irq_trigger(irq_entry), |
@@ -3785,7 +3866,6 @@ void __init setup_ioapic_dest(void) | |||
3785 | /* | 3866 | /* |
3786 | * Honour affinities which have been set in early boot | 3867 | * Honour affinities which have been set in early boot |
3787 | */ | 3868 | */ |
3788 | desc = irq_to_desc(irq); | ||
3789 | if (desc->status & | 3869 | if (desc->status & |
3790 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) | 3870 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) |
3791 | mask = desc->affinity; | 3871 | mask = desc->affinity; |
@@ -3846,7 +3926,6 @@ void __init ioapic_init_mappings(void) | |||
3846 | struct resource *ioapic_res; | 3926 | struct resource *ioapic_res; |
3847 | int i; | 3927 | int i; |
3848 | 3928 | ||
3849 | irq_2_pin_init(); | ||
3850 | ioapic_res = ioapic_setup_resources(); | 3929 | ioapic_res = ioapic_setup_resources(); |
3851 | for (i = 0; i < nr_ioapics; i++) { | 3930 | for (i = 0; i < nr_ioapics; i++) { |
3852 | if (smp_found_config) { | 3931 | if (smp_found_config) { |
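
Taken together, the io_apic.c changes thread a cpu argument through the pin bookkeeping so that irq_desc, irq_cfg and irq_pin_list entries are kzalloc_node()'d near the CPU that first configures an irq. A condensed sketch of the resulting setup path (hypothetical helper name; error handling and locking trimmed):

static void __init setup_one_ioapic_pin(unsigned int irq, int apic, int pin,
                                        int trigger, int polarity)
{
        int cpu = boot_cpu_id;
        struct irq_desc *desc;

        /* Allocates the descriptor (and, via arch_init_chip_data(),
         * its irq_cfg) on cpu's node the first time this irq is seen. */
        desc = irq_to_desc_alloc_cpu(irq, cpu);
        if (!desc)
                return;

        /* Chains an irq_2_pin entry, also allocated node-locally. */
        add_pin_to_irq_cpu(irq, cpu, apic, pin);

        setup_IO_APIC_irq(apic, pin, irq, trigger, polarity);
}
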
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index d1d4dc52f649..3f1d9d18df67 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -118,6 +118,9 @@ int show_interrupts(struct seq_file *p, void *v) | |||
118 | } | 118 | } |
119 | 119 | ||
120 | desc = irq_to_desc(i); | 120 | desc = irq_to_desc(i); |
121 | if (!desc) | ||
122 | return 0; | ||
123 | |||
121 | spin_lock_irqsave(&desc->lock, flags); | 124 | spin_lock_irqsave(&desc->lock, flags); |
122 | #ifndef CONFIG_SMP | 125 | #ifndef CONFIG_SMP |
123 | any_count = kstat_irqs(i); | 126 | any_count = kstat_irqs(i); |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index a51382672de0..119fc9c8ff7f 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -242,6 +242,8 @@ void fixup_irqs(cpumask_t map) | |||
242 | for_each_irq_desc(irq, desc) { | 242 | for_each_irq_desc(irq, desc) { |
243 | cpumask_t mask; | 243 | cpumask_t mask; |
244 | 244 | ||
245 | if (!desc) | ||
246 | continue; | ||
245 | if (irq == 2) | 247 | if (irq == 2) |
246 | continue; | 248 | continue; |
247 | 249 | ||
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 60eb84eb77a0..900009c70591 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -94,6 +94,8 @@ void fixup_irqs(cpumask_t map) | |||
94 | int break_affinity = 0; | 94 | int break_affinity = 0; |
95 | int set_affinity = 1; | 95 | int set_affinity = 1; |
96 | 96 | ||
97 | if (!desc) | ||
98 | continue; | ||
97 | if (irq == 2) | 99 | if (irq == 2) |
98 | continue; | 100 | continue; |
99 | 101 | ||
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 845aa9803e80..5a5651b7f9e6 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -69,7 +69,6 @@ void __init init_ISA_irqs (void) | |||
69 | * 16 old-style INTA-cycle interrupts: | 69 | * 16 old-style INTA-cycle interrupts: |
70 | */ | 70 | */ |
71 | for (i = 0; i < 16; i++) { | 71 | for (i = 0; i < 16; i++) { |
72 | /* first time call this irq_desc */ | ||
73 | struct irq_desc *desc = irq_to_desc(i); | 72 | struct irq_desc *desc = irq_to_desc(i); |
74 | 73 | ||
75 | desc->status = IRQ_DISABLED; | 74 | desc->status = IRQ_DISABLED; |
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index ff0235391285..cd9f42d028d9 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -143,7 +143,6 @@ void __init init_ISA_irqs(void) | |||
143 | init_8259A(0); | 143 | init_8259A(0); |
144 | 144 | ||
145 | for (i = 0; i < 16; i++) { | 145 | for (i = 0; i < 16; i++) { |
146 | /* first time call this irq_desc */ | ||
147 | struct irq_desc *desc = irq_to_desc(i); | 146 | struct irq_desc *desc = irq_to_desc(i); |
148 | 147 | ||
149 | desc->status = IRQ_DISABLED; | 148 | desc->status = IRQ_DISABLED; |
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 675076f5fca8..d26891bfcd41 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -558,23 +558,9 @@ struct timer_rand_state { | |||
558 | unsigned dont_count_entropy:1; | 558 | unsigned dont_count_entropy:1; |
559 | }; | 559 | }; |
560 | 560 | ||
561 | static struct timer_rand_state *irq_timer_state[NR_IRQS]; | 561 | #ifndef CONFIG_SPARSE_IRQ |
562 | 562 | struct timer_rand_state *irq_timer_state[NR_IRQS]; | |
563 | static struct timer_rand_state *get_timer_rand_state(unsigned int irq) | 563 | #endif |
564 | { | ||
565 | if (irq >= nr_irqs) | ||
566 | return NULL; | ||
567 | |||
568 | return irq_timer_state[irq]; | ||
569 | } | ||
570 | |||
571 | static void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state) | ||
572 | { | ||
573 | if (irq >= nr_irqs) | ||
574 | return; | ||
575 | |||
576 | irq_timer_state[irq] = state; | ||
577 | } | ||
578 | 564 | ||
579 | static struct timer_rand_state input_timer_state; | 565 | static struct timer_rand_state input_timer_state; |
580 | 566 | ||
@@ -933,8 +919,10 @@ void rand_initialize_irq(int irq) | |||
933 | { | 919 | { |
934 | struct timer_rand_state *state; | 920 | struct timer_rand_state *state; |
935 | 921 | ||
922 | #ifndef CONFIG_SPARSE_IRQ | ||
936 | if (irq >= nr_irqs) | 923 | if (irq >= nr_irqs) |
937 | return; | 924 | return; |
925 | #endif | ||
938 | 926 | ||
939 | state = get_timer_rand_state(irq); | 927 | state = get_timer_rand_state(irq); |
940 | 928 | ||
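
With CONFIG_SPARSE_IRQ the per-irq timer_rand_state pointer moves out of the irq_timer_state[] array and into the irq descriptor; get_timer_rand_state()/set_timer_rand_state() become the inline helpers added to include/linux/random.h further down. Roughly, rand_initialize_irq() then behaves like this sketch (simplified; allocation failure is silently ignored):

void rand_initialize_irq_sketch(int irq)
{
        struct timer_rand_state *state;

        if (get_timer_rand_state(irq))          /* already initialized */
                return;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (state)
                set_timer_rand_state(irq, state);  /* lands in desc->timer_rand_state */
}
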
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 2de5a3238c94..c9958ec5e25e 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -19,17 +19,75 @@ struct irq_2_iommu { | |||
19 | u8 irte_mask; | 19 | u8 irte_mask; |
20 | }; | 20 | }; |
21 | 21 | ||
22 | static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; | 22 | #ifdef CONFIG_SPARSE_IRQ |
23 | static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu) | ||
24 | { | ||
25 | struct irq_2_iommu *iommu; | ||
26 | int node; | ||
27 | |||
28 | node = cpu_to_node(cpu); | ||
29 | |||
30 | iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); | ||
31 | printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node); | ||
32 | |||
33 | return iommu; | ||
34 | } | ||
23 | 35 | ||
24 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | 36 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) |
25 | { | 37 | { |
26 | return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL; | 38 | struct irq_desc *desc; |
39 | |||
40 | desc = irq_to_desc(irq); | ||
41 | |||
42 | if (WARN_ON_ONCE(!desc)) | ||
43 | return NULL; | ||
44 | |||
45 | return desc->irq_2_iommu; | ||
46 | } | ||
47 | |||
48 | static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu) | ||
49 | { | ||
50 | struct irq_desc *desc; | ||
51 | struct irq_2_iommu *irq_iommu; | ||
52 | |||
53 | /* | ||
54 | * alloc irq desc if not allocated already. | ||
55 | */ | ||
56 | desc = irq_to_desc_alloc_cpu(irq, cpu); | ||
57 | if (!desc) { | ||
58 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | ||
59 | return NULL; | ||
60 | } | ||
61 | |||
62 | irq_iommu = desc->irq_2_iommu; | ||
63 | |||
64 | if (!irq_iommu) | ||
65 | desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu); | ||
66 | |||
67 | return desc->irq_2_iommu; | ||
27 | } | 68 | } |
28 | 69 | ||
29 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) | 70 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) |
30 | { | 71 | { |
72 | return irq_2_iommu_alloc_cpu(irq, boot_cpu_id); | ||
73 | } | ||
74 | |||
75 | #else /* !CONFIG_SPARSE_IRQ */ | ||
76 | |||
77 | static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; | ||
78 | |||
79 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | ||
80 | { | ||
81 | if (irq < nr_irqs) | ||
82 | return &irq_2_iommuX[irq]; | ||
83 | |||
84 | return NULL; | ||
85 | } | ||
86 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) | ||
87 | { | ||
31 | return irq_2_iommu(irq); | 88 | return irq_2_iommu(irq); |
32 | } | 89 | } |
90 | #endif | ||
33 | 91 | ||
34 | static DEFINE_SPINLOCK(irq_2_ir_lock); | 92 | static DEFINE_SPINLOCK(irq_2_ir_lock); |
35 | 93 | ||
@@ -86,9 +144,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
86 | if (!count) | 144 | if (!count) |
87 | return -1; | 145 | return -1; |
88 | 146 | ||
147 | #ifndef CONFIG_SPARSE_IRQ | ||
89 | /* protect irq_2_iommu_alloc later */ | 148 | /* protect irq_2_iommu_alloc later */ |
90 | if (irq >= nr_irqs) | 149 | if (irq >= nr_irqs) |
91 | return -1; | 150 | return -1; |
151 | #endif | ||
92 | 152 | ||
93 | /* | 153 | /* |
94 | * start the IRTE search from index 0. | 154 | * start the IRTE search from index 0. |
@@ -130,6 +190,12 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |||
130 | table->base[i].present = 1; | 190 | table->base[i].present = 1; |
131 | 191 | ||
132 | irq_iommu = irq_2_iommu_alloc(irq); | 192 | irq_iommu = irq_2_iommu_alloc(irq); |
193 | if (!irq_iommu) { | ||
194 | spin_unlock(&irq_2_ir_lock); | ||
195 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); | ||
196 | return -1; | ||
197 | } | ||
198 | |||
133 | irq_iommu->iommu = iommu; | 199 | irq_iommu->iommu = iommu; |
134 | irq_iommu->irte_index = index; | 200 | irq_iommu->irte_index = index; |
135 | irq_iommu->sub_handle = 0; | 201 | irq_iommu->sub_handle = 0; |
@@ -177,6 +243,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | |||
177 | 243 | ||
178 | irq_iommu = irq_2_iommu_alloc(irq); | 244 | irq_iommu = irq_2_iommu_alloc(irq); |
179 | 245 | ||
246 | if (!irq_iommu) { | ||
247 | spin_unlock(&irq_2_ir_lock); | ||
248 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); | ||
249 | return -1; | ||
250 | } | ||
251 | |||
180 | irq_iommu->iommu = iommu; | 252 | irq_iommu->iommu = iommu; |
181 | irq_iommu->irte_index = index; | 253 | irq_iommu->irte_index = index; |
182 | irq_iommu->sub_handle = subhandle; | 254 | irq_iommu->sub_handle = subhandle; |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 1e3b934a4cf7..2924faa7f6c4 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -141,8 +141,12 @@ static void init_evtchn_cpu_bindings(void) | |||
141 | int i; | 141 | int i; |
142 | 142 | ||
143 | /* By default all event channels notify CPU#0. */ | 143 | /* By default all event channels notify CPU#0. */ |
144 | for_each_irq_desc(i, desc) | 144 | for_each_irq_desc(i, desc) { |
145 | if (!desc) | ||
146 | continue; | ||
147 | |||
145 | desc->affinity = cpumask_of_cpu(0); | 148 | desc->affinity = cpumask_of_cpu(0); |
149 | } | ||
146 | #endif | 150 | #endif |
147 | 151 | ||
148 | memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); | 152 | memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); |
@@ -231,7 +235,7 @@ static int find_unbound_irq(void) | |||
231 | int irq; | 235 | int irq; |
232 | 236 | ||
233 | /* Only allocate from dynirq range */ | 237 | /* Only allocate from dynirq range */ |
234 | for_each_irq_nr(irq) | 238 | for (irq = 0; irq < nr_irqs; irq++) |
235 | if (irq_bindcount[irq] == 0) | 239 | if (irq_bindcount[irq] == 0) |
236 | break; | 240 | break; |
237 | 241 | ||
@@ -792,7 +796,7 @@ void xen_irq_resume(void) | |||
792 | mask_evtchn(evtchn); | 796 | mask_evtchn(evtchn); |
793 | 797 | ||
794 | /* No IRQ <-> event-channel mappings. */ | 798 | /* No IRQ <-> event-channel mappings. */ |
795 | for_each_irq_nr(irq) | 799 | for (irq = 0; irq < nr_irqs; irq++) |
796 | irq_info[irq].evtchn = 0; /* zap event-channel binding */ | 800 | irq_info[irq].evtchn = 0; /* zap event-channel binding */ |
797 | 801 | ||
798 | for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) | 802 | for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) |
@@ -824,7 +828,7 @@ void __init xen_init_IRQ(void) | |||
824 | mask_evtchn(i); | 828 | mask_evtchn(i); |
825 | 829 | ||
826 | /* Dynamic IRQ space is currently unbound. Zero the refcnts. */ | 830 | /* Dynamic IRQ space is currently unbound. Zero the refcnts. */ |
827 | for_each_irq_nr(i) | 831 | for (i = 0; i < nr_irqs; i++) |
828 | irq_bindcount[i] = 0; | 832 | irq_bindcount[i] = 0; |
829 | 833 | ||
830 | irq_ctx_init(smp_processor_id()); | 834 | irq_ctx_init(smp_processor_id()); |
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 81904f07679d..a13431ab7c65 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -27,6 +27,7 @@ static int show_stat(struct seq_file *p, void *v) | |||
27 | u64 sum = 0; | 27 | u64 sum = 0; |
28 | struct timespec boottime; | 28 | struct timespec boottime; |
29 | unsigned int per_irq_sum; | 29 | unsigned int per_irq_sum; |
30 | struct irq_desc *desc; | ||
30 | 31 | ||
31 | user = nice = system = idle = iowait = | 32 | user = nice = system = idle = iowait = |
32 | irq = softirq = steal = cputime64_zero; | 33 | irq = softirq = steal = cputime64_zero; |
@@ -44,10 +45,11 @@ static int show_stat(struct seq_file *p, void *v) | |||
44 | softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); | 45 | softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); |
45 | steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); | 46 | steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); |
46 | guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); | 47 | guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); |
47 | 48 | for_each_irq_desc(j, desc) { | |
48 | for_each_irq_nr(j) | 49 | if (!desc) |
50 | continue; | ||
49 | sum += kstat_irqs_cpu(j, i); | 51 | sum += kstat_irqs_cpu(j, i); |
50 | 52 | } | |
51 | sum += arch_irq_stat_cpu(i); | 53 | sum += arch_irq_stat_cpu(i); |
52 | } | 54 | } |
53 | sum += arch_irq_stat(); | 55 | sum += arch_irq_stat(); |
@@ -90,11 +92,14 @@ static int show_stat(struct seq_file *p, void *v) | |||
90 | seq_printf(p, "intr %llu", (unsigned long long)sum); | 92 | seq_printf(p, "intr %llu", (unsigned long long)sum); |
91 | 93 | ||
92 | /* sum again ? it could be updated? */ | 94 | /* sum again ? it could be updated? */ |
93 | for_each_irq_nr(j) { | 95 | for (j = 0; j < NR_IRQS; j++) { |
96 | desc = irq_to_desc(j); | ||
94 | per_irq_sum = 0; | 97 | per_irq_sum = 0; |
95 | 98 | ||
96 | for_each_possible_cpu(i) | 99 | if (desc) { |
97 | per_irq_sum += kstat_irqs_cpu(j, i); | 100 | for_each_possible_cpu(i) |
101 | per_irq_sum += kstat_irqs_cpu(j, i); | ||
102 | } | ||
98 | 103 | ||
99 | seq_printf(p, " %u", per_irq_sum); | 104 | seq_printf(p, " %u", per_irq_sum); |
100 | } | 105 | } |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index f58a0cf8929a..79e915e7e8a5 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -18,6 +18,8 @@ | |||
18 | #include <asm/ptrace.h> | 18 | #include <asm/ptrace.h> |
19 | #include <asm/system.h> | 19 | #include <asm/system.h> |
20 | 20 | ||
21 | extern int nr_irqs; | ||
22 | |||
21 | /* | 23 | /* |
22 | * These correspond to the IORESOURCE_IRQ_* defines in | 24 | * These correspond to the IORESOURCE_IRQ_* defines in |
23 | * linux/ioport.h to select the interrupt line behaviour. When | 25 | * linux/ioport.h to select the interrupt line behaviour. When |
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3dddfa703ebd..63b00439d4d2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -129,6 +129,8 @@ struct irq_chip { | |||
129 | const char *typename; | 129 | const char *typename; |
130 | }; | 130 | }; |
131 | 131 | ||
132 | struct timer_rand_state; | ||
133 | struct irq_2_iommu; | ||
132 | /** | 134 | /** |
133 | * struct irq_desc - interrupt descriptor | 135 | * struct irq_desc - interrupt descriptor |
134 | * @irq: interrupt number for this descriptor | 136 | * @irq: interrupt number for this descriptor |
@@ -154,6 +156,13 @@ struct irq_chip { | |||
154 | */ | 156 | */ |
155 | struct irq_desc { | 157 | struct irq_desc { |
156 | unsigned int irq; | 158 | unsigned int irq; |
159 | #ifdef CONFIG_SPARSE_IRQ | ||
160 | struct timer_rand_state *timer_rand_state; | ||
161 | unsigned int *kstat_irqs; | ||
162 | # ifdef CONFIG_INTR_REMAP | ||
163 | struct irq_2_iommu *irq_2_iommu; | ||
164 | # endif | ||
165 | #endif | ||
157 | irq_flow_handler_t handle_irq; | 166 | irq_flow_handler_t handle_irq; |
158 | struct irq_chip *chip; | 167 | struct irq_chip *chip; |
159 | struct msi_desc *msi_desc; | 168 | struct msi_desc *msi_desc; |
@@ -181,14 +190,52 @@ struct irq_desc { | |||
181 | const char *name; | 190 | const char *name; |
182 | } ____cacheline_internodealigned_in_smp; | 191 | } ____cacheline_internodealigned_in_smp; |
183 | 192 | ||
193 | extern void early_irq_init(void); | ||
194 | extern void arch_early_irq_init(void); | ||
195 | extern void arch_init_chip_data(struct irq_desc *desc, int cpu); | ||
196 | extern void arch_init_copy_chip_data(struct irq_desc *old_desc, | ||
197 | struct irq_desc *desc, int cpu); | ||
198 | extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); | ||
199 | |||
200 | #ifndef CONFIG_SPARSE_IRQ | ||
184 | 201 | ||
185 | extern struct irq_desc irq_desc[NR_IRQS]; | 202 | extern struct irq_desc irq_desc[NR_IRQS]; |
186 | 203 | ||
187 | static inline struct irq_desc *irq_to_desc(unsigned int irq) | 204 | static inline struct irq_desc *irq_to_desc(unsigned int irq) |
188 | { | 205 | { |
189 | return (irq < nr_irqs) ? irq_desc + irq : NULL; | 206 | return (irq < NR_IRQS) ? irq_desc + irq : NULL; |
207 | } | ||
208 | static inline struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) | ||
209 | { | ||
210 | return irq_to_desc(irq); | ||
190 | } | 211 | } |
191 | 212 | ||
213 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
214 | # define for_each_irq_desc(irq, desc) \ | ||
215 | for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++) | ||
216 | # define for_each_irq_desc_reverse(irq, desc) \ | ||
217 | for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \ | ||
218 | irq >= 0; irq--, desc--) | ||
219 | #endif | ||
220 | |||
221 | #else | ||
222 | |||
223 | extern struct irq_desc *irq_to_desc(unsigned int irq); | ||
224 | extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu); | ||
225 | extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu); | ||
226 | |||
227 | # define for_each_irq_desc(irq, desc) \ | ||
228 | for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; irq++, desc = irq_to_desc(irq)) | ||
229 | # define for_each_irq_desc_reverse(irq, desc) \ | ||
230 | for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; irq--, desc = irq_to_desc(irq)) | ||
231 | |||
232 | #define kstat_irqs_this_cpu(DESC) \ | ||
233 | ((DESC)->kstat_irqs[smp_processor_id()]) | ||
234 | #define kstat_incr_irqs_this_cpu(irqno, DESC) \ | ||
235 | ((DESC)->kstat_irqs[smp_processor_id()]++) | ||
236 | |||
237 | #endif | ||
238 | |||
192 | /* | 239 | /* |
193 | * Migration helpers for obsolete names, they will go away: | 240 | * Migration helpers for obsolete names, they will go away: |
194 | */ | 241 | */ |
@@ -380,6 +427,11 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); | |||
380 | #define get_irq_data(irq) (irq_to_desc(irq)->handler_data) | 427 | #define get_irq_data(irq) (irq_to_desc(irq)->handler_data) |
381 | #define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc) | 428 | #define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc) |
382 | 429 | ||
430 | #define get_irq_desc_chip(desc) ((desc)->chip) | ||
431 | #define get_irq_desc_chip_data(desc) ((desc)->chip_data) | ||
432 | #define get_irq_desc_data(desc) ((desc)->handler_data) | ||
433 | #define get_irq_desc_msi(desc) ((desc)->msi_desc) | ||
434 | |||
383 | #endif /* CONFIG_GENERIC_HARDIRQS */ | 435 | #endif /* CONFIG_GENERIC_HARDIRQS */ |
384 | 436 | ||
385 | #endif /* !CONFIG_S390 */ | 437 | #endif /* !CONFIG_S390 */ |
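
The iteration macros above are the reason for the "if (!desc) continue;" checks added throughout this patch: under CONFIG_SPARSE_IRQ the descriptor for a given irq number can be NULL. An illustrative walker using the new accessors (not part of the patch):

static void print_requested_irqs(void)
{
        struct irq_desc *desc;
        int irq;

        for_each_irq_desc(irq, desc) {
                if (!desc)                      /* hole in the sparse irq space */
                        continue;
                if (!desc->action)              /* nothing has requested this irq */
                        continue;
                printk(KERN_DEBUG "irq %d: chip %s\n",
                       irq, get_irq_desc_chip(desc)->name);
        }
}
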
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 452c280c8115..7a299e989f8b 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -7,18 +7,10 @@ | |||
7 | 7 | ||
8 | # define for_each_irq_desc(irq, desc) \ | 8 | # define for_each_irq_desc(irq, desc) \ |
9 | for (irq = 0; irq < nr_irqs; irq++) | 9 | for (irq = 0; irq < nr_irqs; irq++) |
10 | #else | ||
11 | extern int nr_irqs; | ||
12 | 10 | ||
13 | # define for_each_irq_desc(irq, desc) \ | 11 | static inline void early_sparse_irq_init(void)
14 | for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++) | 12 | { |
15 | 13 | } | |
16 | # define for_each_irq_desc_reverse(irq, desc) \ | ||
17 | for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \ | ||
18 | irq >= 0; irq--, desc--) | ||
19 | #endif | 14 | #endif |
20 | 15 | ||
21 | #define for_each_irq_nr(irq) \ | ||
22 | for (irq = 0; irq < nr_irqs; irq++) | ||
23 | |||
24 | #endif | 16 | #endif |
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 4a145caeee07..4ee4b3d2316f 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -28,7 +28,9 @@ struct cpu_usage_stat { | |||
28 | 28 | ||
29 | struct kernel_stat { | 29 | struct kernel_stat { |
30 | struct cpu_usage_stat cpustat; | 30 | struct cpu_usage_stat cpustat; |
31 | unsigned int irqs[NR_IRQS]; | 31 | #ifndef CONFIG_SPARSE_IRQ |
32 | unsigned int irqs[NR_IRQS]; | ||
33 | #endif | ||
32 | }; | 34 | }; |
33 | 35 | ||
34 | DECLARE_PER_CPU(struct kernel_stat, kstat); | 36 | DECLARE_PER_CPU(struct kernel_stat, kstat); |
@@ -39,6 +41,10 @@ DECLARE_PER_CPU(struct kernel_stat, kstat); | |||
39 | 41 | ||
40 | extern unsigned long long nr_context_switches(void); | 42 | extern unsigned long long nr_context_switches(void); |
41 | 43 | ||
44 | #ifndef CONFIG_SPARSE_IRQ | ||
45 | #define kstat_irqs_this_cpu(irq) \ | ||
46 | (kstat_this_cpu.irqs[irq]) | ||
47 | |||
42 | struct irq_desc; | 48 | struct irq_desc; |
43 | 49 | ||
44 | static inline void kstat_incr_irqs_this_cpu(unsigned int irq, | 50 | static inline void kstat_incr_irqs_this_cpu(unsigned int irq, |
@@ -46,11 +52,17 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq, | |||
46 | { | 52 | { |
47 | kstat_this_cpu.irqs[irq]++; | 53 | kstat_this_cpu.irqs[irq]++; |
48 | } | 54 | } |
55 | #endif | ||
56 | |||
49 | 57 | ||
58 | #ifndef CONFIG_SPARSE_IRQ | ||
50 | static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) | 59 | static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) |
51 | { | 60 | { |
52 | return kstat_cpu(cpu).irqs[irq]; | 61 | return kstat_cpu(cpu).irqs[irq]; |
53 | } | 62 | } |
63 | #else | ||
64 | extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu); | ||
65 | #endif | ||
54 | 66 | ||
55 | /* | 67 | /* |
56 | * Number of interrupts per specific IRQ source, since bootup | 68 | * Number of interrupts per specific IRQ source, since bootup |
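
Correspondingly, the per-irq counters leave the static kstat.irqs[NR_IRQS] array: with CONFIG_SPARSE_IRQ they live in desc->kstat_irqs, and kstat_irqs_cpu() becomes an out-of-line accessor. One plausible implementation, consistent with the fields this patch adds (not copied from the patch itself):

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc ? desc->kstat_irqs[cpu] : 0;
}
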
diff --git a/include/linux/random.h b/include/linux/random.h
index 36f125c0c603..ad9daa2374d5 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -44,6 +44,57 @@ struct rand_pool_info { | |||
44 | 44 | ||
45 | extern void rand_initialize_irq(int irq); | 45 | extern void rand_initialize_irq(int irq); |
46 | 46 | ||
47 | struct timer_rand_state; | ||
48 | #ifndef CONFIG_SPARSE_IRQ | ||
49 | |||
50 | extern struct timer_rand_state *irq_timer_state[]; | ||
51 | |||
52 | extern int nr_irqs; | ||
53 | static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq) | ||
54 | { | ||
55 | if (irq >= nr_irqs) | ||
56 | return NULL; | ||
57 | |||
58 | return irq_timer_state[irq]; | ||
59 | } | ||
60 | |||
61 | static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state) | ||
62 | { | ||
63 | if (irq >= nr_irqs) | ||
64 | return; | ||
65 | |||
66 | irq_timer_state[irq] = state; | ||
67 | } | ||
68 | |||
69 | #else | ||
70 | |||
71 | #include <linux/irq.h> | ||
72 | static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq) | ||
73 | { | ||
74 | struct irq_desc *desc; | ||
75 | |||
76 | desc = irq_to_desc(irq); | ||
77 | |||
78 | if (!desc) | ||
79 | return NULL; | ||
80 | |||
81 | return desc->timer_rand_state; | ||
82 | } | ||
83 | |||
84 | static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state) | ||
85 | { | ||
86 | struct irq_desc *desc; | ||
87 | |||
88 | desc = irq_to_desc(irq); | ||
89 | |||
90 | if (!desc) | ||
91 | return; | ||
92 | |||
93 | desc->timer_rand_state = state; | ||
94 | } | ||
95 | #endif | ||
96 | |||
97 | |||
47 | extern void add_input_randomness(unsigned int type, unsigned int code, | 98 | extern void add_input_randomness(unsigned int type, unsigned int code, |
48 | unsigned int value); | 99 | unsigned int value); |
49 | extern void add_interrupt_randomness(int irq); | 100 | extern void add_interrupt_randomness(int irq); |
diff --git a/init/main.c b/init/main.c
index 7e117a231af1..c1f999a3cf31 100644
--- a/init/main.c
+++ b/init/main.c
@@ -539,6 +539,15 @@ void __init __weak thread_info_cache_init(void) | |||
539 | { | 539 | { |
540 | } | 540 | } |
541 | 541 | ||
542 | void __init __weak arch_early_irq_init(void) | ||
543 | { | ||
544 | } | ||
545 | |||
546 | void __init __weak early_irq_init(void) | ||
547 | { | ||
548 | arch_early_irq_init(); | ||
549 | } | ||
550 | |||
542 | asmlinkage void __init start_kernel(void) | 551 | asmlinkage void __init start_kernel(void) |
543 | { | 552 | { |
544 | char * command_line; | 553 | char * command_line; |
@@ -603,6 +612,8 @@ asmlinkage void __init start_kernel(void) | |||
603 | sort_main_extable(); | 612 | sort_main_extable(); |
604 | trap_init(); | 613 | trap_init(); |
605 | rcu_init(); | 614 | rcu_init(); |
615 | /* init some links before init_ISA_irqs() */ | ||
616 | early_irq_init(); | ||
606 | init_IRQ(); | 617 | init_IRQ(); |
607 | pidhash_init(); | 618 | pidhash_init(); |
608 | init_timers(); | 619 | init_timers(); |
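
start_kernel() now calls early_irq_init() before init_IRQ(), and the __weak definitions added here are only fallbacks: a non-weak symbol elsewhere replaces them at link time, which is how arch/x86/kernel/io_apic.c hooks in the arch_early_irq_init() shown above. The pattern, compressed:

/*
 * Weak/strong override: init/main.c provides
 *
 *         void __init __weak arch_early_irq_init(void) { }
 *
 * and any architecture that defines a non-weak arch_early_irq_init()
 * wins at link time.  x86 does so in io_apic.c (see the hunk above),
 * linking the statically initialized irq_cfg entries for the 16 legacy
 * irqs into their freshly created descriptors:
 */
void __init arch_early_irq_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(irq_cfgx); i++)
                irq_to_desc(i)->chip_data = &irq_cfgx[i];
}
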
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index cc0f7321b8ce..650ce4102a63 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -40,6 +40,9 @@ unsigned long probe_irq_on(void) | |||
40 | * flush such a longstanding irq before considering it as spurious. | 40 | * flush such a longstanding irq before considering it as spurious. |
41 | */ | 41 | */ |
42 | for_each_irq_desc_reverse(i, desc) { | 42 | for_each_irq_desc_reverse(i, desc) { |
43 | if (!desc) | ||
44 | continue; | ||
45 | |||
43 | spin_lock_irq(&desc->lock); | 46 | spin_lock_irq(&desc->lock); |
44 | if (!desc->action && !(desc->status & IRQ_NOPROBE)) { | 47 | if (!desc->action && !(desc->status & IRQ_NOPROBE)) { |
45 | /* | 48 | /* |
@@ -68,6 +71,9 @@ unsigned long probe_irq_on(void) | |||
68 | * happened in the previous stage, it may have masked itself) | 71 | * happened in the previous stage, it may have masked itself) |
69 | */ | 72 | */ |
70 | for_each_irq_desc_reverse(i, desc) { | 73 | for_each_irq_desc_reverse(i, desc) { |
74 | if (!desc) | ||
75 | continue; | ||
76 | |||
71 | spin_lock_irq(&desc->lock); | 77 | spin_lock_irq(&desc->lock); |
72 | if (!desc->action && !(desc->status & IRQ_NOPROBE)) { | 78 | if (!desc->action && !(desc->status & IRQ_NOPROBE)) { |
73 | desc->status |= IRQ_AUTODETECT | IRQ_WAITING; | 79 | desc->status |= IRQ_AUTODETECT | IRQ_WAITING; |
@@ -86,6 +92,9 @@ unsigned long probe_irq_on(void) | |||
86 | * Now filter out any obviously spurious interrupts | 92 | * Now filter out any obviously spurious interrupts |
87 | */ | 93 | */ |
88 | for_each_irq_desc(i, desc) { | 94 | for_each_irq_desc(i, desc) { |
95 | if (!desc) | ||
96 | continue; | ||
97 | |||
89 | spin_lock_irq(&desc->lock); | 98 | spin_lock_irq(&desc->lock); |
90 | status = desc->status; | 99 | status = desc->status; |
91 | 100 | ||
@@ -124,6 +133,9 @@ unsigned int probe_irq_mask(unsigned long val) | |||
124 | int i; | 133 | int i; |
125 | 134 | ||
126 | for_each_irq_desc(i, desc) { | 135 | for_each_irq_desc(i, desc) { |
136 | if (!desc) | ||
137 | continue; | ||
138 | |||
127 | spin_lock_irq(&desc->lock); | 139 | spin_lock_irq(&desc->lock); |
128 | status = desc->status; | 140 | status = desc->status; |
129 | 141 | ||
@@ -166,6 +178,9 @@ int probe_irq_off(unsigned long val) | |||
166 | unsigned int status; | 178 | unsigned int status; |
167 | 179 | ||
168 | for_each_irq_desc(i, desc) { | 180 | for_each_irq_desc(i, desc) { |
181 | if (!desc) | ||
182 | continue; | ||
183 | |||
169 | spin_lock_irq(&desc->lock); | 184 | spin_lock_irq(&desc->lock); |
170 | status = desc->status; | 185 | status = desc->status; |
171 | 186 | ||
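Because the sparse descriptor table is an array of pointers with holes, for_each_irq_desc() can now hand back NULL for irq numbers that were never allocated, and every iteration site has to skip those entries, as the four autoprobe hunks above do. The resulting loop shape, condensed into a sketch (the loop body beyond the lock is elided):

#include <linux/irq.h>
#include <linux/irqnr.h>

/*
 * Sketch: walking all descriptors under CONFIG_SPARSE_IRQ.  Unallocated
 * slots read back as NULL and must be skipped before the descriptor
 * lock is taken.
 */
static void walk_all_irq_descs(void)
{
	struct irq_desc *desc;
	int irq;

	for_each_irq_desc(irq, desc) {
		if (!desc)		/* hole in the sparse array */
			continue;

		spin_lock_irq(&desc->lock);
		/* ... inspect desc->status, desc->action, ... */
		spin_unlock_irq(&desc->lock);
	}
}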
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 10b5092e9bfe..8e4fce4a1b1f 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -24,9 +24,10 @@ | |||
24 | */ | 24 | */ |
25 | void dynamic_irq_init(unsigned int irq) | 25 | void dynamic_irq_init(unsigned int irq) |
26 | { | 26 | { |
27 | struct irq_desc *desc = irq_to_desc(irq); | 27 | struct irq_desc *desc; |
28 | unsigned long flags; | 28 | unsigned long flags; |
29 | 29 | ||
30 | desc = irq_to_desc(irq); | ||
30 | if (!desc) { | 31 | if (!desc) { |
31 | WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); | 32 | WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); |
32 | return; | 33 | return; |
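The chip.c hunk merely splits the lookup out of the declaration, but the surrounding code shows the rule every user of irq_to_desc() lives by once descriptors can be absent: validate the pointer before dereferencing it. A sketch of that caller-side pattern, where touch_irq() is a hypothetical name:

#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/kernel.h>

/* Sketch only: touch_irq() is a hypothetical caller, not from the patch. */
static int touch_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc) {
		WARN(1, KERN_ERR "no irq_desc for IRQ%d\n", irq);
		return -EINVAL;
	}

	/* desc is safe to dereference from here on */
	return 0;
}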
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index c815b42d0f5b..96ca203eb51b 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
@@ -15,9 +15,16 @@ | |||
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/kernel_stat.h> | 17 | #include <linux/kernel_stat.h> |
18 | #include <linux/rculist.h> | ||
19 | #include <linux/hash.h> | ||
18 | 20 | ||
19 | #include "internals.h" | 21 | #include "internals.h" |
20 | 22 | ||
23 | /* | ||
24 | * lockdep: we want to handle all irq_desc locks as a single lock-class: | ||
25 | */ | ||
26 | static struct lock_class_key irq_desc_lock_class; | ||
27 | |||
21 | /** | 28 | /** |
22 | * handle_bad_irq - handle spurious and unhandled irqs | 29 | * handle_bad_irq - handle spurious and unhandled irqs |
23 | * @irq: the interrupt number | 30 | * @irq: the interrupt number |
@@ -49,6 +56,155 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc) | |||
49 | int nr_irqs = NR_IRQS; | 56 | int nr_irqs = NR_IRQS; |
50 | EXPORT_SYMBOL_GPL(nr_irqs); | 57 | EXPORT_SYMBOL_GPL(nr_irqs); |
51 | 58 | ||
59 | void __init __attribute__((weak)) arch_early_irq_init(void) | ||
60 | { | ||
61 | } | ||
62 | |||
63 | #ifdef CONFIG_SPARSE_IRQ | ||
64 | static struct irq_desc irq_desc_init = { | ||
65 | .irq = -1, | ||
66 | .status = IRQ_DISABLED, | ||
67 | .chip = &no_irq_chip, | ||
68 | .handle_irq = handle_bad_irq, | ||
69 | .depth = 1, | ||
70 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), | ||
71 | #ifdef CONFIG_SMP | ||
72 | .affinity = CPU_MASK_ALL | ||
73 | #endif | ||
74 | }; | ||
75 | |||
76 | static void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) | ||
77 | { | ||
78 | unsigned long bytes; | ||
79 | char *ptr; | ||
80 | int node; | ||
81 | |||
82 | /* Compute how many bytes we need per irq and allocate them */ | ||
83 | bytes = nr * sizeof(unsigned int); | ||
84 | |||
85 | node = cpu_to_node(cpu); | ||
86 | ptr = kzalloc_node(bytes, GFP_ATOMIC, node); | ||
87 | printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node); | ||
88 | |||
89 | if (ptr) | ||
90 | desc->kstat_irqs = (unsigned int *)ptr; | ||
91 | } | ||
92 | |||
93 | void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu) | ||
94 | { | ||
95 | } | ||
96 | |||
97 | static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) | ||
98 | { | ||
99 | memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); | ||
100 | desc->irq = irq; | ||
101 | #ifdef CONFIG_SMP | ||
102 | desc->cpu = cpu; | ||
103 | #endif | ||
104 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | ||
105 | init_kstat_irqs(desc, cpu, nr_cpu_ids); | ||
106 | if (!desc->kstat_irqs) { | ||
107 | printk(KERN_ERR "can not alloc kstat_irqs\n"); | ||
108 | BUG_ON(1); | ||
109 | } | ||
110 | arch_init_chip_data(desc, cpu); | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * Protect the sparse_irqs: | ||
115 | */ | ||
116 | static DEFINE_SPINLOCK(sparse_irq_lock); | ||
117 | |||
118 | struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly; | ||
119 | |||
120 | static struct irq_desc irq_desc_legacy[16] __cacheline_aligned_in_smp = { | ||
121 | [0 ... 15] = { | ||
122 | .irq = -1, | ||
123 | .status = IRQ_DISABLED, | ||
124 | .chip = &no_irq_chip, | ||
125 | .handle_irq = handle_bad_irq, | ||
126 | .depth = 1, | ||
127 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), | ||
128 | #ifdef CONFIG_SMP | ||
129 | .affinity = CPU_MASK_ALL | ||
130 | #endif | ||
131 | } | ||
132 | }; | ||
133 | |||
134 | /* FIXME: use bootmem alloc ...*/ | ||
135 | static unsigned int kstat_irqs_legacy[16][NR_CPUS]; | ||
136 | |||
137 | void __init early_irq_init(void) | ||
138 | { | ||
139 | struct irq_desc *desc; | ||
140 | int legacy_count; | ||
141 | int i; | ||
142 | |||
143 | desc = irq_desc_legacy; | ||
144 | legacy_count = ARRAY_SIZE(irq_desc_legacy); | ||
145 | |||
146 | for (i = 0; i < legacy_count; i++) { | ||
147 | desc[i].irq = i; | ||
148 | desc[i].kstat_irqs = kstat_irqs_legacy[i]; | ||
149 | |||
150 | irq_desc_ptrs[i] = desc + i; | ||
151 | } | ||
152 | |||
153 | for (i = legacy_count; i < NR_IRQS; i++) | ||
154 | irq_desc_ptrs[i] = NULL; | ||
155 | |||
156 | arch_early_irq_init(); | ||
157 | } | ||
158 | |||
159 | struct irq_desc *irq_to_desc(unsigned int irq) | ||
160 | { | ||
161 | return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL; | ||
162 | } | ||
163 | |||
164 | struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) | ||
165 | { | ||
166 | struct irq_desc *desc; | ||
167 | unsigned long flags; | ||
168 | int node; | ||
169 | |||
170 | if (irq >= NR_IRQS) { | ||
171 | printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n", | ||
172 | irq, NR_IRQS); | ||
173 | WARN_ON(1); | ||
174 | return NULL; | ||
175 | } | ||
176 | |||
177 | desc = irq_desc_ptrs[irq]; | ||
178 | if (desc) | ||
179 | return desc; | ||
180 | |||
181 | spin_lock_irqsave(&sparse_irq_lock, flags); | ||
182 | |||
183 | /* We have to check it to avoid races with another CPU */ | ||
184 | desc = irq_desc_ptrs[irq]; | ||
185 | if (desc) | ||
186 | goto out_unlock; | ||
187 | |||
188 | node = cpu_to_node(cpu); | ||
189 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); | ||
190 | printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n", | ||
191 | irq, cpu, node); | ||
192 | if (!desc) { | ||
193 | printk(KERN_ERR "can not alloc irq_desc\n"); | ||
194 | BUG_ON(1); | ||
195 | } | ||
196 | init_one_irq_desc(irq, desc, cpu); | ||
197 | |||
198 | irq_desc_ptrs[irq] = desc; | ||
199 | |||
200 | out_unlock: | ||
201 | spin_unlock_irqrestore(&sparse_irq_lock, flags); | ||
202 | |||
203 | return desc; | ||
204 | } | ||
205 | |||
206 | #else | ||
207 | |||
52 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { | 208 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { |
53 | [0 ... NR_IRQS-1] = { | 209 | [0 ... NR_IRQS-1] = { |
54 | .status = IRQ_DISABLED, | 210 | .status = IRQ_DISABLED, |
@@ -62,6 +218,8 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { | |||
62 | } | 218 | } |
63 | }; | 219 | }; |
64 | 220 | ||
221 | #endif | ||
222 | |||
65 | /* | 223 | /* |
66 | * What should we do if we get a hw irq event on an illegal vector? | 224 | * What should we do if we get a hw irq event on an illegal vector? |
67 | * Each architecture has to answer this themself. | 225 | * Each architecture has to answer this themself. |
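irq_to_desc_alloc_cpu() above is a lookup-or-allocate with a double check: the fast path reads irq_desc_ptrs[irq] without taking sparse_irq_lock, and the slow path re-checks under the lock so two CPUs racing on the same unallocated irq cannot both install a descriptor. The same shape, stripped to its essentials (the slot type, NR_SLOTS and all names here are invented for the sketch):

#include <linux/spinlock.h>
#include <linux/slab.h>

/* Illustrative reduction of irq_to_desc_alloc_cpu(); everything is made up. */
struct slot {
	int id;
};

#define NR_SLOTS 128

static struct slot *slot_ptrs[NR_SLOTS];
static DEFINE_SPINLOCK(slot_lock);

static struct slot *slot_get_or_alloc(unsigned int i)
{
	struct slot *p;
	unsigned long flags;

	if (i >= NR_SLOTS)
		return NULL;

	p = slot_ptrs[i];		/* unlocked fast path */
	if (p)
		return p;

	spin_lock_irqsave(&slot_lock, flags);

	p = slot_ptrs[i];		/* re-check under the lock */
	if (p)
		goto out_unlock;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (p) {
		p->id = i;		/* fully initialise first ... */
		slot_ptrs[i] = p;	/* ... then publish the pointer */
	}

out_unlock:
	spin_unlock_irqrestore(&slot_lock, flags);
	return p;
}

Note the ordering the patch uses: init_one_irq_desc() runs to completion before the new pointer is stored into irq_desc_ptrs[irq].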
@@ -261,17 +419,28 @@ out: | |||
261 | 419 | ||
262 | 420 | ||
263 | #ifdef CONFIG_TRACE_IRQFLAGS | 421 | #ifdef CONFIG_TRACE_IRQFLAGS |
264 | /* | ||
265 | * lockdep: we want to handle all irq_desc locks as a single lock-class: | ||
266 | */ | ||
267 | static struct lock_class_key irq_desc_lock_class; | ||
268 | |||
269 | void early_init_irq_lock_class(void) | 422 | void early_init_irq_lock_class(void) |
270 | { | 423 | { |
424 | #ifndef CONFIG_SPARSE_IRQ | ||
271 | struct irq_desc *desc; | 425 | struct irq_desc *desc; |
272 | int i; | 426 | int i; |
273 | 427 | ||
274 | for_each_irq_desc(i, desc) | 428 | for_each_irq_desc(i, desc) { |
429 | if (!desc) | ||
430 | continue; | ||
431 | |||
275 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | 432 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); |
433 | } | ||
434 | #endif | ||
435 | } | ||
436 | #endif | ||
437 | |||
438 | #ifdef CONFIG_SPARSE_IRQ | ||
439 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) | ||
440 | { | ||
441 | struct irq_desc *desc = irq_to_desc(irq); | ||
442 | return desc->kstat_irqs[cpu]; | ||
276 | } | 443 | } |
277 | #endif | 444 | #endif |
445 | EXPORT_SYMBOL(kstat_irqs_cpu); | ||
446 | |||
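With the counters moved into each descriptor, generic code reads them through the new kstat_irqs_cpu() accessor instead of indexing a flat per-CPU table. A hedged sketch of a consumer totalling one interrupt across CPUs; since kstat_irqs_cpu() dereferences the descriptor unconditionally in the sparse case, the caller is assumed to know the irq exists:

#include <linux/kernel_stat.h>
#include <linux/cpumask.h>

/*
 * Sketch: sum the per-CPU count for one irq.  Only meaningful for an irq
 * whose descriptor has actually been allocated.
 */
static unsigned int total_irq_count(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);

	return sum;
}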
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index d257e7d6a8a4..f6b3440f05bc 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -243,7 +243,11 @@ void init_irq_proc(void) | |||
243 | /* | 243 | /* |
244 | * Create entries for all existing IRQs. | 244 | * Create entries for all existing IRQs. |
245 | */ | 245 | */ |
246 | for_each_irq_desc(irq, desc) | 246 | for_each_irq_desc(irq, desc) { |
247 | if (!desc) | ||
248 | continue; | ||
249 | |||
247 | register_irq_proc(irq, desc); | 250 | register_irq_proc(irq, desc); |
251 | } | ||
248 | } | 252 | } |
249 | 253 | ||
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index dd364c11e56e..3738107531fd 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
@@ -91,6 +91,9 @@ static int misrouted_irq(int irq) | |||
91 | int i, ok = 0; | 91 | int i, ok = 0; |
92 | 92 | ||
93 | for_each_irq_desc(i, desc) { | 93 | for_each_irq_desc(i, desc) { |
94 | if (!desc) | ||
95 | continue; | ||
96 | |||
94 | if (!i) | 97 | if (!i) |
95 | continue; | 98 | continue; |
96 | 99 | ||
@@ -112,6 +115,8 @@ static void poll_spurious_irqs(unsigned long dummy) | |||
112 | for_each_irq_desc(i, desc) { | 115 | for_each_irq_desc(i, desc) { |
113 | unsigned int status; | 116 | unsigned int status; |
114 | 117 | ||
118 | if (!desc) | ||
119 | continue; | ||
115 | if (!i) | 120 | if (!i) |
116 | continue; | 121 | continue; |
117 | 122 | ||
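The weak arch_init_chip_data() hook added in kernel/irq/handle.c is the point at which an architecture can attach its own chip_data while a sparse descriptor is being built. A hypothetical override, whose arch_irq_cfg structure is invented for illustration and not part of this patch:

#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/topology.h>

/* Hypothetical per-irq arch data; not part of this patch. */
struct arch_irq_cfg {
	unsigned int vector;
};

/*
 * A strong definition like this would override the weak stub in
 * kernel/irq/handle.c; init_one_irq_desc() calls it after the descriptor
 * has been copied from the template and its kstat_irqs buffer allocated.
 */
void arch_init_chip_data(struct irq_desc *desc, int cpu)
{
	struct arch_irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, cpu_to_node(cpu));
	if (cfg)
		desc->chip_data = cfg;
}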