path: root/arch/x86/kernel/apic/io_apic.c
Diffstat (limited to 'arch/x86/kernel/apic/io_apic.c')
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 849
1 file changed, 324 insertions(+), 525 deletions(-)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 9508811e8448..8ae808d110f4 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -131,13 +131,9 @@ struct irq_pin_list {
131 struct irq_pin_list *next; 131 struct irq_pin_list *next;
132}; 132};
133 133
134static struct irq_pin_list *get_one_free_irq_2_pin(int node) 134static struct irq_pin_list *alloc_irq_pin_list(int node)
135{ 135{
136 struct irq_pin_list *pin; 136 return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
137
138 pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
139
140 return pin;
141} 137}
142 138
143/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ 139/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
@@ -150,10 +146,7 @@ static struct irq_cfg irq_cfgx[NR_IRQS];
150int __init arch_early_irq_init(void) 146int __init arch_early_irq_init(void)
151{ 147{
152 struct irq_cfg *cfg; 148 struct irq_cfg *cfg;
153 struct irq_desc *desc; 149 int count, node, i;
154 int count;
155 int node;
156 int i;
157 150
158 if (!legacy_pic->nr_legacy_irqs) { 151 if (!legacy_pic->nr_legacy_irqs) {
159 nr_irqs_gsi = 0; 152 nr_irqs_gsi = 0;
@@ -164,11 +157,13 @@ int __init arch_early_irq_init(void)
164 count = ARRAY_SIZE(irq_cfgx); 157 count = ARRAY_SIZE(irq_cfgx);
165 node = cpu_to_node(0); 158 node = cpu_to_node(0);
166 159
160 /* Make sure the legacy interrupts are marked in the bitmap */
161 irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);
162
167 for (i = 0; i < count; i++) { 163 for (i = 0; i < count; i++) {
168 desc = irq_to_desc(i); 164 set_irq_chip_data(i, &cfg[i]);
169 desc->chip_data = &cfg[i]; 165 zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node);
170 zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); 166 zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
171 zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
172 /* 167 /*
173 * For legacy IRQ's, start with assigning irq0 to irq15 to 168 * For legacy IRQ's, start with assigning irq0 to irq15 to
174 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. 169 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
@@ -183,170 +178,88 @@ int __init arch_early_irq_init(void)
183} 178}
184 179
185#ifdef CONFIG_SPARSE_IRQ 180#ifdef CONFIG_SPARSE_IRQ
186struct irq_cfg *irq_cfg(unsigned int irq) 181static struct irq_cfg *irq_cfg(unsigned int irq)
187{ 182{
188 struct irq_cfg *cfg = NULL; 183 return get_irq_chip_data(irq);
189 struct irq_desc *desc;
190
191 desc = irq_to_desc(irq);
192 if (desc)
193 cfg = desc->chip_data;
194
195 return cfg;
196} 184}
197 185
198static struct irq_cfg *get_one_free_irq_cfg(int node) 186static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
199{ 187{
200 struct irq_cfg *cfg; 188 struct irq_cfg *cfg;
201 189
202 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); 190 cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
203 if (cfg) { 191 if (!cfg)
204 if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { 192 return NULL;
205 kfree(cfg); 193 if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
206 cfg = NULL; 194 goto out_cfg;
207 } else if (!zalloc_cpumask_var_node(&cfg->old_domain, 195 if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
208 GFP_ATOMIC, node)) { 196 goto out_domain;
209 free_cpumask_var(cfg->domain);
210 kfree(cfg);
211 cfg = NULL;
212 }
213 }
214
215 return cfg; 197 return cfg;
198out_domain:
199 free_cpumask_var(cfg->domain);
200out_cfg:
201 kfree(cfg);
202 return NULL;
216} 203}
217 204
218int arch_init_chip_data(struct irq_desc *desc, int node) 205static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
219{
220 struct irq_cfg *cfg;
221
222 cfg = desc->chip_data;
223 if (!cfg) {
224 desc->chip_data = get_one_free_irq_cfg(node);
225 if (!desc->chip_data) {
226 printk(KERN_ERR "can not alloc irq_cfg\n");
227 BUG_ON(1);
228 }
229 }
230
231 return 0;
232}
233
234/* for move_irq_desc */
235static void
236init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node)
237{ 206{
238 struct irq_pin_list *old_entry, *head, *tail, *entry; 207 if (!cfg)
239
240 cfg->irq_2_pin = NULL;
241 old_entry = old_cfg->irq_2_pin;
242 if (!old_entry)
243 return;
244
245 entry = get_one_free_irq_2_pin(node);
246 if (!entry)
247 return; 208 return;
209 set_irq_chip_data(at, NULL);
210 free_cpumask_var(cfg->domain);
211 free_cpumask_var(cfg->old_domain);
212 kfree(cfg);
213}
248 214
249 entry->apic = old_entry->apic; 215#else
250 entry->pin = old_entry->pin;
251 head = entry;
252 tail = entry;
253 old_entry = old_entry->next;
254 while (old_entry) {
255 entry = get_one_free_irq_2_pin(node);
256 if (!entry) {
257 entry = head;
258 while (entry) {
259 head = entry->next;
260 kfree(entry);
261 entry = head;
262 }
263 /* still use the old one */
264 return;
265 }
266 entry->apic = old_entry->apic;
267 entry->pin = old_entry->pin;
268 tail->next = entry;
269 tail = entry;
270 old_entry = old_entry->next;
271 }
272 216
273 tail->next = NULL; 217struct irq_cfg *irq_cfg(unsigned int irq)
274 cfg->irq_2_pin = head; 218{
219 return irq < nr_irqs ? irq_cfgx + irq : NULL;
275} 220}
276 221
277static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg) 222static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
278{ 223{
279 struct irq_pin_list *entry, *next; 224 return irq_cfgx + irq;
280 225}
281 if (old_cfg->irq_2_pin == cfg->irq_2_pin)
282 return;
283 226
284 entry = old_cfg->irq_2_pin; 227static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { }
285 228
286 while (entry) { 229#endif
287 next = entry->next;
288 kfree(entry);
289 entry = next;
290 }
291 old_cfg->irq_2_pin = NULL;
292}
293 230
294void arch_init_copy_chip_data(struct irq_desc *old_desc, 231static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
295 struct irq_desc *desc, int node)
296{ 232{
233 int res = irq_alloc_desc_at(at, node);
297 struct irq_cfg *cfg; 234 struct irq_cfg *cfg;
298 struct irq_cfg *old_cfg;
299
300 cfg = get_one_free_irq_cfg(node);
301 235
302 if (!cfg) 236 if (res < 0) {
303 return; 237 if (res != -EEXIST)
304 238 return NULL;
305 desc->chip_data = cfg; 239 cfg = get_irq_chip_data(at);
306 240 if (cfg)
307 old_cfg = old_desc->chip_data; 241 return cfg;
308 242 }
309 cfg->vector = old_cfg->vector;
310 cfg->move_in_progress = old_cfg->move_in_progress;
311 cpumask_copy(cfg->domain, old_cfg->domain);
312 cpumask_copy(cfg->old_domain, old_cfg->old_domain);
313
314 init_copy_irq_2_pin(old_cfg, cfg, node);
315}
316 243
317static void free_irq_cfg(struct irq_cfg *cfg) 244 cfg = alloc_irq_cfg(at, node);
318{ 245 if (cfg)
319 free_cpumask_var(cfg->domain); 246 set_irq_chip_data(at, cfg);
320 free_cpumask_var(cfg->old_domain); 247 else
321 kfree(cfg); 248 irq_free_desc(at);
249 return cfg;
322} 250}
323 251
324void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) 252static int alloc_irq_from(unsigned int from, int node)
325{ 253{
326 struct irq_cfg *old_cfg, *cfg; 254 return irq_alloc_desc_from(from, node);
327
328 old_cfg = old_desc->chip_data;
329 cfg = desc->chip_data;
330
331 if (old_cfg == cfg)
332 return;
333
334 if (old_cfg) {
335 free_irq_2_pin(old_cfg, cfg);
336 free_irq_cfg(old_cfg);
337 old_desc->chip_data = NULL;
338 }
339} 255}
340/* end for move_irq_desc */
341 256
342#else 257static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
343struct irq_cfg *irq_cfg(unsigned int irq)
344{ 258{
345 return irq < nr_irqs ? irq_cfgx + irq : NULL; 259 free_irq_cfg(at, cfg);
260 irq_free_desc(at);
346} 261}
347 262
348#endif
349
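[Editor's note] The rewritten helpers above replace the old get_one_free_irq_cfg()/irq_to_desc_alloc_node() pairing: the irq_cfg is now allocated with GFP_KERNEL, unwound with gotos on failure, and stored as the descriptor's chip data. For callers (see the setup_IO_APIC_irqs() conversion further down) the pattern boils down to this condensed before/after sketch, error printks elided:

	/* old style: allocate the descriptor, then peek at its chip_data */
	desc = irq_to_desc_alloc_node(irq, node);
	if (!desc)
		continue;
	cfg = desc->chip_data;

	/* new style: one helper hands back the irq_cfg for that irq */
	cfg = alloc_irq_and_cfg_at(irq, node);
	if (!cfg)
		continue;
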
350struct io_apic { 263struct io_apic {
351 unsigned int index; 264 unsigned int index;
352 unsigned int unused[3]; 265 unsigned int unused[3];
@@ -451,7 +364,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
451 io_apic_write(apic, 0x10 + 2*pin, eu.w1); 364 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
452} 365}
453 366
454void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 367static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
455{ 368{
456 unsigned long flags; 369 unsigned long flags;
457 raw_spin_lock_irqsave(&ioapic_lock, flags); 370 raw_spin_lock_irqsave(&ioapic_lock, flags);
@@ -481,7 +394,7 @@ static void ioapic_mask_entry(int apic, int pin)
481 * fast in the common case, and fast for shared ISA-space IRQs. 394 * fast in the common case, and fast for shared ISA-space IRQs.
482 */ 395 */
483static int 396static int
484add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) 397__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
485{ 398{
486 struct irq_pin_list **last, *entry; 399 struct irq_pin_list **last, *entry;
487 400
@@ -493,7 +406,7 @@ add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin)
493 last = &entry->next; 406 last = &entry->next;
494 } 407 }
495 408
496 entry = get_one_free_irq_2_pin(node); 409 entry = alloc_irq_pin_list(node);
497 if (!entry) { 410 if (!entry) {
498 printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", 411 printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
499 node, apic, pin); 412 node, apic, pin);
@@ -508,7 +421,7 @@ add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin)
508 421
509static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) 422static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
510{ 423{
511 if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin)) 424 if (__add_pin_to_irq_node(cfg, node, apic, pin))
512 panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); 425 panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
513} 426}
514 427
@@ -571,11 +484,6 @@ static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
571 IO_APIC_REDIR_LEVEL_TRIGGER, NULL); 484 IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
572} 485}
573 486
574static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
575{
576 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
577}
578
579static void io_apic_sync(struct irq_pin_list *entry) 487static void io_apic_sync(struct irq_pin_list *entry)
580{ 488{
581 /* 489 /*
@@ -587,44 +495,37 @@ static void io_apic_sync(struct irq_pin_list *entry)
587 readl(&io_apic->data); 495 readl(&io_apic->data);
588} 496}
589 497
590static void __mask_IO_APIC_irq(struct irq_cfg *cfg) 498static void mask_ioapic(struct irq_cfg *cfg)
591{ 499{
500 unsigned long flags;
501
502 raw_spin_lock_irqsave(&ioapic_lock, flags);
592 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); 503 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
504 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
593} 505}
594 506
595static void mask_IO_APIC_irq_desc(struct irq_desc *desc) 507static void mask_ioapic_irq(struct irq_data *data)
596{ 508{
597 struct irq_cfg *cfg = desc->chip_data; 509 mask_ioapic(data->chip_data);
598 unsigned long flags; 510}
599
600 BUG_ON(!cfg);
601 511
602 raw_spin_lock_irqsave(&ioapic_lock, flags); 512static void __unmask_ioapic(struct irq_cfg *cfg)
603 __mask_IO_APIC_irq(cfg); 513{
604 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 514 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
605} 515}
606 516
607static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) 517static void unmask_ioapic(struct irq_cfg *cfg)
608{ 518{
609 struct irq_cfg *cfg = desc->chip_data;
610 unsigned long flags; 519 unsigned long flags;
611 520
612 raw_spin_lock_irqsave(&ioapic_lock, flags); 521 raw_spin_lock_irqsave(&ioapic_lock, flags);
613 __unmask_IO_APIC_irq(cfg); 522 __unmask_ioapic(cfg);
614 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 523 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
615} 524}
616 525
617static void mask_IO_APIC_irq(unsigned int irq) 526static void unmask_ioapic_irq(struct irq_data *data)
618{ 527{
619 struct irq_desc *desc = irq_to_desc(irq); 528 unmask_ioapic(data->chip_data);
620
621 mask_IO_APIC_irq_desc(desc);
622}
623static void unmask_IO_APIC_irq(unsigned int irq)
624{
625 struct irq_desc *desc = irq_to_desc(irq);
626
627 unmask_IO_APIC_irq_desc(desc);
628} 529}
629 530
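[Editor's note] The hunk above folds the ioapic_lock handling into mask_ioapic()/unmask_ioapic() and switches the irq_chip callbacks from irq numbers to struct irq_data, where the irq_cfg is already available as chip_data. Condensed from the removed and added lines, the old and new callback shapes are:

	/* old flow: irq number -> irq_desc -> chip_data */
	static void mask_IO_APIC_irq(unsigned int irq)
	{
		struct irq_desc *desc = irq_to_desc(irq);

		mask_IO_APIC_irq_desc(desc);	/* takes ioapic_lock, uses desc->chip_data */
	}

	/* new flow: irq_data already carries the irq_cfg */
	static void mask_ioapic_irq(struct irq_data *data)
	{
		mask_ioapic(data->chip_data);	/* takes ioapic_lock internally */
	}
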
630static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) 531static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
@@ -694,14 +595,14 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
694 struct IO_APIC_route_entry **ioapic_entries; 595 struct IO_APIC_route_entry **ioapic_entries;
695 596
696 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, 597 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
697 GFP_ATOMIC); 598 GFP_KERNEL);
698 if (!ioapic_entries) 599 if (!ioapic_entries)
699 return 0; 600 return 0;
700 601
701 for (apic = 0; apic < nr_ioapics; apic++) { 602 for (apic = 0; apic < nr_ioapics; apic++) {
702 ioapic_entries[apic] = 603 ioapic_entries[apic] =
703 kzalloc(sizeof(struct IO_APIC_route_entry) * 604 kzalloc(sizeof(struct IO_APIC_route_entry) *
704 nr_ioapic_registers[apic], GFP_ATOMIC); 605 nr_ioapic_registers[apic], GFP_KERNEL);
705 if (!ioapic_entries[apic]) 606 if (!ioapic_entries[apic])
706 goto nomem; 607 goto nomem;
707 } 608 }
@@ -1259,7 +1160,6 @@ void __setup_vector_irq(int cpu)
1259 /* Initialize vector_irq on a new cpu */ 1160 /* Initialize vector_irq on a new cpu */
1260 int irq, vector; 1161 int irq, vector;
1261 struct irq_cfg *cfg; 1162 struct irq_cfg *cfg;
1262 struct irq_desc *desc;
1263 1163
1264 /* 1164 /*
1265 * vector_lock will make sure that we don't run into irq vector 1165 * vector_lock will make sure that we don't run into irq vector
@@ -1268,9 +1168,10 @@ void __setup_vector_irq(int cpu)
1268 */ 1168 */
1269 raw_spin_lock(&vector_lock); 1169 raw_spin_lock(&vector_lock);
1270 /* Mark the inuse vectors */ 1170 /* Mark the inuse vectors */
1271 for_each_irq_desc(irq, desc) { 1171 for_each_active_irq(irq) {
1272 cfg = desc->chip_data; 1172 cfg = get_irq_chip_data(irq);
1273 1173 if (!cfg)
1174 continue;
1274 /* 1175 /*
1275 * If it is a legacy IRQ handled by the legacy PIC, this cpu 1176 * If it is a legacy IRQ handled by the legacy PIC, this cpu
1276 * will be part of the irq_cfg's domain. 1177 * will be part of the irq_cfg's domain.
@@ -1327,17 +1228,17 @@ static inline int IO_APIC_irq_trigger(int irq)
1327} 1228}
1328#endif 1229#endif
1329 1230
1330static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger) 1231static void ioapic_register_intr(unsigned int irq, unsigned long trigger)
1331{ 1232{
1332 1233
1333 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 1234 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1334 trigger == IOAPIC_LEVEL) 1235 trigger == IOAPIC_LEVEL)
1335 desc->status |= IRQ_LEVEL; 1236 irq_set_status_flags(irq, IRQ_LEVEL);
1336 else 1237 else
1337 desc->status &= ~IRQ_LEVEL; 1238 irq_clear_status_flags(irq, IRQ_LEVEL);
1338 1239
1339 if (irq_remapped(irq)) { 1240 if (irq_remapped(get_irq_chip_data(irq))) {
1340 desc->status |= IRQ_MOVE_PCNTXT; 1241 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
1341 if (trigger) 1242 if (trigger)
1342 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, 1243 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1343 handle_fasteoi_irq, 1244 handle_fasteoi_irq,
@@ -1358,10 +1259,10 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t
1358 handle_edge_irq, "edge"); 1259 handle_edge_irq, "edge");
1359} 1260}
1360 1261
1361int setup_ioapic_entry(int apic_id, int irq, 1262static int setup_ioapic_entry(int apic_id, int irq,
1362 struct IO_APIC_route_entry *entry, 1263 struct IO_APIC_route_entry *entry,
1363 unsigned int destination, int trigger, 1264 unsigned int destination, int trigger,
1364 int polarity, int vector, int pin) 1265 int polarity, int vector, int pin)
1365{ 1266{
1366 /* 1267 /*
1367 * add it to the IO-APIC irq-routing table: 1268 * add it to the IO-APIC irq-routing table:
@@ -1417,18 +1318,14 @@ int setup_ioapic_entry(int apic_id, int irq,
1417 return 0; 1318 return 0;
1418} 1319}
1419 1320
1420static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc, 1321static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq,
1421 int trigger, int polarity) 1322 struct irq_cfg *cfg, int trigger, int polarity)
1422{ 1323{
1423 struct irq_cfg *cfg;
1424 struct IO_APIC_route_entry entry; 1324 struct IO_APIC_route_entry entry;
1425 unsigned int dest; 1325 unsigned int dest;
1426 1326
1427 if (!IO_APIC_IRQ(irq)) 1327 if (!IO_APIC_IRQ(irq))
1428 return; 1328 return;
1429
1430 cfg = desc->chip_data;
1431
1432 /* 1329 /*
1433 * For legacy irqs, cfg->domain starts with cpu 0 for legacy 1330 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
1434 * controllers like 8259. Now that IO-APIC can handle this irq, update 1331 * controllers like 8259. Now that IO-APIC can handle this irq, update
@@ -1457,9 +1354,9 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
1457 return; 1354 return;
1458 } 1355 }
1459 1356
1460 ioapic_register_intr(irq, desc, trigger); 1357 ioapic_register_intr(irq, trigger);
1461 if (irq < legacy_pic->nr_legacy_irqs) 1358 if (irq < legacy_pic->nr_legacy_irqs)
1462 legacy_pic->chip->mask(irq); 1359 legacy_pic->mask(irq);
1463 1360
1464 ioapic_write_entry(apic_id, pin, entry); 1361 ioapic_write_entry(apic_id, pin, entry);
1465} 1362}
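[Editor's note] Note how ioapic_register_intr() above stops touching desc->status directly and goes through the genirq status-flag accessors, keyed by irq number; the same substitution shows up in the MSI and timer paths below:

	/* old: poke the descriptor's status word */
	desc->status |= IRQ_LEVEL;
	desc->status &= ~IRQ_LEVEL;

	/* new: use the accessors, no descriptor lookup needed */
	irq_set_status_flags(irq, IRQ_LEVEL);
	irq_clear_status_flags(irq, IRQ_LEVEL);
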
@@ -1470,11 +1367,9 @@ static struct {
1470 1367
1471static void __init setup_IO_APIC_irqs(void) 1368static void __init setup_IO_APIC_irqs(void)
1472{ 1369{
1473 int apic_id, pin, idx, irq; 1370 int apic_id, pin, idx, irq, notcon = 0;
1474 int notcon = 0;
1475 struct irq_desc *desc;
1476 struct irq_cfg *cfg;
1477 int node = cpu_to_node(0); 1371 int node = cpu_to_node(0);
1372 struct irq_cfg *cfg;
1478 1373
1479 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); 1374 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1480 1375
@@ -1511,19 +1406,17 @@ static void __init setup_IO_APIC_irqs(void)
1511 apic->multi_timer_check(apic_id, irq)) 1406 apic->multi_timer_check(apic_id, irq))
1512 continue; 1407 continue;
1513 1408
1514 desc = irq_to_desc_alloc_node(irq, node); 1409 cfg = alloc_irq_and_cfg_at(irq, node);
1515 if (!desc) { 1410 if (!cfg)
1516 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
1517 continue; 1411 continue;
1518 } 1412
1519 cfg = desc->chip_data;
1520 add_pin_to_irq_node(cfg, node, apic_id, pin); 1413 add_pin_to_irq_node(cfg, node, apic_id, pin);
1521 /* 1414 /*
1522 * don't mark it in pin_programmed, so later acpi could 1415 * don't mark it in pin_programmed, so later acpi could
1523 * set it correctly when irq < 16 1416 * set it correctly when irq < 16
1524 */ 1417 */
1525 setup_IO_APIC_irq(apic_id, pin, irq, desc, 1418 setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx),
1526 irq_trigger(idx), irq_polarity(idx)); 1419 irq_polarity(idx));
1527 } 1420 }
1528 1421
1529 if (notcon) 1422 if (notcon)
@@ -1538,9 +1431,7 @@ static void __init setup_IO_APIC_irqs(void)
1538 */ 1431 */
1539void setup_IO_APIC_irq_extra(u32 gsi) 1432void setup_IO_APIC_irq_extra(u32 gsi)
1540{ 1433{
1541 int apic_id = 0, pin, idx, irq; 1434 int apic_id = 0, pin, idx, irq, node = cpu_to_node(0);
1542 int node = cpu_to_node(0);
1543 struct irq_desc *desc;
1544 struct irq_cfg *cfg; 1435 struct irq_cfg *cfg;
1545 1436
1546 /* 1437 /*
@@ -1556,18 +1447,15 @@ void setup_IO_APIC_irq_extra(u32 gsi)
1556 return; 1447 return;
1557 1448
1558 irq = pin_2_irq(idx, apic_id, pin); 1449 irq = pin_2_irq(idx, apic_id, pin);
1559#ifdef CONFIG_SPARSE_IRQ 1450
1560 desc = irq_to_desc(irq); 1451 /* Only handle the non legacy irqs on secondary ioapics */
1561 if (desc) 1452 if (apic_id == 0 || irq < NR_IRQS_LEGACY)
1562 return; 1453 return;
1563#endif 1454
1564 desc = irq_to_desc_alloc_node(irq, node); 1455 cfg = alloc_irq_and_cfg_at(irq, node);
1565 if (!desc) { 1456 if (!cfg)
1566 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
1567 return; 1457 return;
1568 }
1569 1458
1570 cfg = desc->chip_data;
1571 add_pin_to_irq_node(cfg, node, apic_id, pin); 1459 add_pin_to_irq_node(cfg, node, apic_id, pin);
1572 1460
1573 if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { 1461 if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
@@ -1577,7 +1465,7 @@ void setup_IO_APIC_irq_extra(u32 gsi)
1577 } 1465 }
1578 set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); 1466 set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);
1579 1467
1580 setup_IO_APIC_irq(apic_id, pin, irq, desc, 1468 setup_ioapic_irq(apic_id, pin, irq, cfg,
1581 irq_trigger(idx), irq_polarity(idx)); 1469 irq_trigger(idx), irq_polarity(idx));
1582} 1470}
1583 1471
@@ -1628,7 +1516,6 @@ __apicdebuginit(void) print_IO_APIC(void)
1628 union IO_APIC_reg_03 reg_03; 1516 union IO_APIC_reg_03 reg_03;
1629 unsigned long flags; 1517 unsigned long flags;
1630 struct irq_cfg *cfg; 1518 struct irq_cfg *cfg;
1631 struct irq_desc *desc;
1632 unsigned int irq; 1519 unsigned int irq;
1633 1520
1634 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); 1521 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
@@ -1715,10 +1602,10 @@ __apicdebuginit(void) print_IO_APIC(void)
1715 } 1602 }
1716 } 1603 }
1717 printk(KERN_DEBUG "IRQ to pin mappings:\n"); 1604 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1718 for_each_irq_desc(irq, desc) { 1605 for_each_active_irq(irq) {
1719 struct irq_pin_list *entry; 1606 struct irq_pin_list *entry;
1720 1607
1721 cfg = desc->chip_data; 1608 cfg = get_irq_chip_data(irq);
1722 if (!cfg) 1609 if (!cfg)
1723 continue; 1610 continue;
1724 entry = cfg->irq_2_pin; 1611 entry = cfg->irq_2_pin;
@@ -2225,29 +2112,26 @@ static int __init timer_irq_works(void)
2225 * an edge even if it isn't on the 8259A... 2112 * an edge even if it isn't on the 8259A...
2226 */ 2113 */
2227 2114
2228static unsigned int startup_ioapic_irq(unsigned int irq) 2115static unsigned int startup_ioapic_irq(struct irq_data *data)
2229{ 2116{
2230 int was_pending = 0; 2117 int was_pending = 0, irq = data->irq;
2231 unsigned long flags; 2118 unsigned long flags;
2232 struct irq_cfg *cfg;
2233 2119
2234 raw_spin_lock_irqsave(&ioapic_lock, flags); 2120 raw_spin_lock_irqsave(&ioapic_lock, flags);
2235 if (irq < legacy_pic->nr_legacy_irqs) { 2121 if (irq < legacy_pic->nr_legacy_irqs) {
2236 legacy_pic->chip->mask(irq); 2122 legacy_pic->mask(irq);
2237 if (legacy_pic->irq_pending(irq)) 2123 if (legacy_pic->irq_pending(irq))
2238 was_pending = 1; 2124 was_pending = 1;
2239 } 2125 }
2240 cfg = irq_cfg(irq); 2126 __unmask_ioapic(data->chip_data);
2241 __unmask_IO_APIC_irq(cfg);
2242 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2127 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2243 2128
2244 return was_pending; 2129 return was_pending;
2245} 2130}
2246 2131
2247static int ioapic_retrigger_irq(unsigned int irq) 2132static int ioapic_retrigger_irq(struct irq_data *data)
2248{ 2133{
2249 2134 struct irq_cfg *cfg = data->chip_data;
2250 struct irq_cfg *cfg = irq_cfg(irq);
2251 unsigned long flags; 2135 unsigned long flags;
2252 2136
2253 raw_spin_lock_irqsave(&vector_lock, flags); 2137 raw_spin_lock_irqsave(&vector_lock, flags);
@@ -2298,7 +2182,7 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
2298 * With interrupt-remapping, destination information comes 2182 * With interrupt-remapping, destination information comes
2299 * from interrupt-remapping table entry. 2183 * from interrupt-remapping table entry.
2300 */ 2184 */
2301 if (!irq_remapped(irq)) 2185 if (!irq_remapped(cfg))
2302 io_apic_write(apic, 0x11 + pin*2, dest); 2186 io_apic_write(apic, 0x11 + pin*2, dest);
2303 reg = io_apic_read(apic, 0x10 + pin*2); 2187 reg = io_apic_read(apic, 0x10 + pin*2);
2304 reg &= ~IO_APIC_REDIR_VECTOR_MASK; 2188 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
@@ -2308,65 +2192,46 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
2308} 2192}
2309 2193
2310/* 2194/*
2311 * Either sets desc->affinity to a valid value, and returns 2195 * Either sets data->affinity to a valid value, and returns
2312 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and 2196 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
2313 * leaves desc->affinity untouched. 2197 * leaves data->affinity untouched.
2314 */ 2198 */
2315unsigned int 2199int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2316set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask, 2200 unsigned int *dest_id)
2317 unsigned int *dest_id)
2318{ 2201{
2319 struct irq_cfg *cfg; 2202 struct irq_cfg *cfg = data->chip_data;
2320 unsigned int irq;
2321 2203
2322 if (!cpumask_intersects(mask, cpu_online_mask)) 2204 if (!cpumask_intersects(mask, cpu_online_mask))
2323 return -1; 2205 return -1;
2324 2206
2325 irq = desc->irq; 2207 if (assign_irq_vector(data->irq, data->chip_data, mask))
2326 cfg = desc->chip_data;
2327 if (assign_irq_vector(irq, cfg, mask))
2328 return -1; 2208 return -1;
2329 2209
2330 cpumask_copy(desc->affinity, mask); 2210 cpumask_copy(data->affinity, mask);
2331 2211
2332 *dest_id = apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain); 2212 *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain);
2333 return 0; 2213 return 0;
2334} 2214}
2335 2215
2336static int 2216static int
2337set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) 2217ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2218 bool force)
2338{ 2219{
2339 struct irq_cfg *cfg; 2220 unsigned int dest, irq = data->irq;
2340 unsigned long flags; 2221 unsigned long flags;
2341 unsigned int dest; 2222 int ret;
2342 unsigned int irq;
2343 int ret = -1;
2344
2345 irq = desc->irq;
2346 cfg = desc->chip_data;
2347 2223
2348 raw_spin_lock_irqsave(&ioapic_lock, flags); 2224 raw_spin_lock_irqsave(&ioapic_lock, flags);
2349 ret = set_desc_affinity(desc, mask, &dest); 2225 ret = __ioapic_set_affinity(data, mask, &dest);
2350 if (!ret) { 2226 if (!ret) {
2351 /* Only the high 8 bits are valid. */ 2227 /* Only the high 8 bits are valid. */
2352 dest = SET_APIC_LOGICAL_ID(dest); 2228 dest = SET_APIC_LOGICAL_ID(dest);
2353 __target_IO_APIC_irq(irq, dest, cfg); 2229 __target_IO_APIC_irq(irq, dest, data->chip_data);
2354 } 2230 }
2355 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2231 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2356
2357 return ret; 2232 return ret;
2358} 2233}
2359 2234
2360static int
2361set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
2362{
2363 struct irq_desc *desc;
2364
2365 desc = irq_to_desc(irq);
2366
2367 return set_ioapic_affinity_irq_desc(desc, mask);
2368}
2369
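[Editor's note] __ioapic_set_affinity() keeps the contract of the old set_desc_affinity(): it either updates data->affinity and returns the target APIC ID through *dest_id, or returns -1 and leaves the affinity untouched. The MSI, DMAR and HPET irq_set_affinity callbacks converted later in this patch follow the same shape; a condensed sketch (the callback name and the elided register writes are illustrative, not taken from this file):

	static int some_set_affinity(struct irq_data *data,
				     const struct cpumask *mask, bool force)
	{
		struct irq_cfg *cfg = data->chip_data;
		unsigned int dest;

		if (__ioapic_set_affinity(data, mask, &dest))
			return -1;	/* vector and affinity left unchanged */

		/* reprogram the interrupt source with cfg->vector and dest */
		return 0;
	}
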
2370#ifdef CONFIG_INTR_REMAP 2235#ifdef CONFIG_INTR_REMAP
2371 2236
2372/* 2237/*
@@ -2381,24 +2246,21 @@ set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
2381 * the interrupt-remapping table entry. 2246 * the interrupt-remapping table entry.
2382 */ 2247 */
2383static int 2248static int
2384migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) 2249ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2250 bool force)
2385{ 2251{
2386 struct irq_cfg *cfg; 2252 struct irq_cfg *cfg = data->chip_data;
2253 unsigned int dest, irq = data->irq;
2387 struct irte irte; 2254 struct irte irte;
2388 unsigned int dest;
2389 unsigned int irq;
2390 int ret = -1;
2391 2255
2392 if (!cpumask_intersects(mask, cpu_online_mask)) 2256 if (!cpumask_intersects(mask, cpu_online_mask))
2393 return ret; 2257 return -EINVAL;
2394 2258
2395 irq = desc->irq;
2396 if (get_irte(irq, &irte)) 2259 if (get_irte(irq, &irte))
2397 return ret; 2260 return -EBUSY;
2398 2261
2399 cfg = desc->chip_data;
2400 if (assign_irq_vector(irq, cfg, mask)) 2262 if (assign_irq_vector(irq, cfg, mask))
2401 return ret; 2263 return -EBUSY;
2402 2264
2403 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); 2265 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
2404 2266
@@ -2413,29 +2275,14 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2413 if (cfg->move_in_progress) 2275 if (cfg->move_in_progress)
2414 send_cleanup_vector(cfg); 2276 send_cleanup_vector(cfg);
2415 2277
2416 cpumask_copy(desc->affinity, mask); 2278 cpumask_copy(data->affinity, mask);
2417
2418 return 0; 2279 return 0;
2419} 2280}
2420 2281
2421/*
2422 * Migrates the IRQ destination in the process context.
2423 */
2424static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
2425 const struct cpumask *mask)
2426{
2427 return migrate_ioapic_irq_desc(desc, mask);
2428}
2429static int set_ir_ioapic_affinity_irq(unsigned int irq,
2430 const struct cpumask *mask)
2431{
2432 struct irq_desc *desc = irq_to_desc(irq);
2433
2434 return set_ir_ioapic_affinity_irq_desc(desc, mask);
2435}
2436#else 2282#else
2437static inline int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, 2283static inline int
2438 const struct cpumask *mask) 2284ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2285 bool force)
2439{ 2286{
2440 return 0; 2287 return 0;
2441} 2288}
@@ -2497,10 +2344,8 @@ unlock:
2497 irq_exit(); 2344 irq_exit();
2498} 2345}
2499 2346
2500static void __irq_complete_move(struct irq_desc **descp, unsigned vector) 2347static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
2501{ 2348{
2502 struct irq_desc *desc = *descp;
2503 struct irq_cfg *cfg = desc->chip_data;
2504 unsigned me; 2349 unsigned me;
2505 2350
2506 if (likely(!cfg->move_in_progress)) 2351 if (likely(!cfg->move_in_progress))
@@ -2512,31 +2357,28 @@ static void __irq_complete_move(struct irq_desc **descp, unsigned vector)
2512 send_cleanup_vector(cfg); 2357 send_cleanup_vector(cfg);
2513} 2358}
2514 2359
2515static void irq_complete_move(struct irq_desc **descp) 2360static void irq_complete_move(struct irq_cfg *cfg)
2516{ 2361{
2517 __irq_complete_move(descp, ~get_irq_regs()->orig_ax); 2362 __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
2518} 2363}
2519 2364
2520void irq_force_complete_move(int irq) 2365void irq_force_complete_move(int irq)
2521{ 2366{
2522 struct irq_desc *desc = irq_to_desc(irq); 2367 struct irq_cfg *cfg = get_irq_chip_data(irq);
2523 struct irq_cfg *cfg = desc->chip_data;
2524 2368
2525 if (!cfg) 2369 if (!cfg)
2526 return; 2370 return;
2527 2371
2528 __irq_complete_move(&desc, cfg->vector); 2372 __irq_complete_move(cfg, cfg->vector);
2529} 2373}
2530#else 2374#else
2531static inline void irq_complete_move(struct irq_desc **descp) {} 2375static inline void irq_complete_move(struct irq_cfg *cfg) { }
2532#endif 2376#endif
2533 2377
2534static void ack_apic_edge(unsigned int irq) 2378static void ack_apic_edge(struct irq_data *data)
2535{ 2379{
2536 struct irq_desc *desc = irq_to_desc(irq); 2380 irq_complete_move(data->chip_data);
2537 2381 move_native_irq(data->irq);
2538 irq_complete_move(&desc);
2539 move_native_irq(irq);
2540 ack_APIC_irq(); 2382 ack_APIC_irq();
2541} 2383}
2542 2384
@@ -2558,10 +2400,12 @@ atomic_t irq_mis_count;
2558 * Otherwise, we simulate the EOI message manually by changing the trigger 2400 * Otherwise, we simulate the EOI message manually by changing the trigger
2559 * mode to edge and then back to level, with RTE being masked during this. 2401 * mode to edge and then back to level, with RTE being masked during this.
2560*/ 2402*/
2561static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) 2403static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
2562{ 2404{
2563 struct irq_pin_list *entry; 2405 struct irq_pin_list *entry;
2406 unsigned long flags;
2564 2407
2408 raw_spin_lock_irqsave(&ioapic_lock, flags);
2565 for_each_irq_pin(entry, cfg->irq_2_pin) { 2409 for_each_irq_pin(entry, cfg->irq_2_pin) {
2566 if (mp_ioapics[entry->apic].apicver >= 0x20) { 2410 if (mp_ioapics[entry->apic].apicver >= 0x20) {
2567 /* 2411 /*
@@ -2570,7 +2414,7 @@ static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
2570 * intr-remapping table entry. Hence for the io-apic 2414 * intr-remapping table entry. Hence for the io-apic
2571 * EOI we use the pin number. 2415 * EOI we use the pin number.
2572 */ 2416 */
2573 if (irq_remapped(irq)) 2417 if (irq_remapped(cfg))
2574 io_apic_eoi(entry->apic, entry->pin); 2418 io_apic_eoi(entry->apic, entry->pin);
2575 else 2419 else
2576 io_apic_eoi(entry->apic, cfg->vector); 2420 io_apic_eoi(entry->apic, cfg->vector);
@@ -2579,36 +2423,22 @@ static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
2579 __unmask_and_level_IO_APIC_irq(entry); 2423 __unmask_and_level_IO_APIC_irq(entry);
2580 } 2424 }
2581 } 2425 }
2582}
2583
2584static void eoi_ioapic_irq(struct irq_desc *desc)
2585{
2586 struct irq_cfg *cfg;
2587 unsigned long flags;
2588 unsigned int irq;
2589
2590 irq = desc->irq;
2591 cfg = desc->chip_data;
2592
2593 raw_spin_lock_irqsave(&ioapic_lock, flags);
2594 __eoi_ioapic_irq(irq, cfg);
2595 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2426 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2596} 2427}
2597 2428
2598static void ack_apic_level(unsigned int irq) 2429static void ack_apic_level(struct irq_data *data)
2599{ 2430{
2431 struct irq_cfg *cfg = data->chip_data;
2432 int i, do_unmask_irq = 0, irq = data->irq;
2600 struct irq_desc *desc = irq_to_desc(irq); 2433 struct irq_desc *desc = irq_to_desc(irq);
2601 unsigned long v; 2434 unsigned long v;
2602 int i;
2603 struct irq_cfg *cfg;
2604 int do_unmask_irq = 0;
2605 2435
2606 irq_complete_move(&desc); 2436 irq_complete_move(cfg);
2607#ifdef CONFIG_GENERIC_PENDING_IRQ 2437#ifdef CONFIG_GENERIC_PENDING_IRQ
2608 /* If we are moving the irq we need to mask it */ 2438 /* If we are moving the irq we need to mask it */
2609 if (unlikely(desc->status & IRQ_MOVE_PENDING)) { 2439 if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
2610 do_unmask_irq = 1; 2440 do_unmask_irq = 1;
2611 mask_IO_APIC_irq_desc(desc); 2441 mask_ioapic(cfg);
2612 } 2442 }
2613#endif 2443#endif
2614 2444
@@ -2644,7 +2474,6 @@ static void ack_apic_level(unsigned int irq)
2644 * we use the above logic (mask+edge followed by unmask+level) from 2474 * we use the above logic (mask+edge followed by unmask+level) from
2645 * Manfred Spraul to clear the remote IRR. 2475 * Manfred Spraul to clear the remote IRR.
2646 */ 2476 */
2647 cfg = desc->chip_data;
2648 i = cfg->vector; 2477 i = cfg->vector;
2649 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); 2478 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2650 2479
@@ -2664,7 +2493,7 @@ static void ack_apic_level(unsigned int irq)
2664 if (!(v & (1 << (i & 0x1f)))) { 2493 if (!(v & (1 << (i & 0x1f)))) {
2665 atomic_inc(&irq_mis_count); 2494 atomic_inc(&irq_mis_count);
2666 2495
2667 eoi_ioapic_irq(desc); 2496 eoi_ioapic_irq(irq, cfg);
2668 } 2497 }
2669 2498
2670 /* Now we can move and renable the irq */ 2499 /* Now we can move and renable the irq */
@@ -2695,61 +2524,57 @@ static void ack_apic_level(unsigned int irq)
2695 * accurate and is causing problems then it is a hardware bug 2524 * accurate and is causing problems then it is a hardware bug
2696 * and you can go talk to the chipset vendor about it. 2525 * and you can go talk to the chipset vendor about it.
2697 */ 2526 */
2698 cfg = desc->chip_data;
2699 if (!io_apic_level_ack_pending(cfg)) 2527 if (!io_apic_level_ack_pending(cfg))
2700 move_masked_irq(irq); 2528 move_masked_irq(irq);
2701 unmask_IO_APIC_irq_desc(desc); 2529 unmask_ioapic(cfg);
2702 } 2530 }
2703} 2531}
2704 2532
2705#ifdef CONFIG_INTR_REMAP 2533#ifdef CONFIG_INTR_REMAP
2706static void ir_ack_apic_edge(unsigned int irq) 2534static void ir_ack_apic_edge(struct irq_data *data)
2707{ 2535{
2708 ack_APIC_irq(); 2536 ack_APIC_irq();
2709} 2537}
2710 2538
2711static void ir_ack_apic_level(unsigned int irq) 2539static void ir_ack_apic_level(struct irq_data *data)
2712{ 2540{
2713 struct irq_desc *desc = irq_to_desc(irq);
2714
2715 ack_APIC_irq(); 2541 ack_APIC_irq();
2716 eoi_ioapic_irq(desc); 2542 eoi_ioapic_irq(data->irq, data->chip_data);
2717} 2543}
2718#endif /* CONFIG_INTR_REMAP */ 2544#endif /* CONFIG_INTR_REMAP */
2719 2545
2720static struct irq_chip ioapic_chip __read_mostly = { 2546static struct irq_chip ioapic_chip __read_mostly = {
2721 .name = "IO-APIC", 2547 .name = "IO-APIC",
2722 .startup = startup_ioapic_irq, 2548 .irq_startup = startup_ioapic_irq,
2723 .mask = mask_IO_APIC_irq, 2549 .irq_mask = mask_ioapic_irq,
2724 .unmask = unmask_IO_APIC_irq, 2550 .irq_unmask = unmask_ioapic_irq,
2725 .ack = ack_apic_edge, 2551 .irq_ack = ack_apic_edge,
2726 .eoi = ack_apic_level, 2552 .irq_eoi = ack_apic_level,
2727#ifdef CONFIG_SMP 2553#ifdef CONFIG_SMP
2728 .set_affinity = set_ioapic_affinity_irq, 2554 .irq_set_affinity = ioapic_set_affinity,
2729#endif 2555#endif
2730 .retrigger = ioapic_retrigger_irq, 2556 .irq_retrigger = ioapic_retrigger_irq,
2731}; 2557};
2732 2558
2733static struct irq_chip ir_ioapic_chip __read_mostly = { 2559static struct irq_chip ir_ioapic_chip __read_mostly = {
2734 .name = "IR-IO-APIC", 2560 .name = "IR-IO-APIC",
2735 .startup = startup_ioapic_irq, 2561 .irq_startup = startup_ioapic_irq,
2736 .mask = mask_IO_APIC_irq, 2562 .irq_mask = mask_ioapic_irq,
2737 .unmask = unmask_IO_APIC_irq, 2563 .irq_unmask = unmask_ioapic_irq,
2738#ifdef CONFIG_INTR_REMAP 2564#ifdef CONFIG_INTR_REMAP
2739 .ack = ir_ack_apic_edge, 2565 .irq_ack = ir_ack_apic_edge,
2740 .eoi = ir_ack_apic_level, 2566 .irq_eoi = ir_ack_apic_level,
2741#ifdef CONFIG_SMP 2567#ifdef CONFIG_SMP
2742 .set_affinity = set_ir_ioapic_affinity_irq, 2568 .irq_set_affinity = ir_ioapic_set_affinity,
2743#endif 2569#endif
2744#endif 2570#endif
2745 .retrigger = ioapic_retrigger_irq, 2571 .irq_retrigger = ioapic_retrigger_irq,
2746}; 2572};
2747 2573
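[Editor's note] Both I/O-APIC chips are converted to the new irq_chip callback names (.mask -> .irq_mask, .ack -> .irq_ack, .set_affinity -> .irq_set_affinity, and so on), with every handler now taking a struct irq_data. As a shape reference only (the example_* names are hypothetical, not functions from this file), a chip in the new model looks like:

	static struct irq_chip example_chip __read_mostly = {
		.name			= "EXAMPLE",
		.irq_mask		= example_mask,		/* void (*)(struct irq_data *) */
		.irq_unmask		= example_unmask,
		.irq_ack		= example_ack,
		.irq_set_affinity	= example_set_affinity,	/* int (*)(struct irq_data *, const struct cpumask *, bool) */
	};
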
2748static inline void init_IO_APIC_traps(void) 2574static inline void init_IO_APIC_traps(void)
2749{ 2575{
2750 int irq;
2751 struct irq_desc *desc;
2752 struct irq_cfg *cfg; 2576 struct irq_cfg *cfg;
2577 unsigned int irq;
2753 2578
2754 /* 2579 /*
2755 * NOTE! The local APIC isn't very good at handling 2580 * NOTE! The local APIC isn't very good at handling
@@ -2762,8 +2587,8 @@ static inline void init_IO_APIC_traps(void)
2762 * Also, we've got to be careful not to trash gate 2587 * Also, we've got to be careful not to trash gate
2763 * 0x80, because int 0x80 is hm, kind of importantish. ;) 2588 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2764 */ 2589 */
2765 for_each_irq_desc(irq, desc) { 2590 for_each_active_irq(irq) {
2766 cfg = desc->chip_data; 2591 cfg = get_irq_chip_data(irq);
2767 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { 2592 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
2768 /* 2593 /*
2769 * Hmm.. We don't have an entry for this, 2594 * Hmm.. We don't have an entry for this,
@@ -2774,7 +2599,7 @@ static inline void init_IO_APIC_traps(void)
2774 legacy_pic->make_irq(irq); 2599 legacy_pic->make_irq(irq);
2775 else 2600 else
2776 /* Strange. Oh, well.. */ 2601 /* Strange. Oh, well.. */
2777 desc->chip = &no_irq_chip; 2602 set_irq_chip(irq, &no_irq_chip);
2778 } 2603 }
2779 } 2604 }
2780} 2605}
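[Editor's note] init_IO_APIC_traps() above shows the iterator change applied throughout the file: descriptor walks are replaced by walks over active irq numbers, with the irq_cfg fetched per irq and checked for NULL:

	/* old: iterate descriptors and dereference chip_data */
	for_each_irq_desc(irq, desc) {
		cfg = desc->chip_data;
		/* ... */
	}

	/* new: iterate active irqs, chip data may be NULL */
	for_each_active_irq(irq) {
		cfg = get_irq_chip_data(irq);
		if (!cfg)
			continue;
		/* ... */
	}
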
@@ -2783,7 +2608,7 @@ static inline void init_IO_APIC_traps(void)
2783 * The local APIC irq-chip implementation: 2608 * The local APIC irq-chip implementation:
2784 */ 2609 */
2785 2610
2786static void mask_lapic_irq(unsigned int irq) 2611static void mask_lapic_irq(struct irq_data *data)
2787{ 2612{
2788 unsigned long v; 2613 unsigned long v;
2789 2614
@@ -2791,7 +2616,7 @@ static void mask_lapic_irq(unsigned int irq)
2791 apic_write(APIC_LVT0, v | APIC_LVT_MASKED); 2616 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2792} 2617}
2793 2618
2794static void unmask_lapic_irq(unsigned int irq) 2619static void unmask_lapic_irq(struct irq_data *data)
2795{ 2620{
2796 unsigned long v; 2621 unsigned long v;
2797 2622
@@ -2799,21 +2624,21 @@ static void unmask_lapic_irq(unsigned int irq)
2799 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); 2624 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2800} 2625}
2801 2626
2802static void ack_lapic_irq(unsigned int irq) 2627static void ack_lapic_irq(struct irq_data *data)
2803{ 2628{
2804 ack_APIC_irq(); 2629 ack_APIC_irq();
2805} 2630}
2806 2631
2807static struct irq_chip lapic_chip __read_mostly = { 2632static struct irq_chip lapic_chip __read_mostly = {
2808 .name = "local-APIC", 2633 .name = "local-APIC",
2809 .mask = mask_lapic_irq, 2634 .irq_mask = mask_lapic_irq,
2810 .unmask = unmask_lapic_irq, 2635 .irq_unmask = unmask_lapic_irq,
2811 .ack = ack_lapic_irq, 2636 .irq_ack = ack_lapic_irq,
2812}; 2637};
2813 2638
2814static void lapic_register_intr(int irq, struct irq_desc *desc) 2639static void lapic_register_intr(int irq)
2815{ 2640{
2816 desc->status &= ~IRQ_LEVEL; 2641 irq_clear_status_flags(irq, IRQ_LEVEL);
2817 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, 2642 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2818 "edge"); 2643 "edge");
2819} 2644}
@@ -2916,8 +2741,7 @@ int timer_through_8259 __initdata;
2916 */ 2741 */
2917static inline void __init check_timer(void) 2742static inline void __init check_timer(void)
2918{ 2743{
2919 struct irq_desc *desc = irq_to_desc(0); 2744 struct irq_cfg *cfg = get_irq_chip_data(0);
2920 struct irq_cfg *cfg = desc->chip_data;
2921 int node = cpu_to_node(0); 2745 int node = cpu_to_node(0);
2922 int apic1, pin1, apic2, pin2; 2746 int apic1, pin1, apic2, pin2;
2923 unsigned long flags; 2747 unsigned long flags;
@@ -2928,7 +2752,7 @@ static inline void __init check_timer(void)
2928 /* 2752 /*
2929 * get/set the timer IRQ vector: 2753 * get/set the timer IRQ vector:
2930 */ 2754 */
2931 legacy_pic->chip->mask(0); 2755 legacy_pic->mask(0);
2932 assign_irq_vector(0, cfg, apic->target_cpus()); 2756 assign_irq_vector(0, cfg, apic->target_cpus());
2933 2757
2934 /* 2758 /*
@@ -2987,7 +2811,7 @@ static inline void __init check_timer(void)
2987 add_pin_to_irq_node(cfg, node, apic1, pin1); 2811 add_pin_to_irq_node(cfg, node, apic1, pin1);
2988 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2812 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2989 } else { 2813 } else {
2990 /* for edge trigger, setup_IO_APIC_irq already 2814 /* for edge trigger, setup_ioapic_irq already
2991 * leave it unmasked. 2815 * leave it unmasked.
2992 * so only need to unmask if it is level-trigger 2816 * so only need to unmask if it is level-trigger
2993 * do we really have level trigger timer? 2817 * do we really have level trigger timer?
@@ -2995,12 +2819,12 @@ static inline void __init check_timer(void)
2995 int idx; 2819 int idx;
2996 idx = find_irq_entry(apic1, pin1, mp_INT); 2820 idx = find_irq_entry(apic1, pin1, mp_INT);
2997 if (idx != -1 && irq_trigger(idx)) 2821 if (idx != -1 && irq_trigger(idx))
2998 unmask_IO_APIC_irq_desc(desc); 2822 unmask_ioapic(cfg);
2999 } 2823 }
3000 if (timer_irq_works()) { 2824 if (timer_irq_works()) {
3001 if (nmi_watchdog == NMI_IO_APIC) { 2825 if (nmi_watchdog == NMI_IO_APIC) {
3002 setup_nmi(); 2826 setup_nmi();
3003 legacy_pic->chip->unmask(0); 2827 legacy_pic->unmask(0);
3004 } 2828 }
3005 if (disable_timer_pin_1 > 0) 2829 if (disable_timer_pin_1 > 0)
3006 clear_IO_APIC_pin(0, pin1); 2830 clear_IO_APIC_pin(0, pin1);
@@ -3023,14 +2847,14 @@ static inline void __init check_timer(void)
3023 */ 2847 */
3024 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); 2848 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
3025 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 2849 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
3026 legacy_pic->chip->unmask(0); 2850 legacy_pic->unmask(0);
3027 if (timer_irq_works()) { 2851 if (timer_irq_works()) {
3028 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2852 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
3029 timer_through_8259 = 1; 2853 timer_through_8259 = 1;
3030 if (nmi_watchdog == NMI_IO_APIC) { 2854 if (nmi_watchdog == NMI_IO_APIC) {
3031 legacy_pic->chip->mask(0); 2855 legacy_pic->mask(0);
3032 setup_nmi(); 2856 setup_nmi();
3033 legacy_pic->chip->unmask(0); 2857 legacy_pic->unmask(0);
3034 } 2858 }
3035 goto out; 2859 goto out;
3036 } 2860 }
@@ -3038,7 +2862,7 @@ static inline void __init check_timer(void)
3038 * Cleanup, just in case ... 2862 * Cleanup, just in case ...
3039 */ 2863 */
3040 local_irq_disable(); 2864 local_irq_disable();
3041 legacy_pic->chip->mask(0); 2865 legacy_pic->mask(0);
3042 clear_IO_APIC_pin(apic2, pin2); 2866 clear_IO_APIC_pin(apic2, pin2);
3043 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); 2867 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
3044 } 2868 }
@@ -3055,16 +2879,16 @@ static inline void __init check_timer(void)
3055 apic_printk(APIC_QUIET, KERN_INFO 2879 apic_printk(APIC_QUIET, KERN_INFO
3056 "...trying to set up timer as Virtual Wire IRQ...\n"); 2880 "...trying to set up timer as Virtual Wire IRQ...\n");
3057 2881
3058 lapic_register_intr(0, desc); 2882 lapic_register_intr(0);
3059 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 2883 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
3060 legacy_pic->chip->unmask(0); 2884 legacy_pic->unmask(0);
3061 2885
3062 if (timer_irq_works()) { 2886 if (timer_irq_works()) {
3063 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2887 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3064 goto out; 2888 goto out;
3065 } 2889 }
3066 local_irq_disable(); 2890 local_irq_disable();
3067 legacy_pic->chip->mask(0); 2891 legacy_pic->mask(0);
3068 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 2892 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
3069 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 2893 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
3070 2894
@@ -3230,44 +3054,37 @@ device_initcall(ioapic_init_sysfs);
3230/* 3054/*
3231 * Dynamic irq allocate and deallocation 3055 * Dynamic irq allocate and deallocation
3232 */ 3056 */
3233unsigned int create_irq_nr(unsigned int irq_want, int node) 3057unsigned int create_irq_nr(unsigned int from, int node)
3234{ 3058{
3235 /* Allocate an unused irq */ 3059 struct irq_cfg *cfg;
3236 unsigned int irq;
3237 unsigned int new;
3238 unsigned long flags; 3060 unsigned long flags;
3239 struct irq_cfg *cfg_new = NULL; 3061 unsigned int ret = 0;
3240 struct irq_desc *desc_new = NULL; 3062 int irq;
3241
3242 irq = 0;
3243 if (irq_want < nr_irqs_gsi)
3244 irq_want = nr_irqs_gsi;
3245
3246 raw_spin_lock_irqsave(&vector_lock, flags);
3247 for (new = irq_want; new < nr_irqs; new++) {
3248 desc_new = irq_to_desc_alloc_node(new, node);
3249 if (!desc_new) {
3250 printk(KERN_INFO "can not get irq_desc for %d\n", new);
3251 continue;
3252 }
3253 cfg_new = desc_new->chip_data;
3254
3255 if (cfg_new->vector != 0)
3256 continue;
3257 3063
3258 desc_new = move_irq_desc(desc_new, node); 3064 if (from < nr_irqs_gsi)
3259 cfg_new = desc_new->chip_data; 3065 from = nr_irqs_gsi;
3260 3066
3261 if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) 3067 irq = alloc_irq_from(from, node);
3262 irq = new; 3068 if (irq < 0)
3263 break; 3069 return 0;
3070 cfg = alloc_irq_cfg(irq, node);
3071 if (!cfg) {
3072 free_irq_at(irq, NULL);
3073 return 0;
3264 } 3074 }
3265 raw_spin_unlock_irqrestore(&vector_lock, flags);
3266 3075
3267 if (irq > 0) 3076 raw_spin_lock_irqsave(&vector_lock, flags);
3268 dynamic_irq_init_keep_chip_data(irq); 3077 if (!__assign_irq_vector(irq, cfg, apic->target_cpus()))
3078 ret = irq;
3079 raw_spin_unlock_irqrestore(&vector_lock, flags);
3269 3080
3270 return irq; 3081 if (ret) {
3082 set_irq_chip_data(irq, cfg);
3083 irq_clear_status_flags(irq, IRQ_NOREQUEST);
3084 } else {
3085 free_irq_at(irq, cfg);
3086 }
3087 return ret;
3271} 3088}
3272 3089
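[Editor's note] create_irq_nr() now builds on the generic allocator: alloc_irq_from() reserves a descriptor, alloc_irq_cfg() attaches the irq_cfg, and any failure is rolled back through free_irq_at(); it still returns 0 on failure. A hypothetical caller, mirroring what create_irq() below does (the -ENOSPC mapping is an assumption for illustration):

	unsigned int irq = create_irq_nr(nr_irqs_gsi, cpu_to_node(0));

	if (!irq)
		return -ENOSPC;		/* 0 means no descriptor/vector available */

	/* ... set up a handler and use the interrupt ... */

	destroy_irq(irq);
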
3273int create_irq(void) 3090int create_irq(void)
@@ -3287,14 +3104,17 @@ int create_irq(void)
3287 3104
3288void destroy_irq(unsigned int irq) 3105void destroy_irq(unsigned int irq)
3289{ 3106{
3107 struct irq_cfg *cfg = get_irq_chip_data(irq);
3290 unsigned long flags; 3108 unsigned long flags;
3291 3109
3292 dynamic_irq_cleanup_keep_chip_data(irq); 3110 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
3293 3111
3294 free_irte(irq); 3112 if (intr_remapping_enabled)
3113 free_irte(irq);
3295 raw_spin_lock_irqsave(&vector_lock, flags); 3114 raw_spin_lock_irqsave(&vector_lock, flags);
3296 __clear_irq_vector(irq, get_irq_chip_data(irq)); 3115 __clear_irq_vector(irq, cfg);
3297 raw_spin_unlock_irqrestore(&vector_lock, flags); 3116 raw_spin_unlock_irqrestore(&vector_lock, flags);
3117 free_irq_at(irq, cfg);
3298} 3118}
3299 3119
3300/* 3120/*
@@ -3318,7 +3138,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
3318 3138
3319 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 3139 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
3320 3140
3321 if (irq_remapped(irq)) { 3141 if (irq_remapped(get_irq_chip_data(irq))) {
3322 struct irte irte; 3142 struct irte irte;
3323 int ir_index; 3143 int ir_index;
3324 u16 sub_handle; 3144 u16 sub_handle;
@@ -3371,26 +3191,24 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
3371} 3191}
3372 3192
3373#ifdef CONFIG_SMP 3193#ifdef CONFIG_SMP
3374static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) 3194static int
3195msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
3375{ 3196{
3376 struct irq_desc *desc = irq_to_desc(irq); 3197 struct irq_cfg *cfg = data->chip_data;
3377 struct irq_cfg *cfg;
3378 struct msi_msg msg; 3198 struct msi_msg msg;
3379 unsigned int dest; 3199 unsigned int dest;
3380 3200
3381 if (set_desc_affinity(desc, mask, &dest)) 3201 if (__ioapic_set_affinity(data, mask, &dest))
3382 return -1; 3202 return -1;
3383 3203
3384 cfg = desc->chip_data; 3204 __get_cached_msi_msg(data->msi_desc, &msg);
3385
3386 get_cached_msi_msg_desc(desc, &msg);
3387 3205
3388 msg.data &= ~MSI_DATA_VECTOR_MASK; 3206 msg.data &= ~MSI_DATA_VECTOR_MASK;
3389 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3207 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3390 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3208 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3391 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3209 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3392 3210
3393 write_msi_msg_desc(desc, &msg); 3211 __write_msi_msg(data->msi_desc, &msg);
3394 3212
3395 return 0; 3213 return 0;
3396} 3214}
@@ -3400,17 +3218,17 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3400 * done in the process context using interrupt-remapping hardware. 3218 * done in the process context using interrupt-remapping hardware.
3401 */ 3219 */
3402static int 3220static int
3403ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) 3221ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
3222 bool force)
3404{ 3223{
3405 struct irq_desc *desc = irq_to_desc(irq); 3224 struct irq_cfg *cfg = data->chip_data;
3406 struct irq_cfg *cfg = desc->chip_data; 3225 unsigned int dest, irq = data->irq;
3407 unsigned int dest;
3408 struct irte irte; 3226 struct irte irte;
3409 3227
3410 if (get_irte(irq, &irte)) 3228 if (get_irte(irq, &irte))
3411 return -1; 3229 return -1;
3412 3230
3413 if (set_desc_affinity(desc, mask, &dest)) 3231 if (__ioapic_set_affinity(data, mask, &dest))
3414 return -1; 3232 return -1;
3415 3233
3416 irte.vector = cfg->vector; 3234 irte.vector = cfg->vector;
@@ -3440,27 +3258,27 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3440 * which implement the MSI or MSI-X Capability Structure. 3258 * which implement the MSI or MSI-X Capability Structure.
3441 */ 3259 */
3442static struct irq_chip msi_chip = { 3260static struct irq_chip msi_chip = {
3443 .name = "PCI-MSI", 3261 .name = "PCI-MSI",
3444 .unmask = unmask_msi_irq, 3262 .irq_unmask = unmask_msi_irq,
3445 .mask = mask_msi_irq, 3263 .irq_mask = mask_msi_irq,
3446 .ack = ack_apic_edge, 3264 .irq_ack = ack_apic_edge,
3447#ifdef CONFIG_SMP 3265#ifdef CONFIG_SMP
3448 .set_affinity = set_msi_irq_affinity, 3266 .irq_set_affinity = msi_set_affinity,
3449#endif 3267#endif
3450 .retrigger = ioapic_retrigger_irq, 3268 .irq_retrigger = ioapic_retrigger_irq,
3451}; 3269};
3452 3270
3453static struct irq_chip msi_ir_chip = { 3271static struct irq_chip msi_ir_chip = {
3454 .name = "IR-PCI-MSI", 3272 .name = "IR-PCI-MSI",
3455 .unmask = unmask_msi_irq, 3273 .irq_unmask = unmask_msi_irq,
3456 .mask = mask_msi_irq, 3274 .irq_mask = mask_msi_irq,
3457#ifdef CONFIG_INTR_REMAP 3275#ifdef CONFIG_INTR_REMAP
3458 .ack = ir_ack_apic_edge, 3276 .irq_ack = ir_ack_apic_edge,
3459#ifdef CONFIG_SMP 3277#ifdef CONFIG_SMP
3460 .set_affinity = ir_set_msi_irq_affinity, 3278 .irq_set_affinity = ir_msi_set_affinity,
3461#endif 3279#endif
3462#endif 3280#endif
3463 .retrigger = ioapic_retrigger_irq, 3281 .irq_retrigger = ioapic_retrigger_irq,
3464}; 3282};
3465 3283
3466/* 3284/*
@@ -3492,8 +3310,8 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3492 3310
3493static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) 3311static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3494{ 3312{
3495 int ret;
3496 struct msi_msg msg; 3313 struct msi_msg msg;
3314 int ret;
3497 3315
3498 ret = msi_compose_msg(dev, irq, &msg, -1); 3316 ret = msi_compose_msg(dev, irq, &msg, -1);
3499 if (ret < 0) 3317 if (ret < 0)
@@ -3502,12 +3320,8 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3502 set_irq_msi(irq, msidesc); 3320 set_irq_msi(irq, msidesc);
3503 write_msi_msg(irq, &msg); 3321 write_msi_msg(irq, &msg);
3504 3322
3505 if (irq_remapped(irq)) { 3323 if (irq_remapped(get_irq_chip_data(irq))) {
3506 struct irq_desc *desc = irq_to_desc(irq); 3324 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
3507 /*
3508 * irq migration in process context
3509 */
3510 desc->status |= IRQ_MOVE_PCNTXT;
3511 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); 3325 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
3512 } else 3326 } else
3513 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); 3327 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
@@ -3519,13 +3333,10 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3519 3333
3520int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3334int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3521{ 3335{
3522 unsigned int irq; 3336 int node, ret, sub_handle, index = 0;
3523 int ret, sub_handle; 3337 unsigned int irq, irq_want;
3524 struct msi_desc *msidesc; 3338 struct msi_desc *msidesc;
3525 unsigned int irq_want;
3526 struct intel_iommu *iommu = NULL; 3339 struct intel_iommu *iommu = NULL;
3527 int index = 0;
3528 int node;
3529 3340
3530 /* x86 doesn't support multiple MSI yet */ 3341 /* x86 doesn't support multiple MSI yet */
3531 if (type == PCI_CAP_ID_MSI && nvec > 1) 3342 if (type == PCI_CAP_ID_MSI && nvec > 1)
@@ -3585,18 +3396,17 @@ void arch_teardown_msi_irq(unsigned int irq)
3585 3396
3586#if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) 3397#if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
3587#ifdef CONFIG_SMP 3398#ifdef CONFIG_SMP
3588static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) 3399static int
3400dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
3401 bool force)
3589{ 3402{
3590 struct irq_desc *desc = irq_to_desc(irq); 3403 struct irq_cfg *cfg = data->chip_data;
3591 struct irq_cfg *cfg; 3404 unsigned int dest, irq = data->irq;
3592 struct msi_msg msg; 3405 struct msi_msg msg;
3593 unsigned int dest;
3594 3406
3595 if (set_desc_affinity(desc, mask, &dest)) 3407 if (__ioapic_set_affinity(data, mask, &dest))
3596 return -1; 3408 return -1;
3597 3409
3598 cfg = desc->chip_data;
3599
3600 dmar_msi_read(irq, &msg); 3410 dmar_msi_read(irq, &msg);
3601 3411
3602 msg.data &= ~MSI_DATA_VECTOR_MASK; 3412 msg.data &= ~MSI_DATA_VECTOR_MASK;
@@ -3612,14 +3422,14 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3612#endif /* CONFIG_SMP */ 3422#endif /* CONFIG_SMP */
3613 3423
3614static struct irq_chip dmar_msi_type = { 3424static struct irq_chip dmar_msi_type = {
3615 .name = "DMAR_MSI", 3425 .name = "DMAR_MSI",
3616 .unmask = dmar_msi_unmask, 3426 .irq_unmask = dmar_msi_unmask,
3617 .mask = dmar_msi_mask, 3427 .irq_mask = dmar_msi_mask,
3618 .ack = ack_apic_edge, 3428 .irq_ack = ack_apic_edge,
3619#ifdef CONFIG_SMP 3429#ifdef CONFIG_SMP
3620 .set_affinity = dmar_msi_set_affinity, 3430 .irq_set_affinity = dmar_msi_set_affinity,
3621#endif 3431#endif
3622 .retrigger = ioapic_retrigger_irq, 3432 .irq_retrigger = ioapic_retrigger_irq,
3623}; 3433};
3624 3434
3625int arch_setup_dmar_msi(unsigned int irq) 3435int arch_setup_dmar_msi(unsigned int irq)
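The dmar_msi_type conversion above shows the pattern applied throughout this diff: every irq_chip callback gains an irq_ prefix and receives a struct irq_data pointer instead of a bare irq number, so the irq_cfg arrives as data->chip_data. A sketch of a new-style edge chip under those assumptions; the example_* names are placeholders, while __ioapic_set_affinity(), ack_apic_edge() and ioapic_retrigger_irq() are the io_apic.c helpers used in the hunks:

#include <linux/kernel.h>
#include <linux/irq.h>

/* Placeholder hooks: new-style callbacks operate on struct irq_data. */
static void example_mask(struct irq_data *data)
{
        /* mask the line; data->irq and data->chip_data are available */
}

static void example_unmask(struct irq_data *data)
{
        /* unmask the line */
}

#ifdef CONFIG_SMP
static int
example_set_affinity(struct irq_data *data, const struct cpumask *mask,
                     bool force)
{
        struct irq_cfg *cfg = data->chip_data;
        unsigned int dest;

        if (__ioapic_set_affinity(data, mask, &dest))
                return -1;

        /* reprogram the message/RTE for the new vector and destination */
        pr_debug("irq %u: vector %u -> apicid %u\n",
                 data->irq, cfg->vector, dest);
        return 0;
}
#endif

static struct irq_chip example_edge_chip = {
        .name                   = "EXAMPLE-EDGE",
        .irq_mask               = example_mask,         /* was .mask         */
        .irq_unmask             = example_unmask,       /* was .unmask       */
        .irq_ack                = ack_apic_edge,        /* was .ack          */
#ifdef CONFIG_SMP
        .irq_set_affinity       = example_set_affinity, /* was .set_affinity */
#endif
        .irq_retrigger          = ioapic_retrigger_irq, /* was .retrigger    */
};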
@@ -3640,26 +3450,24 @@ int arch_setup_dmar_msi(unsigned int irq)
3640#ifdef CONFIG_HPET_TIMER 3450#ifdef CONFIG_HPET_TIMER
3641 3451
3642#ifdef CONFIG_SMP 3452#ifdef CONFIG_SMP
3643static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) 3453static int hpet_msi_set_affinity(struct irq_data *data,
3454 const struct cpumask *mask, bool force)
3644{ 3455{
3645 struct irq_desc *desc = irq_to_desc(irq); 3456 struct irq_cfg *cfg = data->chip_data;
3646 struct irq_cfg *cfg;
3647 struct msi_msg msg; 3457 struct msi_msg msg;
3648 unsigned int dest; 3458 unsigned int dest;
3649 3459
3650 if (set_desc_affinity(desc, mask, &dest)) 3460 if (__ioapic_set_affinity(data, mask, &dest))
3651 return -1; 3461 return -1;
3652 3462
3653 cfg = desc->chip_data; 3463 hpet_msi_read(data->handler_data, &msg);
3654
3655 hpet_msi_read(irq, &msg);
3656 3464
3657 msg.data &= ~MSI_DATA_VECTOR_MASK; 3465 msg.data &= ~MSI_DATA_VECTOR_MASK;
3658 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3466 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3659 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3467 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3660 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3468 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3661 3469
3662 hpet_msi_write(irq, &msg); 3470 hpet_msi_write(data->handler_data, &msg);
3663 3471
3664 return 0; 3472 return 0;
3665} 3473}
@@ -3667,34 +3475,33 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3667#endif /* CONFIG_SMP */ 3475#endif /* CONFIG_SMP */
3668 3476
3669static struct irq_chip ir_hpet_msi_type = { 3477static struct irq_chip ir_hpet_msi_type = {
3670 .name = "IR-HPET_MSI", 3478 .name = "IR-HPET_MSI",
3671 .unmask = hpet_msi_unmask, 3479 .irq_unmask = hpet_msi_unmask,
3672 .mask = hpet_msi_mask, 3480 .irq_mask = hpet_msi_mask,
3673#ifdef CONFIG_INTR_REMAP 3481#ifdef CONFIG_INTR_REMAP
3674 .ack = ir_ack_apic_edge, 3482 .irq_ack = ir_ack_apic_edge,
3675#ifdef CONFIG_SMP 3483#ifdef CONFIG_SMP
3676 .set_affinity = ir_set_msi_irq_affinity, 3484 .irq_set_affinity = ir_msi_set_affinity,
3677#endif 3485#endif
3678#endif 3486#endif
3679 .retrigger = ioapic_retrigger_irq, 3487 .irq_retrigger = ioapic_retrigger_irq,
3680}; 3488};
3681 3489
3682static struct irq_chip hpet_msi_type = { 3490static struct irq_chip hpet_msi_type = {
3683 .name = "HPET_MSI", 3491 .name = "HPET_MSI",
3684 .unmask = hpet_msi_unmask, 3492 .irq_unmask = hpet_msi_unmask,
3685 .mask = hpet_msi_mask, 3493 .irq_mask = hpet_msi_mask,
3686 .ack = ack_apic_edge, 3494 .irq_ack = ack_apic_edge,
3687#ifdef CONFIG_SMP 3495#ifdef CONFIG_SMP
3688 .set_affinity = hpet_msi_set_affinity, 3496 .irq_set_affinity = hpet_msi_set_affinity,
3689#endif 3497#endif
3690 .retrigger = ioapic_retrigger_irq, 3498 .irq_retrigger = ioapic_retrigger_irq,
3691}; 3499};
3692 3500
3693int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 3501int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
3694{ 3502{
3695 int ret;
3696 struct msi_msg msg; 3503 struct msi_msg msg;
3697 struct irq_desc *desc = irq_to_desc(irq); 3504 int ret;
3698 3505
3699 if (intr_remapping_enabled) { 3506 if (intr_remapping_enabled) {
3700 struct intel_iommu *iommu = map_hpet_to_ir(id); 3507 struct intel_iommu *iommu = map_hpet_to_ir(id);
@@ -3712,9 +3519,9 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
3712 if (ret < 0) 3519 if (ret < 0)
3713 return ret; 3520 return ret;
3714 3521
3715 hpet_msi_write(irq, &msg); 3522 hpet_msi_write(get_irq_data(irq), &msg);
3716 desc->status |= IRQ_MOVE_PCNTXT; 3523 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
3717 if (irq_remapped(irq)) 3524 if (irq_remapped(get_irq_chip_data(irq)))
3718 set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, 3525 set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
3719 handle_edge_irq, "edge"); 3526 handle_edge_irq, "edge");
3720 else 3527 else
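Two different per-irq pointers are in play in the HPET hunks: get_irq_chip_data() returns what the callbacks see as data->chip_data (fed to irq_remapped() above), while get_irq_data() returns data->handler_data, the HPET channel handle that hpet_msi_read()/hpet_msi_write() now take directly instead of an irq number. A compressed sketch of the tail of arch_setup_hpet_msi() keeping only the calls relevant to that split; example_hpet_finish() is an illustrative name and assumes the io_apic.c context:

/* Sketch of the accessor split at the end of arch_setup_hpet_msi(). */
static void example_hpet_finish(unsigned int irq, struct msi_msg *msg)
{
        /* handler data: the HPET channel handle attached by the HPET driver */
        hpet_msi_write(get_irq_data(irq), msg);

        irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);

        /* chip data: tells us whether the remapping-aware chip is needed */
        if (irq_remapped(get_irq_chip_data(irq)))
                set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
                                              handle_edge_irq, "edge");
        else
                set_irq_chip_and_handler_name(irq, &hpet_msi_type,
                                              handle_edge_irq, "edge");
}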
@@ -3747,33 +3554,30 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3747 write_ht_irq_msg(irq, &msg); 3554 write_ht_irq_msg(irq, &msg);
3748} 3555}
3749 3556
3750static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) 3557static int
3558ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
3751{ 3559{
3752 struct irq_desc *desc = irq_to_desc(irq); 3560 struct irq_cfg *cfg = data->chip_data;
3753 struct irq_cfg *cfg;
3754 unsigned int dest; 3561 unsigned int dest;
3755 3562
3756 if (set_desc_affinity(desc, mask, &dest)) 3563 if (__ioapic_set_affinity(data, mask, &dest))
3757 return -1; 3564 return -1;
3758 3565
3759 cfg = desc->chip_data; 3566 target_ht_irq(data->irq, dest, cfg->vector);
3760
3761 target_ht_irq(irq, dest, cfg->vector);
3762
3763 return 0; 3567 return 0;
3764} 3568}
3765 3569
3766#endif 3570#endif
3767 3571
3768static struct irq_chip ht_irq_chip = { 3572static struct irq_chip ht_irq_chip = {
3769 .name = "PCI-HT", 3573 .name = "PCI-HT",
3770 .mask = mask_ht_irq, 3574 .irq_mask = mask_ht_irq,
3771 .unmask = unmask_ht_irq, 3575 .irq_unmask = unmask_ht_irq,
3772 .ack = ack_apic_edge, 3576 .irq_ack = ack_apic_edge,
3773#ifdef CONFIG_SMP 3577#ifdef CONFIG_SMP
3774 .set_affinity = set_ht_irq_affinity, 3578 .irq_set_affinity = ht_set_affinity,
3775#endif 3579#endif
3776 .retrigger = ioapic_retrigger_irq, 3580 .irq_retrigger = ioapic_retrigger_irq,
3777}; 3581};
3778 3582
3779int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) 3583int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
@@ -3864,14 +3668,13 @@ int __init arch_probe_nr_irqs(void)
3864 if (nr < nr_irqs) 3668 if (nr < nr_irqs)
3865 nr_irqs = nr; 3669 nr_irqs = nr;
3866 3670
3867 return 0; 3671 return NR_IRQS_LEGACY;
3868} 3672}
3869#endif 3673#endif
3870 3674
3871static int __io_apic_set_pci_routing(struct device *dev, int irq, 3675static int __io_apic_set_pci_routing(struct device *dev, int irq,
3872 struct io_apic_irq_attr *irq_attr) 3676 struct io_apic_irq_attr *irq_attr)
3873{ 3677{
3874 struct irq_desc *desc;
3875 struct irq_cfg *cfg; 3678 struct irq_cfg *cfg;
3876 int node; 3679 int node;
3877 int ioapic, pin; 3680 int ioapic, pin;
@@ -3889,11 +3692,9 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq,
3889 else 3692 else
3890 node = cpu_to_node(0); 3693 node = cpu_to_node(0);
3891 3694
3892 desc = irq_to_desc_alloc_node(irq, node); 3695 cfg = alloc_irq_and_cfg_at(irq, node);
3893 if (!desc) { 3696 if (!cfg)
3894 printk(KERN_INFO "can not get irq_desc %d\n", irq);
3895 return 0; 3697 return 0;
3896 }
3897 3698
3898 pin = irq_attr->ioapic_pin; 3699 pin = irq_attr->ioapic_pin;
3899 trigger = irq_attr->trigger; 3700 trigger = irq_attr->trigger;
@@ -3903,15 +3704,14 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq,
3903 * IRQs < 16 are already in the irq_2_pin[] map 3704 * IRQs < 16 are already in the irq_2_pin[] map
3904 */ 3705 */
3905 if (irq >= legacy_pic->nr_legacy_irqs) { 3706 if (irq >= legacy_pic->nr_legacy_irqs) {
3906 cfg = desc->chip_data; 3707 if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) {
3907 if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) {
3908 printk(KERN_INFO "can not add pin %d for irq %d\n", 3708 printk(KERN_INFO "can not add pin %d for irq %d\n",
3909 pin, irq); 3709 pin, irq);
3910 return 0; 3710 return 0;
3911 } 3711 }
3912 } 3712 }
3913 3713
3914 setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); 3714 setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity);
3915 3715
3916 return 0; 3716 return 0;
3917} 3717}
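With descriptor allocation folded into alloc_irq_and_cfg_at(), the routing path gets its irq_cfg back directly and never dereferences irq_desc or desc->chip_data. A condensed sketch of that flow under the same assumptions as the hunk; example_route_gsi() is a hypothetical wrapper name, and the helpers are the ones shown above:

/* Sketch: condensed __io_apic_set_pci_routing() flow after the change. */
static int example_route_gsi(int irq, int node, int ioapic, int pin,
                             int trigger, int polarity)
{
        struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);

        if (!cfg)
                return 0;

        /* legacy IRQs (< 16) already have their pin entries */
        if (irq >= legacy_pic->nr_legacy_irqs &&
            __add_pin_to_irq_node(cfg, node, ioapic, pin)) {
                printk(KERN_INFO "can not add pin %d for irq %d\n", pin, irq);
                return 0;
        }

        setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity);
        return 0;
}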
@@ -4104,14 +3904,14 @@ void __init setup_ioapic_dest(void)
4104 */ 3904 */
4105 if (desc->status & 3905 if (desc->status &
4106 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) 3906 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
4107 mask = desc->affinity; 3907 mask = desc->irq_data.affinity;
4108 else 3908 else
4109 mask = apic->target_cpus(); 3909 mask = apic->target_cpus();
4110 3910
4111 if (intr_remapping_enabled) 3911 if (intr_remapping_enabled)
4112 set_ir_ioapic_affinity_irq_desc(desc, mask); 3912 ir_ioapic_set_affinity(&desc->irq_data, mask, false);
4113 else 3913 else
4114 set_ioapic_affinity_irq_desc(desc, mask); 3914 ioapic_set_affinity(&desc->irq_data, mask, false);
4115 } 3915 }
4116 3916
4117} 3917}
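setup_ioapic_dest() still walks irq descriptors, but both the affinity mask and the handle handed to the affinity setters now live in desc->irq_data, and the third argument is the new 'force' flag (false here). A per-descriptor sketch under those assumptions; example_apply_dest() is an illustrative name, the setters are the io_apic.c ones from the hunk:

/* Sketch of the per-descriptor step in setup_ioapic_dest() after the change. */
static void example_apply_dest(struct irq_desc *desc)
{
        const struct cpumask *mask;

        /* honour a user/driver supplied affinity, otherwise use the default */
        if (desc->status & (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
                mask = desc->irq_data.affinity;
        else
                mask = apic->target_cpus();

        if (intr_remapping_enabled)
                ir_ioapic_set_affinity(&desc->irq_data, mask, false);
        else
                ioapic_set_affinity(&desc->irq_data, mask, false);
}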
@@ -4295,19 +4095,18 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
4295void __init pre_init_apic_IRQ0(void) 4095void __init pre_init_apic_IRQ0(void)
4296{ 4096{
4297 struct irq_cfg *cfg; 4097 struct irq_cfg *cfg;
4298 struct irq_desc *desc;
4299 4098
4300 printk(KERN_INFO "Early APIC setup for system timer0\n"); 4099 printk(KERN_INFO "Early APIC setup for system timer0\n");
4301#ifndef CONFIG_SMP 4100#ifndef CONFIG_SMP
4302 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); 4101 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
4303#endif 4102#endif
4304 desc = irq_to_desc_alloc_node(0, 0); 4103 /* Make sure the irq descriptor is set up */
4104 cfg = alloc_irq_and_cfg_at(0, 0);
4305 4105
4306 setup_local_APIC(); 4106 setup_local_APIC();
4307 4107
4308 cfg = irq_cfg(0);
4309 add_pin_to_irq_node(cfg, 0, 0, 0); 4108 add_pin_to_irq_node(cfg, 0, 0, 0);
4310 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); 4109 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
4311 4110
4312 setup_IO_APIC_irq(0, 0, 0, desc, 0, 0); 4111 setup_ioapic_irq(0, 0, 0, cfg, 0, 0);
4313} 4112}