author		Yinghai Lu <yinghai@kernel.org>	2008-12-05 21:58:34 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-08 08:31:59 -0500
commit		3145e941fcfe2548fa2270afb1a05bab3a6bc418 (patch)
tree		5bd08b9de84d1be58be040058ac77a4dd8c8d7b2 /arch/x86/kernel/io_apic.c
parent		be5d5350a937cd8513b258739f1099420129e96f (diff)
x86, MSI: pass irq_cfg and irq_desc
Impact: simplify code

Pass irq_desc and cfg around, instead of raw IRQ numbers - this way we
don't have to look it up again and again.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
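The pattern the patch applies throughout io_apic.c is easy to see in isolation: instead of each helper re-deriving the configuration from a raw IRQ number, the caller resolves the descriptor once and hands the pointer down the call chain. A minimal stand-alone sketch of that idea (the structs and the irq_to_desc() table below are simplified stand-ins for illustration, not the kernel's real irq_desc/irq_cfg definitions):

#include <stdio.h>

/* Simplified stand-ins for the kernel's irq_cfg / irq_desc. */
struct irq_cfg  { unsigned int vector; };
struct irq_desc { unsigned int irq; struct irq_cfg *chip_data; };

static struct irq_cfg  cfgs[16];
static struct irq_desc descs[16];

static struct irq_desc *irq_to_desc(unsigned int irq) { return &descs[irq]; }

/* Old style: the helper re-derives cfg from the raw IRQ number on every call. */
static void mask_irq_old(unsigned int irq)
{
	struct irq_cfg *cfg = irq_to_desc(irq)->chip_data;	/* repeated lookup */
	printf("masking vector 0x%x\n", cfg->vector);
}

/* New style: the caller resolves desc/cfg once and passes the pointer down. */
static void mask_irq_desc(struct irq_desc *desc)
{
	struct irq_cfg *cfg = desc->chip_data;			/* no lookup here */
	printf("masking vector 0x%x\n", cfg->vector);
}

int main(void)
{
	descs[3].irq = 3;
	descs[3].chip_data = &cfgs[3];
	cfgs[3].vector = 0x31;

	mask_irq_old(3);			/* looks the config up internally  */
	mask_irq_desc(irq_to_desc(3));		/* lookup done once by the caller  */
	return 0;
}

With the pointer passed explicitly, each helper in the diff below (the mask/unmask helpers, the affinity setters, assign_irq_vector() and friends) can drop its own irq_cfg(irq) or irq_to_desc(irq) call.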
Diffstat (limited to 'arch/x86/kernel/io_apic.c')
-rw-r--r--	arch/x86/kernel/io_apic.c	318
1 file changed, 181 insertions(+), 137 deletions(-)
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 0dcde74abd1d..a1a2e070f31a 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -231,6 +231,10 @@ static struct irq_cfg *irq_cfg(unsigned int irq)
 
 #endif
 
+static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+{
+}
+
 struct io_apic {
 	unsigned int index;
 	unsigned int unused[3];
@@ -272,11 +276,10 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
 	writel(value, &io_apic->data);
 }
 
-static bool io_apic_level_ack_pending(unsigned int irq)
+static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
 {
 	struct irq_pin_list *entry;
 	unsigned long flags;
-	struct irq_cfg *cfg = irq_cfg(irq);
 
 	spin_lock_irqsave(&ioapic_lock, flags);
 	entry = cfg->irq_2_pin;
@@ -358,13 +361,12 @@ static void ioapic_mask_entry(int apic, int pin)
 }
 
 #ifdef CONFIG_SMP
-static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
+static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 {
 	int apic, pin;
-	struct irq_cfg *cfg;
 	struct irq_pin_list *entry;
+	u8 vector = cfg->vector;
 
-	cfg = irq_cfg(irq);
 	entry = cfg->irq_2_pin;
 	for (;;) {
 		unsigned int reg;
@@ -394,24 +396,27 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
 	}
 }
 
-static int assign_irq_vector(int irq, cpumask_t mask);
+static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
 
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
 {
 	struct irq_cfg *cfg;
 	unsigned long flags;
 	unsigned int dest;
 	cpumask_t tmp;
-	struct irq_desc *desc;
+	unsigned int irq;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
 		return;
 
-	cfg = irq_cfg(irq);
-	if (assign_irq_vector(irq, mask))
+	irq = desc->irq;
+	cfg = desc->chip_data;
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
+	set_extra_move_desc(desc, mask);
+
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 	/*
@@ -419,12 +424,20 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 	 */
 	dest = SET_APIC_LOGICAL_ID(dest);
 
-	desc = irq_to_desc(irq);
 	spin_lock_irqsave(&ioapic_lock, flags);
-	__target_IO_APIC_irq(irq, dest, cfg->vector);
+	__target_IO_APIC_irq(irq, dest, cfg);
 	desc->affinity = mask;
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
+
+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+
+	set_ioapic_affinity_irq_desc(desc, mask);
+}
 #endif /* CONFIG_SMP */
 
 /*
@@ -432,10 +445,9 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
  * shared ISA-space IRQs, so we have to support them. We are super
  * fast in the common case, and fast for shared ISA-space IRQs.
  */
-static void add_pin_to_irq_cpu(unsigned int irq, int cpu, int apic, int pin)
+static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin)
 {
 	struct irq_pin_list *entry;
-	struct irq_cfg *cfg = irq_cfg(irq);
 
 	entry = cfg->irq_2_pin;
 	if (!entry) {
@@ -468,11 +480,10 @@ static void add_pin_to_irq_cpu(unsigned int irq, int cpu, int apic, int pin)
 /*
  * Reroute an IRQ to a different pin.
  */
-static void __init replace_pin_at_irq(unsigned int irq, int cpu,
+static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu,
 				      int oldapic, int oldpin,
 				      int newapic, int newpin)
 {
-	struct irq_cfg *cfg = irq_cfg(irq);
 	struct irq_pin_list *entry = cfg->irq_2_pin;
 	int replaced = 0;
 
@@ -489,18 +500,16 @@ static void __init replace_pin_at_irq(unsigned int irq, int cpu,
 
 	/* why? call replace before add? */
 	if (!replaced)
-		add_pin_to_irq_cpu(irq, cpu, newapic, newpin);
+		add_pin_to_irq_cpu(cfg, cpu, newapic, newpin);
 }
 
-static inline void io_apic_modify_irq(unsigned int irq,
+static inline void io_apic_modify_irq(struct irq_cfg *cfg,
 				      int mask_and, int mask_or,
 				      void (*final)(struct irq_pin_list *entry))
 {
 	int pin;
-	struct irq_cfg *cfg;
 	struct irq_pin_list *entry;
 
-	cfg = irq_cfg(irq);
 	for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
 		unsigned int reg;
 		pin = entry->pin;
@@ -513,9 +522,9 @@ static inline void io_apic_modify_irq(unsigned int irq,
 	}
 }
 
-static void __unmask_IO_APIC_irq(unsigned int irq)
+static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED, 0, NULL);
+	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
 }
 
 #ifdef CONFIG_X86_64
@@ -530,47 +539,64 @@ void io_apic_sync(struct irq_pin_list *entry)
 	readl(&io_apic->data);
 }
 
-static void __mask_IO_APIC_irq(unsigned int irq)
+static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
+	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
 }
 #else /* CONFIG_X86_32 */
-static void __mask_IO_APIC_irq(unsigned int irq)
+static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, NULL);
+	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL);
 }
 
-static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
+static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~IO_APIC_REDIR_LEVEL_TRIGGER,
+	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER,
 			IO_APIC_REDIR_MASKED, NULL);
 }
 
-static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
+static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED,
+	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
 			IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
 }
 #endif /* CONFIG_X86_32 */
 
-static void mask_IO_APIC_irq (unsigned int irq)
+static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
 {
+	struct irq_cfg *cfg = desc->chip_data;
 	unsigned long flags;
 
+	BUG_ON(!cfg);
+
 	spin_lock_irqsave(&ioapic_lock, flags);
-	__mask_IO_APIC_irq(irq);
+	__mask_IO_APIC_irq(cfg);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void unmask_IO_APIC_irq (unsigned int irq)
+static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
 {
+	struct irq_cfg *cfg = desc->chip_data;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ioapic_lock, flags);
-	__unmask_IO_APIC_irq(irq);
+	__unmask_IO_APIC_irq(cfg);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
+static void mask_IO_APIC_irq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	mask_IO_APIC_irq_desc(desc);
+}
+static void unmask_IO_APIC_irq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	unmask_IO_APIC_irq_desc(desc);
+}
+
 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
 {
 	struct IO_APIC_route_entry entry;
@@ -1072,7 +1098,7 @@ void unlock_vector_lock(void)
 	spin_unlock(&vector_lock);
 }
 
-static int __assign_irq_vector(int irq, cpumask_t mask)
+static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
 {
 	/*
 	 * NOTE! The local APIC isn't very good at handling
@@ -1088,16 +1114,13 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
 	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
 	unsigned int old_vector;
 	int cpu;
-	struct irq_cfg *cfg;
 
-	cfg = irq_cfg(irq);
+	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
+		return -EBUSY;
 
 	/* Only try and allocate irqs on cpus that are present */
 	cpus_and(mask, mask, cpu_online_map);
 
-	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
-		return -EBUSY;
-
 	old_vector = cfg->vector;
 	if (old_vector) {
 		cpumask_t tmp;
@@ -1151,24 +1174,22 @@ next:
 	return -ENOSPC;
 }
 
-static int assign_irq_vector(int irq, cpumask_t mask)
+static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
 {
 	int err;
 	unsigned long flags;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	err = __assign_irq_vector(irq, mask);
+	err = __assign_irq_vector(irq, cfg, mask);
 	spin_unlock_irqrestore(&vector_lock, flags);
 	return err;
 }
 
-static void __clear_irq_vector(int irq)
+static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 {
-	struct irq_cfg *cfg;
 	cpumask_t mask;
 	int cpu, vector;
 
-	cfg = irq_cfg(irq);
 	BUG_ON(!cfg->vector);
 
 	vector = cfg->vector;
@@ -1257,11 +1278,8 @@ static inline int IO_APIC_irq_trigger(int irq)
 }
 #endif
 
-static void ioapic_register_intr(int irq, unsigned long trigger)
+static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
 {
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
 
 	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
 	    trigger == IOAPIC_LEVEL)
@@ -1353,7 +1371,7 @@ static int setup_ioapic_entry(int apic, int irq,
 	return 0;
 }
 
-static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
+static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc,
 			      int trigger, int polarity)
 {
 	struct irq_cfg *cfg;
@@ -1363,10 +1381,10 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
 	if (!IO_APIC_IRQ(irq))
 		return;
 
-	cfg = irq_cfg(irq);
+	cfg = desc->chip_data;
 
 	mask = TARGET_CPUS;
-	if (assign_irq_vector(irq, mask))
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
 	cpus_and(mask, cfg->domain, mask);
@@ -1383,11 +1401,11 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
 			       cfg->vector)) {
 		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
 		       mp_ioapics[apic].mp_apicid, pin);
-		__clear_irq_vector(irq);
+		__clear_irq_vector(irq, cfg);
 		return;
 	}
 
-	ioapic_register_intr(irq, trigger);
+	ioapic_register_intr(irq, desc, trigger);
 	if (irq < NR_IRQS_LEGACY)
 		disable_8259A_irq(irq);
 
@@ -1399,6 +1417,7 @@ static void __init setup_IO_APIC_irqs(void)
 	int apic, pin, idx, irq;
 	int notcon = 0;
 	struct irq_desc *desc;
+	struct irq_cfg *cfg;
 	int cpu = boot_cpu_id;
 
 	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
@@ -1436,9 +1455,10 @@ static void __init setup_IO_APIC_irqs(void)
 				printk(KERN_INFO "can not get irq_desc for %d\n", irq);
 				continue;
 			}
-			add_pin_to_irq_cpu(irq, cpu, apic, pin);
+			cfg = desc->chip_data;
+			add_pin_to_irq_cpu(cfg, cpu, apic, pin);
 
-			setup_IO_APIC_irq(apic, pin, irq,
+			setup_IO_APIC_irq(apic, pin, irq, desc,
 					irq_trigger(idx), irq_polarity(idx));
 		}
 	}
@@ -2086,7 +2106,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
 		was_pending = 1;
 	}
 	cfg = irq_cfg(irq);
-	__unmask_IO_APIC_irq(irq);
+	__unmask_IO_APIC_irq(cfg);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return was_pending;
@@ -2149,35 +2169,37 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
  * as simple as edge triggered migration and we can do the irq migration
  * with a simple atomic update to IO-APIC RTE.
  */
-static void migrate_ioapic_irq(int irq, cpumask_t mask)
+static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
 {
 	struct irq_cfg *cfg;
-	struct irq_desc *desc;
 	cpumask_t tmp, cleanup_mask;
 	struct irte irte;
 	int modify_ioapic_rte;
 	unsigned int dest;
 	unsigned long flags;
+	unsigned int irq;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
 		return;
 
+	irq = desc->irq;
 	if (get_irte(irq, &irte))
 		return;
 
-	if (assign_irq_vector(irq, mask))
+	cfg = desc->chip_data;
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
-	cfg = irq_cfg(irq);
+	set_extra_move_desc(desc, mask);
+
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
-	desc = irq_to_desc(irq);
 	modify_ioapic_rte = desc->status & IRQ_LEVEL;
 	if (modify_ioapic_rte) {
 		spin_lock_irqsave(&ioapic_lock, flags);
-		__target_IO_APIC_irq(irq, dest, cfg->vector);
+		__target_IO_APIC_irq(irq, dest, cfg);
 		spin_unlock_irqrestore(&ioapic_lock, flags);
 	}
 
@@ -2199,14 +2221,14 @@ static void migrate_ioapic_irq(int irq, cpumask_t mask)
 	desc->affinity = mask;
 }
 
-static int migrate_irq_remapped_level(int irq)
+static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
 {
 	int ret = -1;
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_cfg *cfg = desc->chip_data;
 
-	mask_IO_APIC_irq(irq);
+	mask_IO_APIC_irq_desc(desc);
 
-	if (io_apic_level_ack_pending(irq)) {
+	if (io_apic_level_ack_pending(cfg)) {
 		/*
 		 * Interrupt in progress. Migrating irq now will change the
 		 * vector information in the IO-APIC RTE and that will confuse
@@ -2218,14 +2240,15 @@ static int migrate_irq_remapped_level(int irq)
 	}
 
 	/* everthing is clear. we have right of way */
-	migrate_ioapic_irq(irq, desc->pending_mask);
+	migrate_ioapic_irq_desc(desc, desc->pending_mask);
 
 	ret = 0;
 	desc->status &= ~IRQ_MOVE_PENDING;
 	cpus_clear(desc->pending_mask);
 
 unmask:
-	unmask_IO_APIC_irq(irq);
+	unmask_IO_APIC_irq_desc(desc);
+
 	return ret;
 }
 
@@ -2258,18 +2281,22 @@ static void ir_irq_migration(struct work_struct *work)
 /*
  * Migrates the IRQ destination in the process context.
  */
-static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-
 	if (desc->status & IRQ_LEVEL) {
 		desc->status |= IRQ_MOVE_PENDING;
 		desc->pending_mask = mask;
-		migrate_irq_remapped_level(irq);
+		migrate_irq_remapped_level_desc(desc);
 		return;
 	}
 
-	migrate_ioapic_irq(irq, mask);
+	migrate_ioapic_irq_desc(desc, mask);
+}
+static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	set_ir_ioapic_affinity_irq_desc(desc, mask);
 }
 #endif
 
@@ -2313,9 +2340,10 @@ unlock:
 	irq_exit();
 }
 
-static void irq_complete_move(unsigned int irq)
+static void irq_complete_move(struct irq_desc **descp)
 {
-	struct irq_cfg *cfg = irq_cfg(irq);
+	struct irq_desc *desc = *descp;
+	struct irq_cfg *cfg = desc->chip_data;
 	unsigned vector, me;
 
 	if (likely(!cfg->move_in_progress))
@@ -2333,8 +2361,9 @@ static void irq_complete_move(unsigned int irq)
 	}
 }
 #else
-static inline void irq_complete_move(unsigned int irq) {}
+static inline void irq_complete_move(struct irq_desc **descp) {}
 #endif
+
 #ifdef CONFIG_INTR_REMAP
 static void ack_x2apic_level(unsigned int irq)
 {
@@ -2345,11 +2374,14 @@ static void ack_x2apic_edge(unsigned int irq)
 {
 	ack_x2APIC_irq();
 }
+
 #endif
 
 static void ack_apic_edge(unsigned int irq)
 {
-	irq_complete_move(irq);
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	irq_complete_move(&desc);
 	move_native_irq(irq);
 	ack_APIC_irq();
 }
@@ -2358,18 +2390,21 @@ atomic_t irq_mis_count;
 
 static void ack_apic_level(unsigned int irq)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
+
 #ifdef CONFIG_X86_32
 	unsigned long v;
 	int i;
 #endif
+	struct irq_cfg *cfg;
 	int do_unmask_irq = 0;
 
-	irq_complete_move(irq);
+	irq_complete_move(&desc);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	/* If we are moving the irq we need to mask it */
-	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
+	if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
 		do_unmask_irq = 1;
-		mask_IO_APIC_irq(irq);
+		mask_IO_APIC_irq_desc(desc);
 	}
 #endif
 
@@ -2393,7 +2428,8 @@ static void ack_apic_level(unsigned int irq)
 	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
 	 * The idea is from Manfred Spraul. --macro
 	 */
-	i = irq_cfg(irq)->vector;
+	cfg = desc->chip_data;
+	i = cfg->vector;
 
 	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
 #endif
@@ -2432,17 +2468,18 @@ static void ack_apic_level(unsigned int irq)
 	 * accurate and is causing problems then it is a hardware bug
 	 * and you can go talk to the chipset vendor about it.
 	 */
-	if (!io_apic_level_ack_pending(irq))
+	cfg = desc->chip_data;
+	if (!io_apic_level_ack_pending(cfg))
 		move_masked_irq(irq);
-	unmask_IO_APIC_irq(irq);
+	unmask_IO_APIC_irq_desc(desc);
 	}
 
 #ifdef CONFIG_X86_32
 	if (!(v & (1 << (i & 0x1f)))) {
 		atomic_inc(&irq_mis_count);
 		spin_lock(&ioapic_lock);
-		__mask_and_edge_IO_APIC_irq(irq);
-		__unmask_and_level_IO_APIC_irq(irq);
+		__mask_and_edge_IO_APIC_irq(cfg);
+		__unmask_and_level_IO_APIC_irq(cfg);
 		spin_unlock(&ioapic_lock);
 	}
 #endif
@@ -2533,7 +2570,7 @@ static void unmask_lapic_irq(unsigned int irq)
 	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
 }
 
-static void ack_lapic_irq (unsigned int irq)
+static void ack_lapic_irq(unsigned int irq)
 {
 	ack_APIC_irq();
 }
@@ -2545,11 +2582,8 @@ static struct irq_chip lapic_chip __read_mostly = {
 	.ack = ack_lapic_irq,
 };
 
-static void lapic_register_intr(int irq)
+static void lapic_register_intr(int irq, struct irq_desc *desc)
 {
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
 	desc->status &= ~IRQ_LEVEL;
 	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
 				      "edge");
@@ -2653,7 +2687,9 @@ int timer_through_8259 __initdata;
  */
 static inline void __init check_timer(void)
 {
-	struct irq_cfg *cfg = irq_cfg(0);
+	struct irq_desc *desc = irq_to_desc(0);
+	struct irq_cfg *cfg = desc->chip_data;
+	int cpu = boot_cpu_id;
 	int apic1, pin1, apic2, pin2;
 	unsigned long flags;
 	unsigned int ver;
@@ -2668,7 +2704,7 @@ static inline void __init check_timer(void)
 	 * get/set the timer IRQ vector:
 	 */
 	disable_8259A_irq(0);
-	assign_irq_vector(0, TARGET_CPUS);
+	assign_irq_vector(0, cfg, TARGET_CPUS);
 
 	/*
 	 * As IRQ0 is to be enabled in the 8259A, the virtual
@@ -2719,10 +2755,10 @@ static inline void __init check_timer(void)
 	 * Ok, does IRQ0 through the IOAPIC work?
 	 */
 	if (no_pin1) {
-		add_pin_to_irq_cpu(0, boot_cpu_id, apic1, pin1);
+		add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
 		setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
 	}
-	unmask_IO_APIC_irq(0);
+	unmask_IO_APIC_irq_desc(desc);
 	if (timer_irq_works()) {
 		if (nmi_watchdog == NMI_IO_APIC) {
 			setup_nmi();
@@ -2748,9 +2784,9 @@ static inline void __init check_timer(void)
 		/*
 		 * legacy devices should be connected to IO APIC #0
 		 */
-		replace_pin_at_irq(0, boot_cpu_id, apic1, pin1, apic2, pin2);
+		replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
 		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
-		unmask_IO_APIC_irq(0);
+		unmask_IO_APIC_irq_desc(desc);
 		enable_8259A_irq(0);
 		if (timer_irq_works()) {
 			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@ -2782,7 +2818,7 @@ static inline void __init check_timer(void)
 	apic_printk(APIC_QUIET, KERN_INFO
 		    "...trying to set up timer as Virtual Wire IRQ...\n");
 
-	lapic_register_intr(0);
+	lapic_register_intr(0, desc);
 	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
 	enable_8259A_irq(0);
 
@@ -2986,7 +3022,7 @@ unsigned int create_irq_nr(unsigned int irq_want)
 
 		if (cfg_new->vector != 0)
 			continue;
-		if (__assign_irq_vector(new, TARGET_CPUS) == 0)
+		if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0)
 			irq = new;
 		break;
 	}
@@ -3034,7 +3070,7 @@ void destroy_irq(unsigned int irq)
 	free_irte(irq);
 #endif
 	spin_lock_irqsave(&vector_lock, flags);
-	__clear_irq_vector(irq);
+	__clear_irq_vector(irq, cfg);
 	spin_unlock_irqrestore(&vector_lock, flags);
 }
 
@@ -3049,12 +3085,12 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 	unsigned dest;
 	cpumask_t tmp;
 
+	cfg = irq_cfg(irq);
 	tmp = TARGET_CPUS;
-	err = assign_irq_vector(irq, tmp);
+	err = assign_irq_vector(irq, cfg, tmp);
 	if (err)
 		return err;
 
-	cfg = irq_cfg(irq);
 	cpus_and(tmp, cfg->domain, tmp);
 	dest = cpu_mask_to_apicid(tmp);
 
@@ -3112,35 +3148,35 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 #ifdef CONFIG_SMP
 static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	struct msi_msg msg;
 	unsigned int dest;
 	cpumask_t tmp;
-	struct irq_desc *desc;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
 		return;
 
-	if (assign_irq_vector(irq, mask))
+	cfg = desc->chip_data;
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
-	cfg = irq_cfg(irq);
+	set_extra_move_desc(desc, mask);
+
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
-	read_msi_msg(irq, &msg);
+	read_msi_msg_desc(desc, &msg);
 
 	msg.data &= ~MSI_DATA_VECTOR_MASK;
 	msg.data |= MSI_DATA_VECTOR(cfg->vector);
 	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
-	write_msi_msg(irq, &msg);
-	desc = irq_to_desc(irq);
+	write_msi_msg_desc(desc, &msg);
 	desc->affinity = mask;
 }
-
 #ifdef CONFIG_INTR_REMAP
 /*
  * Migrate the MSI irq to another cpumask. This migration is
@@ -3148,11 +3184,11 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
  */
 static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	unsigned int dest;
 	cpumask_t tmp, cleanup_mask;
 	struct irte irte;
-	struct irq_desc *desc;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
@@ -3161,10 +3197,12 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 	if (get_irte(irq, &irte))
 		return;
 
-	if (assign_irq_vector(irq, mask))
+	cfg = desc->chip_data;
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
-	cfg = irq_cfg(irq);
+	set_extra_move_desc(desc, mask);
+
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
@@ -3188,9 +3226,9 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 		cfg->move_in_progress = 0;
 	}
 
-	desc = irq_to_desc(irq);
 	desc->affinity = mask;
 }
+
 #endif
 #endif /* CONFIG_SMP */
 
@@ -3249,7 +3287,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
 }
 #endif
 
-static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
+static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
 {
 	int ret;
 	struct msi_msg msg;
@@ -3258,7 +3296,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
 	if (ret < 0)
 		return ret;
 
-	set_irq_msi(irq, desc);
+	set_irq_msi(irq, msidesc);
 	write_msi_msg(irq, &msg);
 
 #ifdef CONFIG_INTR_REMAP
@@ -3381,20 +3419,22 @@ void arch_teardown_msi_irq(unsigned int irq)
 #ifdef CONFIG_SMP
 static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	struct msi_msg msg;
 	unsigned int dest;
 	cpumask_t tmp;
-	struct irq_desc *desc;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
 		return;
 
-	if (assign_irq_vector(irq, mask))
+	cfg = desc->chip_data;
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
-	cfg = irq_cfg(irq);
+	set_extra_move_desc(desc, mask);
+
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
@@ -3406,9 +3446,9 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	dmar_msi_write(irq, &msg);
-	desc = irq_to_desc(irq);
 	desc->affinity = mask;
 }
+
 #endif /* CONFIG_SMP */
 
 struct irq_chip dmar_msi_type = {
@@ -3442,8 +3482,8 @@ int arch_setup_dmar_msi(unsigned int irq)
 #ifdef CONFIG_SMP
 static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
-	struct irq_desc *desc;
 	struct msi_msg msg;
 	unsigned int dest;
 	cpumask_t tmp;
@@ -3452,10 +3492,12 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	if (cpus_empty(tmp))
 		return;
 
-	if (assign_irq_vector(irq, mask))
+	cfg = desc->chip_data;
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
-	cfg = irq_cfg(irq);
+	set_extra_move_desc(desc, mask);
+
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
@@ -3467,9 +3509,9 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	hpet_msi_write(irq, &msg);
-	desc = irq_to_desc(irq);
 	desc->affinity = mask;
 }
+
 #endif /* CONFIG_SMP */
 
 struct irq_chip hpet_msi_type = {
@@ -3524,26 +3566,28 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
 
 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	unsigned int dest;
 	cpumask_t tmp;
-	struct irq_desc *desc;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
 		return;
 
-	if (assign_irq_vector(irq, mask))
+	cfg = desc->chip_data;
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
-	cfg = irq_cfg(irq);
+	set_extra_move_desc(desc, mask);
+
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 
 	target_ht_irq(irq, dest, cfg->vector);
-	desc = irq_to_desc(irq);
 	desc->affinity = mask;
 }
+
 #endif
 
 static struct irq_chip ht_irq_chip = {
@@ -3563,13 +3607,13 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 	int err;
 	cpumask_t tmp;
 
+	cfg = irq_cfg(irq);
 	tmp = TARGET_CPUS;
-	err = assign_irq_vector(irq, tmp);
+	err = assign_irq_vector(irq, cfg, tmp);
 	if (!err) {
 		struct ht_irq_msg msg;
 		unsigned dest;
 
-		cfg = irq_cfg(irq);
 		cpus_and(tmp, cfg->domain, tmp);
 		dest = cpu_mask_to_apicid(tmp);
 
@@ -3615,7 +3659,9 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	unsigned long flags;
 	int err;
 
-	err = assign_irq_vector(irq, *eligible_cpu);
+	cfg = irq_cfg(irq);
+
+	err = assign_irq_vector(irq, cfg, *eligible_cpu);
 	if (err != 0)
 		return err;
 
@@ -3624,8 +3670,6 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 			irq_name);
 	spin_unlock_irqrestore(&vector_lock, flags);
 
-	cfg = irq_cfg(irq);
-
 	mmr_value = 0;
 	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
 	BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
@@ -3806,10 +3850,10 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int p
 	 */
 	if (irq >= NR_IRQS_LEGACY) {
 		cfg = desc->chip_data;
-		add_pin_to_irq_cpu(irq, cpu, ioapic, pin);
+		add_pin_to_irq_cpu(cfg, cpu, ioapic, pin);
 	}
 
-	setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
+	setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity);
 
 	return 0;
 }
@@ -3866,7 +3910,7 @@ void __init setup_ioapic_dest(void)
 		desc = irq_to_desc(irq);
 		cfg = desc->chip_data;
 		if (!cfg->vector) {
-			setup_IO_APIC_irq(ioapic, pin, irq,
+			setup_IO_APIC_irq(ioapic, pin, irq, desc,
 					  irq_trigger(irq_entry),
 					  irq_polarity(irq_entry));
 			continue;
@@ -3884,10 +3928,10 @@ void __init setup_ioapic_dest(void)
 
 #ifdef CONFIG_INTR_REMAP
 		if (intr_remapping_enabled)
-			set_ir_ioapic_affinity_irq(irq, mask);
+			set_ir_ioapic_affinity_irq_desc(desc, mask);
 		else
 #endif
-			set_ioapic_affinity_irq(irq, mask);
+			set_ioapic_affinity_irq_desc(desc, mask);
 	}
 
 	}