path: root/arch/x86/kernel/io_apic.c
author		Ingo Molnar <mingo@elte.hu>	2009-01-02 16:41:36 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-02 16:41:36 -0500
commit		923a789b49c7269a0245d5af6afe486188d940df (patch)
tree		c3f168427372e64f7467a794f313416da5086ba0 /arch/x86/kernel/io_apic.c
parent		103ceffb9501531f6931df6aebc11a05189201f0 (diff)
parent		b840d79631c882786925303c2b0f4fefc31845ed (diff)
Merge branch 'linus' into x86/cleanups
Conflicts:
	arch/x86/kernel/reboot.c
Diffstat (limited to 'arch/x86/kernel/io_apic.c')
-rw-r--r--	arch/x86/kernel/io_apic.c	1021
1 file changed, 637 insertions(+), 384 deletions(-)
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index b8c8a8e99341..69911722b9d3 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -108,94 +108,277 @@ static int __init parse_noapic(char *str)
 early_param("noapic", parse_noapic);
 
 struct irq_pin_list;
+
+/*
+ * This is performance-critical, we want to do it O(1)
+ *
+ * the indexing order of this array favors 1:1 mappings
+ * between pins and IRQs.
+ */
+
+struct irq_pin_list {
+	int apic, pin;
+	struct irq_pin_list *next;
+};
+
+static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
+{
+	struct irq_pin_list *pin;
+	int node;
+
+	node = cpu_to_node(cpu);
+
+	pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
+	printk(KERN_DEBUG " alloc irq_2_pin on cpu %d node %d\n", cpu, node);
+
+	return pin;
+}
+
 struct irq_cfg {
-	unsigned int irq;
 	struct irq_pin_list *irq_2_pin;
-	cpumask_t domain;
-	cpumask_t old_domain;
+	cpumask_var_t domain;
+	cpumask_var_t old_domain;
 	unsigned move_cleanup_count;
 	u8 vector;
 	u8 move_in_progress : 1;
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+	u8 move_desc_pending : 1;
+#endif
 };
 
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_cfg irq_cfgx[] = {
+#else
 static struct irq_cfg irq_cfgx[NR_IRQS] = {
-	[0]  = { .irq =  0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
-	[1]  = { .irq =  1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
-	[2]  = { .irq =  2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
-	[3]  = { .irq =  3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
-	[4]  = { .irq =  4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
-	[5]  = { .irq =  5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
-	[6]  = { .irq =  6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
-	[7]  = { .irq =  7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
-	[8]  = { .irq =  8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
-	[9]  = { .irq =  9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
-	[10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
-	[11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
-	[12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
-	[13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
-	[14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
-	[15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
+#endif
+	[0]  = { .vector = IRQ0_VECTOR,  },
+	[1]  = { .vector = IRQ1_VECTOR,  },
+	[2]  = { .vector = IRQ2_VECTOR,  },
+	[3]  = { .vector = IRQ3_VECTOR,  },
+	[4]  = { .vector = IRQ4_VECTOR,  },
+	[5]  = { .vector = IRQ5_VECTOR,  },
+	[6]  = { .vector = IRQ6_VECTOR,  },
+	[7]  = { .vector = IRQ7_VECTOR,  },
+	[8]  = { .vector = IRQ8_VECTOR,  },
+	[9]  = { .vector = IRQ9_VECTOR,  },
+	[10] = { .vector = IRQ10_VECTOR, },
+	[11] = { .vector = IRQ11_VECTOR, },
+	[12] = { .vector = IRQ12_VECTOR, },
+	[13] = { .vector = IRQ13_VECTOR, },
+	[14] = { .vector = IRQ14_VECTOR, },
+	[15] = { .vector = IRQ15_VECTOR, },
 };
 
-#define for_each_irq_cfg(irq, cfg)	\
-	for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++)
+int __init arch_early_irq_init(void)
+{
+	struct irq_cfg *cfg;
+	struct irq_desc *desc;
+	int count;
+	int i;
+
+	cfg = irq_cfgx;
+	count = ARRAY_SIZE(irq_cfgx);
+
+	for (i = 0; i < count; i++) {
+		desc = irq_to_desc(i);
+		desc->chip_data = &cfg[i];
+		alloc_bootmem_cpumask_var(&cfg[i].domain);
+		alloc_bootmem_cpumask_var(&cfg[i].old_domain);
+		if (i < NR_IRQS_LEGACY)
+			cpumask_setall(cfg[i].domain);
+	}
+
+	return 0;
+}
 
+#ifdef CONFIG_SPARSE_IRQ
 static struct irq_cfg *irq_cfg(unsigned int irq)
 {
-	return irq < nr_irqs ? irq_cfgx + irq : NULL;
+	struct irq_cfg *cfg = NULL;
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	if (desc)
+		cfg = desc->chip_data;
+
+	return cfg;
 }
 
-static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
+static struct irq_cfg *get_one_free_irq_cfg(int cpu)
 {
-	return irq_cfg(irq);
+	struct irq_cfg *cfg;
+	int node;
+
+	node = cpu_to_node(cpu);
+
+	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
+	if (cfg) {
+		/* FIXME: needs alloc_cpumask_var_node() */
+		if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) {
+			kfree(cfg);
+			cfg = NULL;
+		} else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) {
+			free_cpumask_var(cfg->domain);
+			kfree(cfg);
+			cfg = NULL;
+		} else {
+			cpumask_clear(cfg->domain);
+			cpumask_clear(cfg->old_domain);
+		}
+	}
+	printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node);
+
+	return cfg;
 }
 
-/*
- * Rough estimation of how many shared IRQs there are, can be changed
- * anytime.
- */
-#define MAX_PLUS_SHARED_IRQS NR_IRQS
-#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
+int arch_init_chip_data(struct irq_desc *desc, int cpu)
+{
+	struct irq_cfg *cfg;
 
-/*
- * This is performance-critical, we want to do it O(1)
- *
- * the indexing order of this array favors 1:1 mappings
- * between pins and IRQs.
- */
+	cfg = desc->chip_data;
+	if (!cfg) {
+		desc->chip_data = get_one_free_irq_cfg(cpu);
+		if (!desc->chip_data) {
+			printk(KERN_ERR "can not alloc irq_cfg\n");
+			BUG_ON(1);
+		}
+	}
 
-struct irq_pin_list {
-	int apic, pin;
-	struct irq_pin_list *next;
-};
+	return 0;
+}
 
-static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE];
-static struct irq_pin_list *irq_2_pin_ptr;
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
 
-static void __init irq_2_pin_init(void)
+static void
+init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu)
 {
-	struct irq_pin_list *pin = irq_2_pin_head;
-	int i;
+	struct irq_pin_list *old_entry, *head, *tail, *entry;
+
+	cfg->irq_2_pin = NULL;
+	old_entry = old_cfg->irq_2_pin;
+	if (!old_entry)
+		return;
+
+	entry = get_one_free_irq_2_pin(cpu);
+	if (!entry)
+		return;
 
-	for (i = 1; i < PIN_MAP_SIZE; i++)
-		pin[i-1].next = &pin[i];
+	entry->apic = old_entry->apic;
+	entry->pin = old_entry->pin;
+	head = entry;
+	tail = entry;
+	old_entry = old_entry->next;
+	while (old_entry) {
+		entry = get_one_free_irq_2_pin(cpu);
+		if (!entry) {
+			entry = head;
+			while (entry) {
+				head = entry->next;
+				kfree(entry);
+				entry = head;
+			}
+			/* still use the old one */
+			return;
+		}
+		entry->apic = old_entry->apic;
+		entry->pin = old_entry->pin;
+		tail->next = entry;
+		tail = entry;
+		old_entry = old_entry->next;
+	}
 
-	irq_2_pin_ptr = &pin[0];
+	tail->next = NULL;
+	cfg->irq_2_pin = head;
 }
 
-static struct irq_pin_list *get_one_free_irq_2_pin(void)
+static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg)
 {
-	struct irq_pin_list *pin = irq_2_pin_ptr;
+	struct irq_pin_list *entry, *next;
 
-	if (!pin)
-		panic("can not get more irq_2_pin\n");
+	if (old_cfg->irq_2_pin == cfg->irq_2_pin)
+		return;
 
-	irq_2_pin_ptr = pin->next;
-	pin->next = NULL;
-	return pin;
+	entry = old_cfg->irq_2_pin;
+
+	while (entry) {
+		next = entry->next;
+		kfree(entry);
+		entry = next;
+	}
+	old_cfg->irq_2_pin = NULL;
+}
+
+void arch_init_copy_chip_data(struct irq_desc *old_desc,
+				 struct irq_desc *desc, int cpu)
+{
+	struct irq_cfg *cfg;
+	struct irq_cfg *old_cfg;
+
+	cfg = get_one_free_irq_cfg(cpu);
+
+	if (!cfg)
+		return;
+
+	desc->chip_data = cfg;
+
+	old_cfg = old_desc->chip_data;
+
+	memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
+
+	init_copy_irq_2_pin(old_cfg, cfg, cpu);
 }
 
+static void free_irq_cfg(struct irq_cfg *old_cfg)
+{
+	kfree(old_cfg);
+}
+
+void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
+{
+	struct irq_cfg *old_cfg, *cfg;
+
+	old_cfg = old_desc->chip_data;
+	cfg = desc->chip_data;
+
+	if (old_cfg == cfg)
+		return;
+
+	if (old_cfg) {
+		free_irq_2_pin(old_cfg, cfg);
+		free_irq_cfg(old_cfg);
+		old_desc->chip_data = NULL;
+	}
+}
+
+static void
+set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
+{
+	struct irq_cfg *cfg = desc->chip_data;
+
+	if (!cfg->move_in_progress) {
+		/* it means that domain is not changed */
+		if (!cpumask_intersects(&desc->affinity, mask))
+			cfg->move_desc_pending = 1;
+	}
+}
+#endif
+
+#else
+static struct irq_cfg *irq_cfg(unsigned int irq)
+{
+	return irq < nr_irqs ? irq_cfgx + irq : NULL;
+}
+
+#endif
+
+#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
+static inline void
+set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
+{
+}
+#endif
+
 struct io_apic {
 	unsigned int index;
 	unsigned int unused[3];
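The new init_copy_irq_2_pin() in the hunk above deep-copies an irq_2_pin list onto the target node and, if any allocation fails midway, frees the partial copy and keeps the old list. A minimal userspace sketch of that copy-with-rollback pattern (illustrative only, not part of the patch; "pin_node" and calloc/free stand in for irq_pin_list and kzalloc_node()/kfree()):

    #include <stdlib.h>

    struct pin_node {
        int apic, pin;
        struct pin_node *next;
    };

    /* Deep-copy a list; on any allocation failure, free the partial
     * copy and return NULL so the caller keeps using the old list. */
    static struct pin_node *copy_pin_list(const struct pin_node *old)
    {
        struct pin_node *head = NULL, *tail = NULL;

        for (; old; old = old->next) {
            struct pin_node *n = calloc(1, sizeof(*n));
            if (!n) {
                while (head) {            /* roll back the partial copy */
                    struct pin_node *next = head->next;
                    free(head);
                    head = next;
                }
                return NULL;
            }
            n->apic = old->apic;
            n->pin = old->pin;
            if (tail)
                tail->next = n;
            else
                head = n;
            tail = n;
        }
        return head;
    }

The kernel version signals the fallback simply by leaving cfg->irq_2_pin NULL, which arch_init_copy_chip_data() tolerates because the memcpy already carried over the old pointer.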
@@ -237,11 +420,10 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
 	writel(value, &io_apic->data);
 }
 
-static bool io_apic_level_ack_pending(unsigned int irq)
+static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
 {
 	struct irq_pin_list *entry;
 	unsigned long flags;
-	struct irq_cfg *cfg = irq_cfg(irq);
 
 	spin_lock_irqsave(&ioapic_lock, flags);
 	entry = cfg->irq_2_pin;
@@ -323,13 +505,32 @@ static void ioapic_mask_entry(int apic, int pin)
 }
 
 #ifdef CONFIG_SMP
-static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
+static void send_cleanup_vector(struct irq_cfg *cfg)
+{
+	cpumask_var_t cleanup_mask;
+
+	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+		unsigned int i;
+		cfg->move_cleanup_count = 0;
+		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+			cfg->move_cleanup_count++;
+		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+			send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
+	} else {
+		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
+		cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
+		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+		free_cpumask_var(cleanup_mask);
+	}
+	cfg->move_in_progress = 0;
+}
+
+static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 {
 	int apic, pin;
-	struct irq_cfg *cfg;
 	struct irq_pin_list *entry;
+	u8 vector = cfg->vector;
 
-	cfg = irq_cfg(irq);
 	entry = cfg->irq_2_pin;
 	for (;;) {
 		unsigned int reg;
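send_cleanup_vector() above gains a fallback for when a temporary cpumask cannot be allocated: the same set (old_domain AND online) is walked twice, once to count and once to send, so move_cleanup_count is complete before the first IPI can trigger a decrement. A plain-bitmask sketch of that two-pass shape (illustrative only; send_ipi() is a hypothetical stand-in):

    #include <stdint.h>

    extern void send_ipi(int cpu);    /* hypothetical per-CPU IPI */

    static void cleanup_no_alloc(uint64_t old_domain, uint64_t online,
                                 unsigned int *cleanup_count)
    {
        uint64_t targets = old_domain & online;
        unsigned int cpu;

        *cleanup_count = 0;
        for (cpu = 0; cpu < 64; cpu++)        /* first pass: count */
            if (targets & (1ULL << cpu))
                (*cleanup_count)++;
        for (cpu = 0; cpu < 64; cpu++)        /* second pass: send */
            if (targets & (1ULL << cpu))
                send_ipi(cpu);
    }

The two passes trade an allocation for a second scan; the allocating path computes the mask once and uses its weight directly.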
@@ -359,36 +560,61 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
 	}
 }
 
-static int assign_irq_vector(int irq, cpumask_t mask);
+static int
+assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
 
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+/*
+ * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid
+ * of that, or returns BAD_APICID and leaves desc->affinity untouched.
+ */
+static unsigned int
+set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
+{
+	struct irq_cfg *cfg;
+	unsigned int irq;
+
+	if (!cpumask_intersects(mask, cpu_online_mask))
+		return BAD_APICID;
+
+	irq = desc->irq;
+	cfg = desc->chip_data;
+	if (assign_irq_vector(irq, cfg, mask))
+		return BAD_APICID;
+
+	cpumask_and(&desc->affinity, cfg->domain, mask);
+	set_extra_move_desc(desc, mask);
+	return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
+}
+
+static void
+set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
 	unsigned long flags;
 	unsigned int dest;
-	cpumask_t tmp;
-	struct irq_desc *desc;
+	unsigned int irq;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
-		return;
+	irq = desc->irq;
+	cfg = desc->chip_data;
 
-	cfg = irq_cfg(irq);
-	if (assign_irq_vector(irq, mask))
-		return;
+	spin_lock_irqsave(&ioapic_lock, flags);
+	dest = set_desc_affinity(desc, mask);
+	if (dest != BAD_APICID) {
+		/* Only the high 8 bits are valid. */
+		dest = SET_APIC_LOGICAL_ID(dest);
+		__target_IO_APIC_irq(irq, dest, cfg);
+	}
+	spin_unlock_irqrestore(&ioapic_lock, flags);
+}
 
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
-	/*
-	 * Only the high 8 bits are valid.
-	 */
-	dest = SET_APIC_LOGICAL_ID(dest);
+static void
+set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
+{
+	struct irq_desc *desc;
 
 	desc = irq_to_desc(irq);
-	spin_lock_irqsave(&ioapic_lock, flags);
-	__target_IO_APIC_irq(irq, dest, cfg->vector);
-	desc->affinity = mask;
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+
+	set_ioapic_affinity_irq_desc(desc, mask);
 }
 #endif /* CONFIG_SMP */
 
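The set_desc_affinity() helper introduced above follows a validate-then-commit contract: the descriptor is only written once every step that can fail has succeeded, and BAD_APICID signals "nothing was touched". Reduced to its shape (a sketch with hypothetical types, not the kernel API):

    #define BAD_ID 0xFFFFu

    struct desc { unsigned affinity; };

    static unsigned set_affinity(struct desc *d, unsigned requested,
                                 unsigned online, int (*assign)(unsigned))
    {
        unsigned usable = requested & online;

        if (!usable)
            return BAD_ID;        /* leave d->affinity untouched */
        if (assign(usable))
            return BAD_ID;        /* ditto on vector-assignment failure */

        d->affinity = usable;     /* commit only after success */
        return usable;            /* caller programs the hardware */
    }

Centralizing this lets every set_*_affinity variant (I/O APIC, MSI, remapped) share one failure policy instead of each open-coding the online-mask and vector checks.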
@@ -397,16 +623,18 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
  * shared ISA-space IRQs, so we have to support them. We are super
  * fast in the common case, and fast for shared ISA-space IRQs.
  */
-static void add_pin_to_irq(unsigned int irq, int apic, int pin)
+static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin)
 {
-	struct irq_cfg *cfg;
 	struct irq_pin_list *entry;
 
-	/* first time to refer irq_cfg, so with new */
-	cfg = irq_cfg_alloc(irq);
 	entry = cfg->irq_2_pin;
 	if (!entry) {
-		entry = get_one_free_irq_2_pin();
+		entry = get_one_free_irq_2_pin(cpu);
+		if (!entry) {
+			printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n",
+					apic, pin);
+			return;
+		}
 		cfg->irq_2_pin = entry;
 		entry->apic = apic;
 		entry->pin = pin;
@@ -421,7 +649,7 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
 		entry = entry->next;
 	}
 
-	entry->next = get_one_free_irq_2_pin();
+	entry->next = get_one_free_irq_2_pin(cpu);
 	entry = entry->next;
 	entry->apic = apic;
 	entry->pin = pin;
@@ -430,11 +658,10 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
 /*
  * Reroute an IRQ to a different pin.
  */
-static void __init replace_pin_at_irq(unsigned int irq,
+static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu,
 				      int oldapic, int oldpin,
 				      int newapic, int newpin)
 {
-	struct irq_cfg *cfg = irq_cfg(irq);
 	struct irq_pin_list *entry = cfg->irq_2_pin;
 	int replaced = 0;
 
@@ -451,18 +678,16 @@ static void __init replace_pin_at_irq(unsigned int irq,
 
 	/* why? call replace before add? */
 	if (!replaced)
-		add_pin_to_irq(irq, newapic, newpin);
+		add_pin_to_irq_cpu(cfg, cpu, newapic, newpin);
 }
 
-static inline void io_apic_modify_irq(unsigned int irq,
+static inline void io_apic_modify_irq(struct irq_cfg *cfg,
 				      int mask_and, int mask_or,
 				      void (*final)(struct irq_pin_list *entry))
 {
 	int pin;
-	struct irq_cfg *cfg;
 	struct irq_pin_list *entry;
 
-	cfg = irq_cfg(irq);
 	for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
 		unsigned int reg;
 		pin = entry->pin;
@@ -475,9 +700,9 @@ static inline void io_apic_modify_irq(unsigned int irq,
 	}
 }
 
-static void __unmask_IO_APIC_irq(unsigned int irq)
+static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED, 0, NULL);
+	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
 }
 
 #ifdef CONFIG_X86_64
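All four mask/unmask variants are built on the single io_apic_modify_irq() helper above: clearing a bit is (reg & ~BIT) | 0, setting it is (reg & ~0) | BIT, so one read-modify-write routine with an AND mask, an OR mask and an optional post-write hook covers them all. A self-contained sketch of the pattern (illustrative; reg_read/reg_write are hypothetical stand-ins for the ioapic register accessors):

    #include <stdint.h>

    extern uint32_t reg_read(int pin);
    extern void reg_write(int pin, uint32_t value);

    static void modify_pin(int pin, uint32_t mask_and, uint32_t mask_or,
                           void (*final)(int pin))
    {
        uint32_t reg = reg_read(pin);

        reg &= mask_and;
        reg |= mask_or;
        reg_write(pin, reg);
        if (final)
            final(pin);    /* e.g. a read-back to sync the write */
    }

In the patch, the 64-bit mask path passes io_apic_sync as the hook to force the write out, while the 32-bit paths pass NULL.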
@@ -492,47 +717,64 @@ static void io_apic_sync(struct irq_pin_list *entry)
 	readl(&io_apic->data);
 }
 
-static void __mask_IO_APIC_irq(unsigned int irq)
+static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
+	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
 }
 #else /* CONFIG_X86_32 */
-static void __mask_IO_APIC_irq(unsigned int irq)
+static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, NULL);
+	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL);
 }
 
-static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
+static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~IO_APIC_REDIR_LEVEL_TRIGGER,
+	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER,
 			IO_APIC_REDIR_MASKED, NULL);
 }
 
-static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
+static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED,
+	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
 			IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
 }
 #endif /* CONFIG_X86_32 */
 
-static void mask_IO_APIC_irq (unsigned int irq)
+static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
 {
+	struct irq_cfg *cfg = desc->chip_data;
 	unsigned long flags;
 
+	BUG_ON(!cfg);
+
 	spin_lock_irqsave(&ioapic_lock, flags);
-	__mask_IO_APIC_irq(irq);
+	__mask_IO_APIC_irq(cfg);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void unmask_IO_APIC_irq (unsigned int irq)
+static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
 {
+	struct irq_cfg *cfg = desc->chip_data;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ioapic_lock, flags);
-	__unmask_IO_APIC_irq(irq);
+	__unmask_IO_APIC_irq(cfg);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
+static void mask_IO_APIC_irq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	mask_IO_APIC_irq_desc(desc);
+}
+static void unmask_IO_APIC_irq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	unmask_IO_APIC_irq_desc(desc);
+}
+
 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
 {
 	struct IO_APIC_route_entry entry;
@@ -809,7 +1051,7 @@ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
  */
 static int EISA_ELCR(unsigned int irq)
 {
-	if (irq < 16) {
+	if (irq < NR_IRQS_LEGACY) {
 		unsigned int port = 0x4d0 + (irq >> 3);
 		return (inb(port) >> (irq & 7)) & 1;
 	}
@@ -1034,7 +1276,8 @@ void unlock_vector_lock(void)
 	spin_unlock(&vector_lock);
 }
 
-static int __assign_irq_vector(int irq, cpumask_t mask)
+static int
+__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 {
 	/*
 	 * NOTE! The local APIC isn't very good at handling
@@ -1049,52 +1292,49 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
 	 */
 	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
 	unsigned int old_vector;
-	int cpu;
-	struct irq_cfg *cfg;
-
-	cfg = irq_cfg(irq);
-
-	/* Only try and allocate irqs on cpus that are present */
-	cpus_and(mask, mask, cpu_online_map);
+	int cpu, err;
+	cpumask_var_t tmp_mask;
 
 	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
 		return -EBUSY;
 
+	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
+		return -ENOMEM;
+
 	old_vector = cfg->vector;
 	if (old_vector) {
-		cpumask_t tmp;
-		cpus_and(tmp, cfg->domain, mask);
-		if (!cpus_empty(tmp))
+		cpumask_and(tmp_mask, mask, cpu_online_mask);
+		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
+		if (!cpumask_empty(tmp_mask)) {
+			free_cpumask_var(tmp_mask);
 			return 0;
+		}
 	}
 
-	for_each_cpu_mask_nr(cpu, mask) {
-		cpumask_t domain, new_mask;
+	/* Only try and allocate irqs on cpus that are present */
+	err = -ENOSPC;
+	for_each_cpu_and(cpu, mask, cpu_online_mask) {
 		int new_cpu;
 		int vector, offset;
 
-		domain = vector_allocation_domain(cpu);
-		cpus_and(new_mask, domain, cpu_online_map);
+		vector_allocation_domain(cpu, tmp_mask);
 
 		vector = current_vector;
 		offset = current_offset;
 next:
 		vector += 8;
 		if (vector >= first_system_vector) {
-			/* If we run out of vectors on large boxen, must share them. */
+			/* If out of vectors on large boxen, must share them. */
 			offset = (offset + 1) % 8;
 			vector = FIRST_DEVICE_VECTOR + offset;
 		}
 		if (unlikely(current_vector == vector))
 			continue;
-#ifdef CONFIG_X86_64
-		if (vector == IA32_SYSCALL_VECTOR)
-			goto next;
-#else
-		if (vector == SYSCALL_VECTOR)
+
+		if (test_bit(vector, used_vectors))
 			goto next;
-#endif
-		for_each_cpu_mask_nr(new_cpu, new_mask)
+
+		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
 		/* Found one! */
@@ -1102,49 +1342,47 @@ next:
 		current_offset = offset;
 		if (old_vector) {
 			cfg->move_in_progress = 1;
-			cfg->old_domain = cfg->domain;
+			cpumask_copy(cfg->old_domain, cfg->domain);
 		}
-		for_each_cpu_mask_nr(new_cpu, new_mask)
+		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		cfg->vector = vector;
-		cfg->domain = domain;
-		return 0;
+		cpumask_copy(cfg->domain, tmp_mask);
+		err = 0;
+		break;
 	}
-	return -ENOSPC;
+	free_cpumask_var(tmp_mask);
+	return err;
 }
 
-static int assign_irq_vector(int irq, cpumask_t mask)
+static int
+assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 {
 	int err;
 	unsigned long flags;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	err = __assign_irq_vector(irq, mask);
+	err = __assign_irq_vector(irq, cfg, mask);
 	spin_unlock_irqrestore(&vector_lock, flags);
 	return err;
 }
 
-static void __clear_irq_vector(int irq)
+static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 {
-	struct irq_cfg *cfg;
-	cpumask_t mask;
 	int cpu, vector;
 
-	cfg = irq_cfg(irq);
 	BUG_ON(!cfg->vector);
 
 	vector = cfg->vector;
-	cpus_and(mask, cfg->domain, cpu_online_map);
-	for_each_cpu_mask_nr(cpu, mask)
+	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
 	cfg->vector = 0;
-	cpus_clear(cfg->domain);
+	cpumask_clear(cfg->domain);
 
 	if (likely(!cfg->move_in_progress))
 		return;
-	cpus_and(mask, cfg->old_domain, cpu_online_map);
-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
 		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
 		     vector++) {
 			if (per_cpu(vector_irq, cpu)[vector] != irq)
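The search order in __assign_irq_vector() above is unchanged by this patch but worth spelling out: candidate vectors advance in steps of 8, and when the scan reaches first_system_vector the offset rotates and the scan wraps back to FIRST_DEVICE_VECTOR. Stepping by 8 spreads consecutive allocations across the APIC's priority classes (the x86 APIC groups vectors into classes of 16; the spreading rationale here is an inference, not stated in the patch). A sketch of just the stepping logic:

    /* Illustrative only: the next candidate vector, given the current
     * one, the rotating offset, and the device/system vector bounds. */
    static int next_vector(int current_vector, int *offset,
                           int first_device, int first_system)
    {
        int vector = current_vector + 8;

        if (vector >= first_system) {
            *offset = (*offset + 1) % 8;    /* rotate to a new offset */
            vector = first_device + *offset;
        }
        return vector;
    }

What the patch does change is the bookkeeping around the scan: the per-iteration cpumask_t stack temporaries become a single heap-allocated cpumask_var_t, which is the point of the whole conversion on configs with very large NR_CPUS.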
@@ -1162,10 +1400,12 @@ void __setup_vector_irq(int cpu)
 	/* This function must be called with vector_lock held */
 	int irq, vector;
 	struct irq_cfg *cfg;
+	struct irq_desc *desc;
 
 	/* Mark the inuse vectors */
-	for_each_irq_cfg(irq, cfg) {
-		if (!cpu_isset(cpu, cfg->domain))
+	for_each_irq_desc(irq, desc) {
+		cfg = desc->chip_data;
+		if (!cpumask_test_cpu(cpu, cfg->domain))
 			continue;
 		vector = cfg->vector;
 		per_cpu(vector_irq, cpu)[vector] = irq;
@@ -1177,7 +1417,7 @@ void __setup_vector_irq(int cpu)
 			continue;
 
 		cfg = irq_cfg(irq);
-		if (!cpu_isset(cpu, cfg->domain))
+		if (!cpumask_test_cpu(cpu, cfg->domain))
 			per_cpu(vector_irq, cpu)[vector] = -1;
 	}
 }
@@ -1215,11 +1455,8 @@ static inline int IO_APIC_irq_trigger(int irq)
 }
 #endif
 
-static void ioapic_register_intr(int irq, unsigned long trigger)
+static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
 {
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
 
 	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
 	    trigger == IOAPIC_LEVEL)
@@ -1311,23 +1548,22 @@ static int setup_ioapic_entry(int apic, int irq,
 	return 0;
 }
 
-static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
+static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc,
 			      int trigger, int polarity)
 {
 	struct irq_cfg *cfg;
 	struct IO_APIC_route_entry entry;
-	cpumask_t mask;
+	unsigned int dest;
 
 	if (!IO_APIC_IRQ(irq))
 		return;
 
-	cfg = irq_cfg(irq);
+	cfg = desc->chip_data;
 
-	mask = TARGET_CPUS;
-	if (assign_irq_vector(irq, mask))
+	if (assign_irq_vector(irq, cfg, TARGET_CPUS))
 		return;
 
-	cpus_and(mask, cfg->domain, mask);
+	dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
 	apic_printk(APIC_VERBOSE,KERN_DEBUG
 		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@@ -1337,16 +1573,15 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
 
 
 	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
-			       cpu_mask_to_apicid(mask), trigger, polarity,
-			       cfg->vector)) {
+			       dest, trigger, polarity, cfg->vector)) {
 		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
 		       mp_ioapics[apic].mp_apicid, pin);
-		__clear_irq_vector(irq);
+		__clear_irq_vector(irq, cfg);
 		return;
 	}
 
-	ioapic_register_intr(irq, trigger);
-	if (irq < 16)
+	ioapic_register_intr(irq, desc, trigger);
+	if (irq < NR_IRQS_LEGACY)
 		disable_8259A_irq(irq);
 
 	ioapic_write_entry(apic, pin, entry);
@@ -1356,6 +1591,9 @@ static void __init setup_IO_APIC_irqs(void)
 {
 	int apic, pin, idx, irq;
 	int notcon = 0;
+	struct irq_desc *desc;
+	struct irq_cfg *cfg;
+	int cpu = boot_cpu_id;
 
 	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 
@@ -1387,9 +1625,15 @@ static void __init setup_IO_APIC_irqs(void)
 			if (multi_timer_check(apic, irq))
 				continue;
 #endif
-			add_pin_to_irq(irq, apic, pin);
+			desc = irq_to_desc_alloc_cpu(irq, cpu);
+			if (!desc) {
+				printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+				continue;
+			}
+			cfg = desc->chip_data;
+			add_pin_to_irq_cpu(cfg, cpu, apic, pin);
 
-			setup_IO_APIC_irq(apic, pin, irq,
+			setup_IO_APIC_irq(apic, pin, irq, desc,
 					  irq_trigger(idx), irq_polarity(idx));
 		}
 	}
@@ -1448,6 +1692,7 @@ __apicdebuginit(void) print_IO_APIC(void)
 	union IO_APIC_reg_03 reg_03;
 	unsigned long flags;
 	struct irq_cfg *cfg;
+	struct irq_desc *desc;
 	unsigned int irq;
 
 	if (apic_verbosity == APIC_QUIET)
@@ -1537,8 +1782,11 @@ __apicdebuginit(void) print_IO_APIC(void)
 		}
 	}
 	printk(KERN_DEBUG "IRQ to pin mappings:\n");
-	for_each_irq_cfg(irq, cfg) {
-		struct irq_pin_list *entry = cfg->irq_2_pin;
+	for_each_irq_desc(irq, desc) {
+		struct irq_pin_list *entry;
+
+		cfg = desc->chip_data;
+		entry = cfg->irq_2_pin;
 		if (!entry)
 			continue;
 		printk(KERN_DEBUG "IRQ%d ", irq);
@@ -2022,14 +2270,16 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
 {
 	int was_pending = 0;
 	unsigned long flags;
+	struct irq_cfg *cfg;
 
 	spin_lock_irqsave(&ioapic_lock, flags);
-	if (irq < 16) {
+	if (irq < NR_IRQS_LEGACY) {
 		disable_8259A_irq(irq);
 		if (i8259A_irq_pending(irq))
 			was_pending = 1;
 	}
-	__unmask_IO_APIC_irq(irq);
+	cfg = irq_cfg(irq);
+	__unmask_IO_APIC_irq(cfg);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return was_pending;
@@ -2043,7 +2293,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
 	unsigned long flags;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
+	send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
 	spin_unlock_irqrestore(&vector_lock, flags);
 
 	return 1;
@@ -2092,35 +2342,35 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
  * as simple as edge triggered migration and we can do the irq migration
  * with a simple atomic update to IO-APIC RTE.
  */
-static void migrate_ioapic_irq(int irq, cpumask_t mask)
+static void
+migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
-	struct irq_desc *desc;
-	cpumask_t tmp, cleanup_mask;
 	struct irte irte;
 	int modify_ioapic_rte;
 	unsigned int dest;
 	unsigned long flags;
+	unsigned int irq;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
+	irq = desc->irq;
 	if (get_irte(irq, &irte))
 		return;
 
-	if (assign_irq_vector(irq, mask))
+	cfg = desc->chip_data;
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
-	cfg = irq_cfg(irq);
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
+	set_extra_move_desc(desc, mask);
+
+	dest = cpu_mask_to_apicid_and(cfg->domain, mask);
 
-	desc = irq_to_desc(irq);
 	modify_ioapic_rte = desc->status & IRQ_LEVEL;
 	if (modify_ioapic_rte) {
 		spin_lock_irqsave(&ioapic_lock, flags);
-		__target_IO_APIC_irq(irq, dest, cfg->vector);
+		__target_IO_APIC_irq(irq, dest, cfg);
 		spin_unlock_irqrestore(&ioapic_lock, flags);
 	}
 
@@ -2132,24 +2382,20 @@ static void migrate_ioapic_irq(int irq, cpumask_t mask)
 	 */
 	modify_irte(irq, &irte);
 
-	if (cfg->move_in_progress) {
-		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		cfg->move_in_progress = 0;
-	}
+	if (cfg->move_in_progress)
+		send_cleanup_vector(cfg);
 
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 
-static int migrate_irq_remapped_level(int irq)
+static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
 {
 	int ret = -1;
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_cfg *cfg = desc->chip_data;
 
-	mask_IO_APIC_irq(irq);
+	mask_IO_APIC_irq_desc(desc);
 
-	if (io_apic_level_ack_pending(irq)) {
+	if (io_apic_level_ack_pending(cfg)) {
 		/*
 		 * Interrupt in progress. Migrating irq now will change the
 		 * vector information in the IO-APIC RTE and that will confuse
@@ -2161,14 +2407,15 @@ static int migrate_irq_remapped_level(int irq)
 	}
 
 	/* everthing is clear. we have right of way */
-	migrate_ioapic_irq(irq, desc->pending_mask);
+	migrate_ioapic_irq_desc(desc, &desc->pending_mask);
 
 	ret = 0;
 	desc->status &= ~IRQ_MOVE_PENDING;
-	cpus_clear(desc->pending_mask);
+	cpumask_clear(&desc->pending_mask);
 
 unmask:
-	unmask_IO_APIC_irq(irq);
+	unmask_IO_APIC_irq_desc(desc);
+
 	return ret;
 }
 
@@ -2189,7 +2436,7 @@ static void ir_irq_migration(struct work_struct *work)
 				continue;
 			}
 
-			desc->chip->set_affinity(irq, desc->pending_mask);
+			desc->chip->set_affinity(irq, &desc->pending_mask);
 			spin_unlock_irqrestore(&desc->lock, flags);
 		}
 	}
@@ -2198,18 +2445,24 @@ static void ir_irq_migration(struct work_struct *work)
 /*
  * Migrates the IRQ destination in the process context.
  */
-static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
+					    const struct cpumask *mask)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-
 	if (desc->status & IRQ_LEVEL) {
 		desc->status |= IRQ_MOVE_PENDING;
-		desc->pending_mask = mask;
-		migrate_irq_remapped_level(irq);
+		cpumask_copy(&desc->pending_mask, mask);
+		migrate_irq_remapped_level_desc(desc);
 		return;
 	}
 
-	migrate_ioapic_irq(irq, mask);
+	migrate_ioapic_irq_desc(desc, mask);
+}
+static void set_ir_ioapic_affinity_irq(unsigned int irq,
+				       const struct cpumask *mask)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	set_ir_ioapic_affinity_irq_desc(desc, mask);
 }
 #endif
 
@@ -2228,6 +2481,9 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 		struct irq_cfg *cfg;
 		irq = __get_cpu_var(vector_irq)[vector];
 
+		if (irq == -1)
+			continue;
+
 		desc = irq_to_desc(irq);
 		if (!desc)
 			continue;
@@ -2237,7 +2493,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 		if (!cfg->move_cleanup_count)
 			goto unlock;
 
-		if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
+		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 			goto unlock;
 
 		__get_cpu_var(vector_irq)[vector] = -1;
@@ -2249,28 +2505,44 @@ unlock:
 	irq_exit();
 }
 
-static void irq_complete_move(unsigned int irq)
+static void irq_complete_move(struct irq_desc **descp)
 {
-	struct irq_cfg *cfg = irq_cfg(irq);
+	struct irq_desc *desc = *descp;
+	struct irq_cfg *cfg = desc->chip_data;
 	unsigned vector, me;
 
-	if (likely(!cfg->move_in_progress))
+	if (likely(!cfg->move_in_progress)) {
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+		if (likely(!cfg->move_desc_pending))
+			return;
+
+		/* domain has not changed, but affinity did */
+		me = smp_processor_id();
+		if (cpu_isset(me, desc->affinity)) {
+			*descp = desc = move_irq_desc(desc, me);
+			/* get the new one */
+			cfg = desc->chip_data;
+			cfg->move_desc_pending = 0;
+		}
+#endif
 		return;
+	}
 
 	vector = ~get_irq_regs()->orig_ax;
 	me = smp_processor_id();
-	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
-		cpumask_t cleanup_mask;
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+	*descp = desc = move_irq_desc(desc, me);
+	/* get the new one */
+	cfg = desc->chip_data;
+#endif
 
-		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		cfg->move_in_progress = 0;
-	}
+	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+		send_cleanup_vector(cfg);
 }
 #else
-static inline void irq_complete_move(unsigned int irq) {}
+static inline void irq_complete_move(struct irq_desc **descp) {}
 #endif
+
 #ifdef CONFIG_INTR_REMAP
 static void ack_x2apic_level(unsigned int irq)
 {
@@ -2281,11 +2553,14 @@ static void ack_x2apic_edge(unsigned int irq)
 {
 	ack_x2APIC_irq();
 }
+
 #endif
 
 static void ack_apic_edge(unsigned int irq)
 {
-	irq_complete_move(irq);
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	irq_complete_move(&desc);
 	move_native_irq(irq);
 	ack_APIC_irq();
 }
@@ -2294,18 +2569,21 @@ atomic_t irq_mis_count;
 
 static void ack_apic_level(unsigned int irq)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
+
 #ifdef CONFIG_X86_32
 	unsigned long v;
 	int i;
 #endif
+	struct irq_cfg *cfg;
 	int do_unmask_irq = 0;
 
-	irq_complete_move(irq);
+	irq_complete_move(&desc);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	/* If we are moving the irq we need to mask it */
-	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
+	if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
 		do_unmask_irq = 1;
-		mask_IO_APIC_irq(irq);
+		mask_IO_APIC_irq_desc(desc);
 	}
 #endif
 
@@ -2329,7 +2607,8 @@ static void ack_apic_level(unsigned int irq)
 	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
 	 * The idea is from Manfred Spraul.  --macro
 	 */
-	i = irq_cfg(irq)->vector;
+	cfg = desc->chip_data;
+	i = cfg->vector;
 
 	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
 #endif
@@ -2368,17 +2647,18 @@ static void ack_apic_level(unsigned int irq)
 	 * accurate and is causing problems then it is a hardware bug
 	 * and you can go talk to the chipset vendor about it.
 	 */
-	if (!io_apic_level_ack_pending(irq))
+	cfg = desc->chip_data;
+	if (!io_apic_level_ack_pending(cfg))
 		move_masked_irq(irq);
-	unmask_IO_APIC_irq(irq);
+	unmask_IO_APIC_irq_desc(desc);
 	}
 
 #ifdef CONFIG_X86_32
 	if (!(v & (1 << (i & 0x1f)))) {
 		atomic_inc(&irq_mis_count);
 		spin_lock(&ioapic_lock);
-		__mask_and_edge_IO_APIC_irq(irq);
-		__unmask_and_level_IO_APIC_irq(irq);
+		__mask_and_edge_IO_APIC_irq(cfg);
+		__unmask_and_level_IO_APIC_irq(cfg);
 		spin_unlock(&ioapic_lock);
 	}
 #endif
@@ -2429,20 +2709,19 @@ static inline void init_IO_APIC_traps(void)
 	 * Also, we've got to be careful not to trash gate
 	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 	 */
-	for_each_irq_cfg(irq, cfg) {
-		if (IO_APIC_IRQ(irq) && !cfg->vector) {
+	for_each_irq_desc(irq, desc) {
+		cfg = desc->chip_data;
+		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
 			/*
 			 * Hmm.. We don't have an entry for this,
 			 * so default to an old-fashioned 8259
 			 * interrupt if we can..
 			 */
-			if (irq < 16)
+			if (irq < NR_IRQS_LEGACY)
 				make_8259A_irq(irq);
-			else {
-				desc = irq_to_desc(irq);
+			else
 				/* Strange. Oh, well.. */
 				desc->chip = &no_irq_chip;
-			}
 		}
 	}
 }
@@ -2467,7 +2746,7 @@ static void unmask_lapic_irq(unsigned int irq)
 	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
 }
 
-static void ack_lapic_irq (unsigned int irq)
+static void ack_lapic_irq(unsigned int irq)
 {
 	ack_APIC_irq();
 }
@@ -2479,11 +2758,8 @@ static struct irq_chip lapic_chip __read_mostly = {
 	.ack		= ack_lapic_irq,
 };
 
-static void lapic_register_intr(int irq)
+static void lapic_register_intr(int irq, struct irq_desc *desc)
 {
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
 	desc->status &= ~IRQ_LEVEL;
 	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
 				      "edge");
@@ -2587,7 +2863,9 @@ int timer_through_8259 __initdata;
  */
 static inline void __init check_timer(void)
 {
-	struct irq_cfg *cfg = irq_cfg(0);
+	struct irq_desc *desc = irq_to_desc(0);
+	struct irq_cfg *cfg = desc->chip_data;
+	int cpu = boot_cpu_id;
 	int apic1, pin1, apic2, pin2;
 	unsigned long flags;
 	unsigned int ver;
@@ -2602,7 +2880,7 @@ static inline void __init check_timer(void)
 	 * get/set the timer IRQ vector:
 	 */
 	disable_8259A_irq(0);
-	assign_irq_vector(0, TARGET_CPUS);
+	assign_irq_vector(0, cfg, TARGET_CPUS);
 
 	/*
 	 * As IRQ0 is to be enabled in the 8259A, the virtual
@@ -2653,10 +2931,10 @@ static inline void __init check_timer(void)
 	 * Ok, does IRQ0 through the IOAPIC work?
 	 */
 	if (no_pin1) {
-		add_pin_to_irq(0, apic1, pin1);
+		add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
 		setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
 	}
-	unmask_IO_APIC_irq(0);
+	unmask_IO_APIC_irq_desc(desc);
 	if (timer_irq_works()) {
 		if (nmi_watchdog == NMI_IO_APIC) {
 			setup_nmi();
@@ -2682,9 +2960,9 @@ static inline void __init check_timer(void)
 		/*
 		 * legacy devices should be connected to IO APIC #0
 		 */
-		replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
+		replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
 		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
-		unmask_IO_APIC_irq(0);
+		unmask_IO_APIC_irq_desc(desc);
 		enable_8259A_irq(0);
 		if (timer_irq_works()) {
 			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@ -2716,7 +2994,7 @@ static inline void __init check_timer(void)
 	apic_printk(APIC_QUIET, KERN_INFO
 		    "...trying to set up timer as Virtual Wire IRQ...\n");
 
-	lapic_register_intr(0);
+	lapic_register_intr(0, desc);
 	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
 	enable_8259A_irq(0);
 
@@ -2901,22 +3179,26 @@ unsigned int create_irq_nr(unsigned int irq_want)
 	unsigned int irq;
 	unsigned int new;
 	unsigned long flags;
-	struct irq_cfg *cfg_new;
-
-	irq_want = nr_irqs - 1;
+	struct irq_cfg *cfg_new = NULL;
+	int cpu = boot_cpu_id;
+	struct irq_desc *desc_new = NULL;
 
 	irq = 0;
 	spin_lock_irqsave(&vector_lock, flags);
-	for (new = irq_want; new > 0; new--) {
+	for (new = irq_want; new < NR_IRQS; new++) {
 		if (platform_legacy_irq(new))
 			continue;
-		cfg_new = irq_cfg(new);
-		if (cfg_new && cfg_new->vector != 0)
+
+		desc_new = irq_to_desc_alloc_cpu(new, cpu);
+		if (!desc_new) {
+			printk(KERN_INFO "can not get irq_desc for %d\n", new);
 			continue;
-		/* check if need to create one */
-		if (!cfg_new)
-			cfg_new = irq_cfg_alloc(new);
-		if (__assign_irq_vector(new, TARGET_CPUS) == 0)
+		}
+		cfg_new = desc_new->chip_data;
+
+		if (cfg_new->vector != 0)
+			continue;
+		if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0)
 			irq = new;
 		break;
 	}
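Note the direction change in the loop above: create_irq_nr() now scans upward from the caller's hint instead of downward from nr_irqs-1, so dynamically created IRQs (MSI and friends) cluster just past the GSI range rather than at the top of the table. The search itself reduces to a familiar first-free-slot scan (illustrative model only, not the kernel code):

    /* Find the first unused slot at or above the hint; a slot is
     * unused when no vector has been assigned to it (vector == 0). */
    static int find_free_slot(const int *vector_of, int nslots, int hint)
    {
        int i;

        for (i = hint; i < nslots; i++)
            if (vector_of[i] == 0)
                return i;
        return -1;    /* nothing free above the hint */
    }

Keeping dynamic IRQ numbers low matters once irq_desc becomes sparse: small, dense numbers keep the descriptor radix compact on machines that would otherwise burn memory on a huge static table.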
@@ -2924,15 +3206,21 @@ unsigned int create_irq_nr(unsigned int irq_want)
 
 	if (irq > 0) {
 		dynamic_irq_init(irq);
+		/* restore it, in case dynamic_irq_init clear it */
+		if (desc_new)
+			desc_new->chip_data = cfg_new;
 	}
 	return irq;
 }
 
+static int nr_irqs_gsi = NR_IRQS_LEGACY;
 int create_irq(void)
 {
+	unsigned int irq_want;
 	int irq;
 
-	irq = create_irq_nr(nr_irqs - 1);
+	irq_want = nr_irqs_gsi;
+	irq = create_irq_nr(irq_want);
 
 	if (irq == 0)
 		irq = -1;
@@ -2943,14 +3231,22 @@ int create_irq(void)
 void destroy_irq(unsigned int irq)
 {
 	unsigned long flags;
+	struct irq_cfg *cfg;
+	struct irq_desc *desc;
 
+	/* store it, in case dynamic_irq_cleanup clear it */
+	desc = irq_to_desc(irq);
+	cfg = desc->chip_data;
 	dynamic_irq_cleanup(irq);
+	/* connect back irq_cfg */
+	if (desc)
+		desc->chip_data = cfg;
 
 #ifdef CONFIG_INTR_REMAP
 	free_irte(irq);
 #endif
 	spin_lock_irqsave(&vector_lock, flags);
-	__clear_irq_vector(irq);
+	__clear_irq_vector(irq, cfg);
 	spin_unlock_irqrestore(&vector_lock, flags);
 }
 
@@ -2963,16 +3259,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 	struct irq_cfg *cfg;
 	int err;
 	unsigned dest;
-	cpumask_t tmp;
 
-	tmp = TARGET_CPUS;
-	err = assign_irq_vector(irq, tmp);
+	cfg = irq_cfg(irq);
+	err = assign_irq_vector(irq, cfg, TARGET_CPUS);
 	if (err)
 		return err;
 
-	cfg = irq_cfg(irq);
-	cpus_and(tmp, cfg->domain, tmp);
-	dest = cpu_mask_to_apicid(tmp);
+	dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
 #ifdef CONFIG_INTR_REMAP
 	if (irq_remapped(irq)) {
@@ -3026,64 +3319,48 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3026} 3319}
3027 3320
3028#ifdef CONFIG_SMP 3321#ifdef CONFIG_SMP
3029static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) 3322static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3030{ 3323{
3324 struct irq_desc *desc = irq_to_desc(irq);
3031 struct irq_cfg *cfg; 3325 struct irq_cfg *cfg;
3032 struct msi_msg msg; 3326 struct msi_msg msg;
3033 unsigned int dest; 3327 unsigned int dest;
3034 cpumask_t tmp;
3035 struct irq_desc *desc;
3036 3328
3037 cpus_and(tmp, mask, cpu_online_map); 3329 dest = set_desc_affinity(desc, mask);
3038 if (cpus_empty(tmp)) 3330 if (dest == BAD_APICID)
3039 return; 3331 return;
3040 3332
3041 if (assign_irq_vector(irq, mask)) 3333 cfg = desc->chip_data;
3042 return;
3043 3334
3044 cfg = irq_cfg(irq); 3335 read_msi_msg_desc(desc, &msg);
3045 cpus_and(tmp, cfg->domain, mask);
3046 dest = cpu_mask_to_apicid(tmp);
3047
3048 read_msi_msg(irq, &msg);
3049 3336
3050 msg.data &= ~MSI_DATA_VECTOR_MASK; 3337 msg.data &= ~MSI_DATA_VECTOR_MASK;
3051 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3338 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3052 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3339 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3053 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3340 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3054 3341
3055 write_msi_msg(irq, &msg); 3342 write_msi_msg_desc(desc, &msg);
3056 desc = irq_to_desc(irq);
3057 desc->affinity = mask;
3058} 3343}
3059
 #ifdef CONFIG_INTR_REMAP
 /*
  * Migrate the MSI irq to another cpumask. This migration is
  * done in the process context using interrupt-remapping hardware.
  */
-static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void
+ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
-        struct irq_cfg *cfg;
+        struct irq_desc *desc = irq_to_desc(irq);
+        struct irq_cfg *cfg = desc->chip_data;
         unsigned int dest;
-        cpumask_t tmp, cleanup_mask;
         struct irte irte;
-        struct irq_desc *desc;
-
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
-                return;
 
         if (get_irte(irq, &irte))
                 return;
 
-        if (assign_irq_vector(irq, mask))
+        dest = set_desc_affinity(desc, mask);
+        if (dest == BAD_APICID)
                 return;
 
-        cfg = irq_cfg(irq);
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
-
         irte.vector = cfg->vector;
         irte.dest_id = IRTE_DEST(dest);
 
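
Every ->set_affinity handler converted in this patch (MSI above, plus the DMAR, HPET and HT variants below) now opens with the same two lines: set_desc_affinity() validates the requested mask, calls into the vector allocator, records the new affinity in the descriptor, and returns either a destination APIC ID or BAD_APICID, replacing four or five open-coded steps per handler. A self-contained sketch of that contract, with toy one-word cpumasks and a trivial vector allocator (the real helper lives in the x86/genirq glue of this series):

#include <stdio.h>

#define BAD_APICID 0xFFu

struct irq_desc {
        unsigned long affinity;         /* toy one-word stand-in for a cpumask */
        unsigned char vector;
};

static unsigned long cpu_online_mask = 0x0F;    /* pretend cpus 0-3 are online */
static unsigned char next_vector = 0x20;

/* toy vector allocator: hand out vectors until the space is exhausted */
static int toy_assign_vector(struct irq_desc *desc)
{
        if (next_vector == 0xFF)
                return -1;
        desc->vector = next_vector++;
        return 0;
}

/* sketch of the set_desc_affinity() contract: validate the mask, grab a
 * vector, record the affinity, return a destination (BAD_APICID on error) */
static unsigned int set_desc_affinity(struct irq_desc *desc, unsigned long mask)
{
        unsigned long effective = mask & cpu_online_mask;

        if (!effective)
                return BAD_APICID;      /* nothing requested is online */
        if (toy_assign_vector(desc))
                return BAD_APICID;      /* no free vector on those cpus */

        desc->affinity = effective;
        return (unsigned int)effective; /* flat-mode style destination */
}

int main(void)
{
        struct irq_desc d = { 0, 0 };
        unsigned int dest = set_desc_affinity(&d, 0x03);

        printf("dest=%#x vector=%#x\n", dest, d.vector);
        return 0;
}
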
@@ -3097,16 +3374,10 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
          * at the new destination. So, time to cleanup the previous
          * vector allocation.
          */
-        if (cfg->move_in_progress) {
-                cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-                cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-                send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-                cfg->move_in_progress = 0;
-        }
-
-        desc = irq_to_desc(irq);
-        desc->affinity = mask;
+        if (cfg->move_in_progress)
+                send_cleanup_vector(cfg);
 }
+
 #endif
 #endif /* CONFIG_SMP */
 
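
send_cleanup_vector() is essentially the deleted block given a name: intersect the old vector domain with the online CPUs, record how many acknowledgements to expect in move_cleanup_count, send IRQ_MOVE_CLEANUP_VECTOR to that set, and clear move_in_progress. Modelled in userspace below; the IPI becomes a printf, __builtin_popcountl() (a GCC builtin) stands in for cpus_weight(), and the vector value is illustrative:

#include <stdio.h>

#define IRQ_MOVE_CLEANUP_VECTOR 0x20    /* illustrative value only */

struct irq_cfg {
        unsigned long old_domain;       /* toy cpumask */
        unsigned int move_cleanup_count;
        unsigned char move_in_progress;
};

static unsigned long cpu_online_mask = 0x0F;

/* model of send_cleanup_vector(): the logic the deleted lines open-coded */
static void send_cleanup_vector(struct irq_cfg *cfg)
{
        unsigned long cleanup_mask = cfg->old_domain & cpu_online_mask;

        cfg->move_cleanup_count = __builtin_popcountl(cleanup_mask);
        printf("IPI vector %#x to mask %#lx\n",
               IRQ_MOVE_CLEANUP_VECTOR, cleanup_mask);  /* send_IPI_mask() */
        cfg->move_in_progress = 0;
}

int main(void)
{
        struct irq_cfg cfg = { .old_domain = 0x33, .move_in_progress = 1 };

        send_cleanup_vector(&cfg);
        printf("pending acks: %u\n", cfg.move_cleanup_count);
        return 0;
}
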
@@ -3165,7 +3436,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3165} 3436}
3166#endif 3437#endif
3167 3438
3168static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq) 3439static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3169{ 3440{
3170 int ret; 3441 int ret;
3171 struct msi_msg msg; 3442 struct msi_msg msg;
@@ -3174,7 +3445,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
3174 if (ret < 0) 3445 if (ret < 0)
3175 return ret; 3446 return ret;
3176 3447
3177 set_irq_msi(irq, desc); 3448 set_irq_msi(irq, msidesc);
3178 write_msi_msg(irq, &msg); 3449 write_msi_msg(irq, &msg);
3179 3450
3180#ifdef CONFIG_INTR_REMAP 3451#ifdef CONFIG_INTR_REMAP
@@ -3194,26 +3465,13 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
         return 0;
 }
 
-static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
-{
-        unsigned int irq;
-
-        irq = dev->bus->number;
-        irq <<= 8;
-        irq |= dev->devfn;
-        irq <<= 12;
-
-        return irq;
-}
-
-int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
 {
         unsigned int irq;
         int ret;
         unsigned int irq_want;
 
-        irq_want = build_irq_for_pci_dev(dev) + 0x100;
-
+        irq_want = nr_irqs_gsi;
         irq = create_irq_nr(irq_want);
         if (irq == 0)
                 return -1;
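
The removed build_irq_for_pci_dev() manufactured a per-device IRQ hint by bit-packing the device address, irq = ((bus << 8) | devfn) << 12, so bus 0x02, devfn 0x08 plus the 0x100 the call sites added gives 0x208100. With sparse IRQs the hint is simply nr_irqs_gsi, the first number above the GSI range, and create_irq_nr() probes upward from there. The old arithmetic, runnable for reference (pci_dev replaced by plain integers):

#include <stdio.h>

/* the arithmetic of the removed build_irq_for_pci_dev() */
static unsigned int build_irq_for_pci_dev(unsigned int bus, unsigned int devfn)
{
        unsigned int irq;

        irq = bus;
        irq <<= 8;      /* make room for the 8-bit devfn */
        irq |= devfn;
        irq <<= 12;     /* spread hints for different devices far apart */
        return irq;
}

int main(void)
{
        /* bus 0x02, device 1 function 0 (devfn 0x08): prints 0x208100 */
        printf("%#x\n", build_irq_for_pci_dev(0x02, 0x08) + 0x100);
        return 0;
}
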
@@ -3227,7 +3485,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
                 goto error;
 no_ir:
 #endif
-        ret = setup_msi_irq(dev, desc, irq);
+        ret = setup_msi_irq(dev, msidesc, irq);
         if (ret < 0) {
                 destroy_irq(irq);
                 return ret;
@@ -3245,7 +3503,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
         unsigned int irq;
         int ret, sub_handle;
-        struct msi_desc *desc;
+        struct msi_desc *msidesc;
         unsigned int irq_want;
 
 #ifdef CONFIG_INTR_REMAP
@@ -3253,10 +3511,11 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
         int index = 0;
 #endif
 
-        irq_want = build_irq_for_pci_dev(dev) + 0x100;
+        irq_want = nr_irqs_gsi;
         sub_handle = 0;
-        list_for_each_entry(desc, &dev->msi_list, list) {
-                irq = create_irq_nr(irq_want--);
+        list_for_each_entry(msidesc, &dev->msi_list, list) {
+                irq = create_irq_nr(irq_want);
+                irq_want++;
                 if (irq == 0)
                         return -1;
 #ifdef CONFIG_INTR_REMAP
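
The allocation direction also flips here: the old loop counted the hint down (irq_want--), while the new one keeps calling create_irq_nr() with an ascending hint, so the vectors of a multi-MSI device land in a compact block just above nr_irqs_gsi. A toy version of create_irq_nr()'s contract, assuming it returns the first free IRQ at or above the hint and 0 on failure (which is why the callers test irq == 0):

#include <stdio.h>

#define TOY_NR_IRQS 32

static unsigned char irq_used[TOY_NR_IRQS];

/* toy create_irq_nr(): first free irq at or above the hint, 0 on failure
 * (irq 0 is never handed out) */
static unsigned int create_irq_nr(unsigned int irq_want)
{
        unsigned int irq;

        for (irq = irq_want ? irq_want : 1; irq < TOY_NR_IRQS; irq++) {
                if (!irq_used[irq]) {
                        irq_used[irq] = 1;
                        return irq;
                }
        }
        return 0;
}

int main(void)
{
        unsigned int irq_want = 24;     /* pretend nr_irqs_gsi == 24 */
        int i;

        for (i = 0; i < 3; i++) {
                unsigned int irq = create_irq_nr(irq_want);

                irq_want++;
                printf("msi %d -> irq %u\n", i, irq);   /* 24, 25, 26 */
        }
        return 0;
}
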
@@ -3288,7 +3547,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
         }
 no_ir:
 #endif
-        ret = setup_msi_irq(dev, desc, irq);
+        ret = setup_msi_irq(dev, msidesc, irq);
         if (ret < 0)
                 goto error;
         sub_handle++;
@@ -3307,24 +3566,18 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
+        struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
         struct msi_msg msg;
         unsigned int dest;
-        cpumask_t tmp;
-        struct irq_desc *desc;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        dest = set_desc_affinity(desc, mask);
+        if (dest == BAD_APICID)
                 return;
 
-        if (assign_irq_vector(irq, mask))
-                return;
-
-        cfg = irq_cfg(irq);
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
+        cfg = desc->chip_data;
 
         dmar_msi_read(irq, &msg);
 
@@ -3334,9 +3587,8 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
         msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
         dmar_msi_write(irq, &msg);
-        desc = irq_to_desc(irq);
-        desc->affinity = mask;
 }
+
 #endif /* CONFIG_SMP */
 
 struct irq_chip dmar_msi_type = {
@@ -3368,24 +3620,18 @@ int arch_setup_dmar_msi(unsigned int irq)
 #ifdef CONFIG_HPET_TIMER
 
 #ifdef CONFIG_SMP
-static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
+        struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
-        struct irq_desc *desc;
         struct msi_msg msg;
         unsigned int dest;
-        cpumask_t tmp;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        dest = set_desc_affinity(desc, mask);
+        if (dest == BAD_APICID)
                 return;
 
-        if (assign_irq_vector(irq, mask))
-                return;
-
-        cfg = irq_cfg(irq);
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
+        cfg = desc->chip_data;
 
         hpet_msi_read(irq, &msg);
 
@@ -3395,9 +3641,8 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
         msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
         hpet_msi_write(irq, &msg);
-        desc = irq_to_desc(irq);
-        desc->affinity = mask;
 }
+
 #endif /* CONFIG_SMP */
 
 struct irq_chip hpet_msi_type = {
@@ -3450,28 +3695,21 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
         write_ht_irq_msg(irq, &msg);
 }
 
-static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
+        struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
         unsigned int dest;
-        cpumask_t tmp;
-        struct irq_desc *desc;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        dest = set_desc_affinity(desc, mask);
+        if (dest == BAD_APICID)
                 return;
 
-        if (assign_irq_vector(irq, mask))
-                return;
-
-        cfg = irq_cfg(irq);
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
+        cfg = desc->chip_data;
 
         target_ht_irq(irq, dest, cfg->vector);
-        desc = irq_to_desc(irq);
-        desc->affinity = mask;
 }
+
 #endif
 
 static struct irq_chip ht_irq_chip = {
@@ -3489,17 +3727,14 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 {
         struct irq_cfg *cfg;
         int err;
-        cpumask_t tmp;
 
-        tmp = TARGET_CPUS;
-        err = assign_irq_vector(irq, tmp);
+        cfg = irq_cfg(irq);
+        err = assign_irq_vector(irq, cfg, TARGET_CPUS);
         if (!err) {
                 struct ht_irq_msg msg;
                 unsigned dest;
 
-                cfg = irq_cfg(irq);
-                cpus_and(tmp, cfg->domain, tmp);
-                dest = cpu_mask_to_apicid(tmp);
+                dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
                 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
@@ -3535,7 +3770,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
                        unsigned long mmr_offset)
 {
-        const cpumask_t *eligible_cpu = get_cpu_mask(cpu);
+        const struct cpumask *eligible_cpu = cpumask_of(cpu);
         struct irq_cfg *cfg;
         int mmr_pnode;
         unsigned long mmr_value;
@@ -3543,7 +3778,9 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
         unsigned long flags;
         int err;
 
-        err = assign_irq_vector(irq, *eligible_cpu);
+        cfg = irq_cfg(irq);
+
+        err = assign_irq_vector(irq, cfg, eligible_cpu);
         if (err != 0)
                 return err;
 
@@ -3552,8 +3789,6 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3552 irq_name); 3789 irq_name);
3553 spin_unlock_irqrestore(&vector_lock, flags); 3790 spin_unlock_irqrestore(&vector_lock, flags);
3554 3791
3555 cfg = irq_cfg(irq);
3556
3557 mmr_value = 0; 3792 mmr_value = 0;
3558 entry = (struct uv_IO_APIC_route_entry *)&mmr_value; 3793 entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
3559 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); 3794 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
@@ -3564,7 +3799,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
         entry->polarity = 0;
         entry->trigger = 0;
         entry->mask = 0;
-        entry->dest = cpu_mask_to_apicid(*eligible_cpu);
+        entry->dest = cpu_mask_to_apicid(eligible_cpu);
 
         mmr_pnode = uv_blade_to_pnode(mmr_blade);
         uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
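
get_cpu_mask(cpu) and the later *eligible_cpu dereference both implied moving a whole cpumask_t by value; cpumask_of(cpu) instead returns a const pointer into a shared table of single-bit masks, and the reworked assign_irq_vector()/cpu_mask_to_apicid() accept pointers, so the UV path copies no masks at all. The pointer-returning idiom in miniature (the kernel's actual table is the cleverer cpu_bit_bitmap construct; this shows only the shape of it):

#include <stdio.h>

#define TOY_NR_CPUS 8

struct cpumask { unsigned long bits; }; /* toy one-word cpumask */

/* one constant single-bit mask per cpu; cpumask_of() just points at it,
 * so callers pass a pointer instead of copying a mask by value */
static const struct cpumask cpu_singleton[TOY_NR_CPUS] = {
        { 1UL << 0 }, { 1UL << 1 }, { 1UL << 2 }, { 1UL << 3 },
        { 1UL << 4 }, { 1UL << 5 }, { 1UL << 6 }, { 1UL << 7 },
};

static const struct cpumask *cpumask_of(int cpu)
{
        return &cpu_singleton[cpu];
}

int main(void)
{
        const struct cpumask *eligible_cpu = cpumask_of(3);

        printf("mask for cpu3: %#lx\n", eligible_cpu->bits);
        return 0;
}
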
@@ -3605,9 +3840,16 @@ int __init io_apic_get_redir_entries (int ioapic)
         return reg_01.bits.entries;
 }
 
-int __init probe_nr_irqs(void)
+void __init probe_nr_irqs_gsi(void)
 {
-        return NR_IRQS;
+        int idx;
+        int nr = 0;
+
+        for (idx = 0; idx < nr_ioapics; idx++)
+                nr += io_apic_get_redir_entries(idx) + 1;
+
+        if (nr > nr_irqs_gsi)
+                nr_irqs_gsi = nr;
 }
 
 /* --------------------------------------------------------------------------
@@ -3706,19 +3948,31 @@ int __init io_apic_get_version(int ioapic)
 
 int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
 {
+        struct irq_desc *desc;
+        struct irq_cfg *cfg;
+        int cpu = boot_cpu_id;
+
         if (!IO_APIC_IRQ(irq)) {
                 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
                         ioapic);
                 return -EINVAL;
         }
 
+        desc = irq_to_desc_alloc_cpu(irq, cpu);
+        if (!desc) {
+                printk(KERN_INFO "cannot get irq_desc %d\n", irq);
+                return 0;
+        }
+
         /*
          * IRQs < 16 are already in the irq_2_pin[] map
          */
-        if (irq >= 16)
-                add_pin_to_irq(irq, ioapic, pin);
+        if (irq >= NR_IRQS_LEGACY) {
+                cfg = desc->chip_data;
+                add_pin_to_irq_cpu(cfg, cpu, ioapic, pin);
+        }
 
-        setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
+        setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity);
 
         return 0;
 }
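
io_apic_set_pci_routing() picks up the two core sparse-irq idioms: descriptors are created on first use via irq_to_desc_alloc_cpu(), placed on the requesting CPU's node, and the bare constant 16 becomes NR_IRQS_LEGACY. A toy model of alloc-on-first-use (a flat table stands in for the kernel's real descriptor store, and NUMA placement is elided):

#include <stdio.h>
#include <stdlib.h>

#define TOY_NR_IRQS 64
#define NR_IRQS_LEGACY 16

struct irq_desc { int irq; void *chip_data; };

static struct irq_desc *desc_table[TOY_NR_IRQS];

/* toy irq_to_desc_alloc_cpu(): return the descriptor, allocating it on
 * first use (the real code also allocates on cpu's NUMA node) */
static struct irq_desc *irq_to_desc_alloc_cpu(int irq, int cpu)
{
        (void)cpu;      /* node-local allocation elided in this sketch */

        if (irq < 0 || irq >= TOY_NR_IRQS)
                return NULL;
        if (!desc_table[irq]) {
                desc_table[irq] = calloc(1, sizeof(struct irq_desc));
                if (desc_table[irq])
                        desc_table[irq]->irq = irq;
        }
        return desc_table[irq];
}

int main(void)
{
        struct irq_desc *desc = irq_to_desc_alloc_cpu(NR_IRQS_LEGACY + 4, 0);

        printf("desc for irq %d: %p\n", desc ? desc->irq : -1, (void *)desc);
        return 0;
}
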
@@ -3756,7 +4010,7 @@ void __init setup_ioapic_dest(void)
         int pin, ioapic, irq, irq_entry;
         struct irq_desc *desc;
         struct irq_cfg *cfg;
-        cpumask_t mask;
+        const struct cpumask *mask;
 
         if (skip_ioapic_setup == 1)
                 return;
@@ -3772,9 +4026,10 @@ void __init setup_ioapic_dest(void)
                  * when you have too many devices, because at that time only boot
                  * cpu is online.
                  */
-                cfg = irq_cfg(irq);
+                desc = irq_to_desc(irq);
+                cfg = desc->chip_data;
                 if (!cfg->vector) {
-                        setup_IO_APIC_irq(ioapic, pin, irq,
+                        setup_IO_APIC_irq(ioapic, pin, irq, desc,
                                           irq_trigger(irq_entry),
                                           irq_polarity(irq_entry));
                         continue;
@@ -3784,19 +4039,18 @@ void __init setup_ioapic_dest(void)
                 /*
                  * Honour affinities which have been set in early boot
                  */
-                desc = irq_to_desc(irq);
                 if (desc->status &
                     (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
-                        mask = desc->affinity;
+                        mask = &desc->affinity;
                 else
                         mask = TARGET_CPUS;
 
 #ifdef CONFIG_INTR_REMAP
                 if (intr_remapping_enabled)
-                        set_ir_ioapic_affinity_irq(irq, mask);
+                        set_ir_ioapic_affinity_irq_desc(desc, mask);
                 else
 #endif
-                        set_ioapic_affinity_irq(irq, mask);
+                        set_ioapic_affinity_irq_desc(desc, mask);
         }
 
         }
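
With mask now a const struct cpumask *, setup_ioapic_dest() copies nothing per IRQ: it points either at the affinity an early-boot caller already stored in the descriptor (flagged by IRQ_NO_BALANCING or IRQ_AFFINITY_SET) or at the global TARGET_CPUS, and the *_irq_desc helpers take the descriptor directly so the irq-to-desc lookup happens once. The choose-a-pointer pattern in isolation:

#include <stdio.h>

#define IRQ_NO_BALANCING  0x1
#define IRQ_AFFINITY_SET  0x2

struct cpumask { unsigned long bits; };

struct irq_desc {
        unsigned int status;
        struct cpumask affinity;
};

static const struct cpumask target_cpus = { 0xFUL };    /* toy TARGET_CPUS */

/* choose which mask to honour without copying either one */
static const struct cpumask *dest_mask(struct irq_desc *desc)
{
        if (desc->status & (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
                return &desc->affinity;
        return &target_cpus;
}

int main(void)
{
        struct irq_desc d = { .status = IRQ_AFFINITY_SET, .affinity = { 0x2 } };

        printf("%#lx\n", dest_mask(&d)->bits);  /* honours early-boot 0x2 */
        return 0;
}
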
@@ -3845,7 +4099,6 @@ void __init ioapic_init_mappings(void)
         struct resource *ioapic_res;
         int i;
 
-        irq_2_pin_init();
         ioapic_res = ioapic_setup_resources();
         for (i = 0; i < nr_ioapics; i++) {
                 if (smp_found_config) {