author     Yinghai Lu <yhlu.kernel@gmail.com>        2008-08-19 23:50:28 -0400
committer  Ingo Molnar <mingo@elte.hu>               2008-10-16 10:52:53 -0400
commit     497c9a195db918d3f035e8cb3021e5d4d035516e (patch)
tree       73df8764a812f2d6d2bd81d6a1d671008a0d212b /arch
parent     199751d715bba5b469ea22adadc68a4166bfa4f5 (diff)
x86: make 32bit support per_cpu vector
so we can merge io_apic_32.c and io_apic_64.c

v2: Use cpu_online_map as target cpus for bigsmp, just like 64-bit is doing.
    Also remove some unused TARGET_CPUS macro.

v3: need to check if desc is null in smp_irq_move_cleanup
    also migration needs to reset vector too, so copy __target_IO_APIC_irq
    from 64bit. (the duplication will go away once the two files are unified.)

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
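The core of the change, visible in the hunks below: each CPU now carries its own vector-to-IRQ table (vector_irq), do_IRQ() resolves the incoming vector through the current cpu's table, and device vectors are allocated per vector_allocation_domain() in steps of 8 while skipping SYSCALL_VECTOR. The following standalone C model illustrates that allocation loop; it is a sketch only — the constants and the single fixed domain are assumptions for illustration, not the kernel's exact code.

/*
 * Standalone model of the per-cpu vector allocation added by this patch.
 * Illustrative only: NR_CPUS and the single shared domain are assumptions.
 */
#include <stdio.h>

#define NR_CPUS                 4
#define NR_VECTORS              256
#define FIRST_DEVICE_VECTOR     0x31
#define FIRST_SYSTEM_VECTOR     0xef
#define SYSCALL_VECTOR          0x80

static int vector_irq[NR_CPUS][NR_VECTORS];     /* per-cpu table, -1 == free */

/*
 * Find a vector that is free on every cpu in the domain, stepping by 8
 * (one vector per priority class), the way __assign_irq_vector() does.
 */
static int assign_vector(int irq, const int *domain, int ncpus)
{
        static int current_vector = FIRST_DEVICE_VECTOR, current_offset;
        int vector = current_vector, offset = current_offset;

        for (;;) {
                int i;

                vector += 8;
                if (vector >= FIRST_SYSTEM_VECTOR) {
                        offset = (offset + 1) % 8;
                        vector = FIRST_DEVICE_VECTOR + offset;
                }
                if (vector == current_vector)
                        return -1;              /* wrapped around: out of vectors */
                if (vector == SYSCALL_VECTOR)
                        continue;               /* never hand out int 0x80 */
                for (i = 0; i < ncpus; i++)
                        if (vector_irq[domain[i]][vector] != -1)
                                break;
                if (i < ncpus)
                        continue;               /* busy on a cpu in the domain */
                current_vector = vector;
                current_offset = offset;
                for (i = 0; i < ncpus; i++)
                        vector_irq[domain[i]][vector] = irq;    /* publish mapping */
                return vector;
        }
}

int main(void)
{
        int domain[NR_CPUS] = { 0, 1, 2, 3 };
        int cpu, v;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                for (v = 0; v < NR_VECTORS; v++)
                        vector_irq[cpu][v] = -1;

        printf("irq 16 -> vector 0x%x\n", assign_vector(16, domain, NR_CPUS));
        printf("irq 17 -> vector 0x%x\n", assign_vector(17, domain, NR_CPUS));
        return 0;
}

At interrupt time, the irq_32.c hunk below then does the reverse lookup on the local cpu: irq = __get_cpu_var(vector_irq)[vector].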
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/entry_32.S      |   2
-rw-r--r--  arch/x86/kernel/io_apic_32.c    | 719
-rw-r--r--  arch/x86/kernel/irq_32.c        |  18
-rw-r--r--  arch/x86/kernel/irqinit_32.c    |  41
-rw-r--r--  arch/x86/lguest/boot.c          |   2
-rw-r--r--  arch/x86/mach-generic/bigsmp.c  |   4
-rw-r--r--  arch/x86/mach-generic/es7000.c  |  14
-rw-r--r--  arch/x86/mach-generic/numaq.c   |  14
-rw-r--r--  arch/x86/mach-generic/summit.c  |  14
9 files changed, 547 insertions(+), 281 deletions(-)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index b21fbfaffe39..4d82171d0f9c 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -629,7 +629,7 @@ ENTRY(interrupt)
 ENTRY(irq_entries_start)
         RING0_INT_FRAME
 vector=0
-.rept NR_IRQS
+.rept NR_VECTORS
         ALIGN
  .if vector
         CFI_ADJUST_CFA_OFFSET -4
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 66c0a91362a7..ea33d3c74970 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -48,6 +48,7 @@
 #include <asm/hypertransport.h>
 #include <asm/setup.h>
 
+#include <mach_ipi.h>
 #include <mach_apic.h>
 #include <mach_apicdef.h>
 
@@ -60,7 +61,7 @@ atomic_t irq_mis_count;
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
 static DEFINE_SPINLOCK(ioapic_lock);
-DEFINE_SPINLOCK(vector_lock);
+static DEFINE_SPINLOCK(vector_lock);
 
 int timer_through_8259 __initdata;
 
@@ -100,28 +101,32 @@ struct irq_cfg {
         unsigned int irq;
         struct irq_cfg *next;
         struct irq_pin_list *irq_2_pin;
+        cpumask_t domain;
+        cpumask_t old_domain;
+        unsigned move_cleanup_count;
         u8 vector;
+        u8 move_in_progress : 1;
 };
 
 
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
 static struct irq_cfg irq_cfg_legacy[] __initdata = {
-        [0]  = { .irq =  0, .vector = IRQ0_VECTOR,  },
-        [1]  = { .irq =  1, .vector = IRQ1_VECTOR,  },
-        [2]  = { .irq =  2, .vector = IRQ2_VECTOR,  },
-        [3]  = { .irq =  3, .vector = IRQ3_VECTOR,  },
-        [4]  = { .irq =  4, .vector = IRQ4_VECTOR,  },
-        [5]  = { .irq =  5, .vector = IRQ5_VECTOR,  },
-        [6]  = { .irq =  6, .vector = IRQ6_VECTOR,  },
-        [7]  = { .irq =  7, .vector = IRQ7_VECTOR,  },
-        [8]  = { .irq =  8, .vector = IRQ8_VECTOR,  },
-        [9]  = { .irq =  9, .vector = IRQ9_VECTOR,  },
-        [10] = { .irq = 10, .vector = IRQ10_VECTOR, },
-        [11] = { .irq = 11, .vector = IRQ11_VECTOR, },
-        [12] = { .irq = 12, .vector = IRQ12_VECTOR, },
-        [13] = { .irq = 13, .vector = IRQ13_VECTOR, },
-        [14] = { .irq = 14, .vector = IRQ14_VECTOR, },
-        [15] = { .irq = 15, .vector = IRQ15_VECTOR, },
+        [0]  = { .irq =  0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
+        [1]  = { .irq =  1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
+        [2]  = { .irq =  2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
+        [3]  = { .irq =  3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
+        [4]  = { .irq =  4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
+        [5]  = { .irq =  5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
+        [6]  = { .irq =  6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
+        [7]  = { .irq =  7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
+        [8]  = { .irq =  8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
+        [9]  = { .irq =  9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
+        [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
+        [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
+        [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
+        [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
+        [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
+        [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
 };
 
 static struct irq_cfg irq_cfg_init = { .irq = -1U, };
@@ -263,6 +268,7 @@ static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
         return cfg;
 }
 
+static int assign_irq_vector(int irq, cpumask_t mask);
 /*
  * Rough estimation of how many shared IRQs there are, can
  * be changed anytime.
@@ -432,6 +438,65 @@ static void ioapic_mask_entry(int apic, int pin)
         spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
+#ifdef CONFIG_SMP
+static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
+{
+        int apic, pin;
+        struct irq_cfg *cfg;
+        struct irq_pin_list *entry;
+
+        cfg = irq_cfg(irq);
+        entry = cfg->irq_2_pin;
+        for (;;) {
+                unsigned int reg;
+
+                if (!entry)
+                        break;
+
+                apic = entry->apic;
+                pin = entry->pin;
+                io_apic_write(apic, 0x11 + pin*2, dest);
+                reg = io_apic_read(apic, 0x10 + pin*2);
+                reg &= ~IO_APIC_REDIR_VECTOR_MASK;
+                reg |= vector;
+                io_apic_modify(apic, 0x10 + pin*2, reg);
+                if (!entry->next)
+                        break;
+                entry = entry->next;
+        }
+}
+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+        struct irq_cfg *cfg;
+        unsigned long flags;
+        unsigned int dest;
+        cpumask_t tmp;
+
+        cfg = irq_cfg(irq);
+
+        cpus_and(tmp, mask, cpu_online_map);
+        if (cpus_empty(tmp))
+                return;
+
+        if (assign_irq_vector(irq, mask))
+                return;
+
+        cpus_and(tmp, cfg->domain, mask);
+
+        dest = cpu_mask_to_apicid(tmp);
+        /*
+         * Only the high 8 bits are valid.
+         */
+        dest = SET_APIC_LOGICAL_ID(dest);
+
+        spin_lock_irqsave(&ioapic_lock, flags);
+        __target_IO_APIC_irq(irq, dest, cfg->vector);
+        irq_to_desc(irq)->affinity = mask;
+        spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+#endif /* CONFIG_SMP */
+
 /*
  * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
  * shared ISA-space IRQs, so we have to support them. We are super
@@ -586,45 +651,6 @@ static void clear_IO_APIC(void)
                 clear_IO_APIC_pin(apic, pin);
 }
 
-#ifdef CONFIG_SMP
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
-{
-        struct irq_cfg *cfg;
-        unsigned long flags;
-        int pin;
-        struct irq_pin_list *entry;
-        unsigned int apicid_value;
-        cpumask_t tmp;
-
-
-        cfg = irq_cfg(irq);
-        entry = cfg->irq_2_pin;
-
-        cpus_and(tmp, cpumask, cpu_online_map);
-        if (cpus_empty(tmp))
-                tmp = TARGET_CPUS;
-
-        cpus_and(cpumask, tmp, CPU_MASK_ALL);
-
-        apicid_value = cpu_mask_to_apicid(cpumask);
-        /* Prepare to do the io_apic_write */
-        apicid_value = apicid_value << 24;
-        spin_lock_irqsave(&ioapic_lock, flags);
-        for (;;) {
-                if (!entry)
-                        break;
-                pin = entry->pin;
-                io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
-                if (!entry->next)
-                        break;
-                entry = entry->next;
-        }
-        irq_to_desc(irq)->affinity = cpumask;
-        spin_unlock_irqrestore(&ioapic_lock, flags);
-}
-
-#endif /* CONFIG_SMP */
-
 #ifndef CONFIG_SMP
 void send_IPI_self(int vector)
 {
@@ -789,32 +815,6 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
 }
 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
 
-/*
- * This function currently is only a helper for the i386 smp boot process where
- * we need to reprogram the ioredtbls to cater for the cpus which have come online
- * so mask in all cases should simply be TARGET_CPUS
- */
-#ifdef CONFIG_SMP
-void __init setup_ioapic_dest(void)
-{
-        int pin, ioapic, irq, irq_entry;
-
-        if (skip_ioapic_setup == 1)
-                return;
-
-        for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
-                for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
-                        irq_entry = find_irq_entry(ioapic, pin, mp_INT);
-                        if (irq_entry == -1)
-                                continue;
-                        irq = pin_2_irq(irq_entry, ioapic, pin);
-                        set_ioapic_affinity_irq(irq, TARGET_CPUS);
-                }
-
-        }
-}
-#endif
-
 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
 /*
  * EISA Edge/Level control register, ELCR
@@ -1046,47 +1046,138 @@ static inline int IO_APIC_irq_trigger(int irq)
         return 0;
 }
 
+void lock_vector_lock(void)
+{
+        /* Used to the online set of cpus does not change
+         * during assign_irq_vector.
+         */
+        spin_lock(&vector_lock);
+}
 
-static int __assign_irq_vector(int irq)
+void unlock_vector_lock(void)
 {
-        static int current_vector = FIRST_DEVICE_VECTOR, current_offset;
-        int vector, offset;
-        struct irq_cfg *cfg;
+        spin_unlock(&vector_lock);
+}
 
-        cfg = irq_cfg(irq);
-        if (cfg->vector > 0)
-                return cfg->vector;
+static int __assign_irq_vector(int irq, cpumask_t mask)
+{
+        static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
+        unsigned int old_vector;
+        int cpu;
+        struct irq_cfg *cfg;
 
-        vector = current_vector;
-        offset = current_offset;
-next:
-        vector += 8;
-        if (vector >= first_system_vector) {
-                offset = (offset + 1) % 8;
-                vector = FIRST_DEVICE_VECTOR + offset;
-        }
-        if (vector == current_vector)
-                return -ENOSPC;
-        if (test_and_set_bit(vector, used_vectors))
-                goto next;
+        cfg = irq_cfg(irq);
 
-        current_vector = vector;
-        current_offset = offset;
-        cfg->vector = vector;
+        /* Only try and allocate irqs on cpus that are present */
+        cpus_and(mask, mask, cpu_online_map);
 
-        return vector;
-}
+        if ((cfg->move_in_progress) || cfg->move_cleanup_count)
+                return -EBUSY;
 
-static int assign_irq_vector(int irq)
-{
+        old_vector = cfg->vector;
+        if (old_vector) {
+                cpumask_t tmp;
+                cpus_and(tmp, cfg->domain, mask);
+                if (!cpus_empty(tmp))
+                        return 0;
+        }
+
+        for_each_cpu_mask_nr(cpu, mask) {
+                cpumask_t domain, new_mask;
+                int new_cpu;
+                int vector, offset;
+
+                domain = vector_allocation_domain(cpu);
+                cpus_and(new_mask, domain, cpu_online_map);
+
+                vector = current_vector;
+                offset = current_offset;
+next:
+                vector += 8;
+                if (vector >= first_system_vector) {
+                        /* If we run out of vectors on large boxen, must share them. */
+                        offset = (offset + 1) % 8;
+                        vector = FIRST_DEVICE_VECTOR + offset;
+                }
+                if (unlikely(current_vector == vector))
+                        continue;
+                if (vector == SYSCALL_VECTOR)
+                        goto next;
+
+                for_each_cpu_mask_nr(new_cpu, new_mask)
+                        if (per_cpu(vector_irq, new_cpu)[vector] != -1)
+                                goto next;
+                /* Found one! */
+                current_vector = vector;
+                current_offset = offset;
+                if (old_vector) {
+                        cfg->move_in_progress = 1;
+                        cfg->old_domain = cfg->domain;
+                }
+                for_each_cpu_mask_nr(new_cpu, new_mask)
+                        per_cpu(vector_irq, new_cpu)[vector] = irq;
+                cfg->vector = vector;
+                cfg->domain = domain;
+                return 0;
+        }
+        return -ENOSPC;
+}
+
+static int assign_irq_vector(int irq, cpumask_t mask)
+{
+        int err;
         unsigned long flags;
-        int vector;
 
         spin_lock_irqsave(&vector_lock, flags);
-        vector = __assign_irq_vector(irq);
+        err = __assign_irq_vector(irq, mask);
         spin_unlock_irqrestore(&vector_lock, flags);
 
-        return vector;
+        return err;
+}
+
+static void __clear_irq_vector(int irq)
+{
+        struct irq_cfg *cfg;
+        cpumask_t mask;
+        int cpu, vector;
+
+        cfg = irq_cfg(irq);
+        BUG_ON(!cfg->vector);
+
+        vector = cfg->vector;
+        cpus_and(mask, cfg->domain, cpu_online_map);
+        for_each_cpu_mask_nr(cpu, mask)
+                per_cpu(vector_irq, cpu)[vector] = -1;
+
+        cfg->vector = 0;
+        cpus_clear(cfg->domain);
+}
+
+void __setup_vector_irq(int cpu)
+{
+        /* Initialize vector_irq on a new cpu */
+        /* This function must be called with vector_lock held */
+        int irq, vector;
+        struct irq_cfg *cfg;
+
+        /* Mark the inuse vectors */
+        for_each_irq_cfg(cfg) {
+                if (!cpu_isset(cpu, cfg->domain))
+                        continue;
+                vector = cfg->vector;
+                irq = cfg->irq;
+                per_cpu(vector_irq, cpu)[vector] = irq;
+        }
+        /* Mark the free vectors */
+        for (vector = 0; vector < NR_VECTORS; ++vector) {
+                irq = per_cpu(vector_irq, cpu)[vector];
+                if (irq < 0)
+                        continue;
+
+                cfg = irq_cfg(irq);
+                if (!cpu_isset(cpu, cfg->domain))
+                        per_cpu(vector_irq, cpu)[vector] = -1;
+        }
 }
 
 static struct irq_chip ioapic_chip;
@@ -1095,7 +1186,7 @@ static struct irq_chip ioapic_chip;
 #define IOAPIC_EDGE        0
 #define IOAPIC_LEVEL       1
 
-static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
+static void ioapic_register_intr(int irq, unsigned long trigger)
 {
         struct irq_desc *desc;
 
@@ -1115,79 +1206,109 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
                 set_irq_chip_and_handler_name(irq, &ioapic_chip,
                                               handle_edge_irq, "edge");
         }
-        set_intr_gate(vector, interrupt[irq]);
 }
 
-static void __init setup_IO_APIC_irqs(void)
+static int setup_ioapic_entry(int apic, int irq,
+                              struct IO_APIC_route_entry *entry,
+                              unsigned int destination, int trigger,
+                              int polarity, int vector)
 {
+        /*
+         * add it to the IO-APIC irq-routing table:
+         */
+        memset(entry,0,sizeof(*entry));
+
+        entry->delivery_mode = INT_DELIVERY_MODE;
+        entry->dest_mode = INT_DEST_MODE;
+        entry->dest.logical.logical_dest = destination;
+
+        entry->mask = 0;                        /* enable IRQ */
+        entry->trigger = trigger;
+        entry->polarity = polarity;
+        entry->vector = vector;
+
+        /* Mask level triggered irqs.
+         * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
+         */
+        if (trigger)
+                entry->mask = 1;
+
+        return 0;
+}
+
+static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
+                              int trigger, int polarity)
+{
+        struct irq_cfg *cfg;
         struct IO_APIC_route_entry entry;
-        int apic, pin, idx, irq, first_notcon = 1, vector;
+        cpumask_t mask;
+
+        if (!IO_APIC_IRQ(irq))
+                return;
+
+        cfg = irq_cfg(irq);
+
+        mask = TARGET_CPUS;
+        if (assign_irq_vector(irq, mask))
+                return;
+
+        cpus_and(mask, cfg->domain, mask);
+
+        apic_printk(APIC_VERBOSE,KERN_DEBUG
+                    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
+                    "IRQ %d Mode:%i Active:%i)\n",
+                    apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
+                    irq, trigger, polarity);
+
+
+        if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
+                               cpu_mask_to_apicid(mask), trigger, polarity,
+                               cfg->vector)) {
+                printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
+                       mp_ioapics[apic].mp_apicid, pin);
+                __clear_irq_vector(irq);
+                return;
+        }
+
+        ioapic_register_intr(irq, trigger);
+        if (irq < 16)
+                disable_8259A_irq(irq);
+
+        ioapic_write_entry(apic, pin, entry);
+}
+
+static void __init setup_IO_APIC_irqs(void)
+{
+        int apic, pin, idx, irq, first_notcon = 1;
 
         apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 
         for (apic = 0; apic < nr_ioapics; apic++) {
         for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
 
-                        /*
-                         * add it to the IO-APIC irq-routing table:
-                         */
-                        memset(&entry, 0, sizeof(entry));
-
-                        entry.delivery_mode = INT_DELIVERY_MODE;
-                        entry.dest_mode = INT_DEST_MODE;
-                        entry.mask = 0; /* enable IRQ */
-                        entry.dest.logical.logical_dest =
-                                        cpu_mask_to_apicid(TARGET_CPUS);
-
-                        idx = find_irq_entry(apic, pin, mp_INT);
+                        idx = find_irq_entry(apic,pin,mp_INT);
                         if (idx == -1) {
                                 if (first_notcon) {
-                                        apic_printk(APIC_VERBOSE, KERN_DEBUG
-                                                " IO-APIC (apicid-pin) %d-%d",
-                                                mp_ioapics[apic].mp_apicid,
-                                                pin);
+                                        apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
                                         first_notcon = 0;
                                 } else
-                                        apic_printk(APIC_VERBOSE, ", %d-%d",
-                                                mp_ioapics[apic].mp_apicid, pin);
+                                        apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
                                 continue;
                         }
-
                         if (!first_notcon) {
                                 apic_printk(APIC_VERBOSE, " not connected.\n");
                                 first_notcon = 1;
                         }
 
-                        entry.trigger = irq_trigger(idx);
-                        entry.polarity = irq_polarity(idx);
-
-                        if (irq_trigger(idx)) {
-                                entry.trigger = 1;
-                                entry.mask = 1;
-                        }
-
                         irq = pin_2_irq(idx, apic, pin);
-                        /*
-                         * skip adding the timer int on secondary nodes, which causes
-                         * a small but painful rift in the time-space continuum
-                         */
-                        if (multi_timer_check(apic, irq))
-                                continue;
-                        else
-                                add_pin_to_irq(irq, apic, pin);
 
-                        if (!apic && !IO_APIC_IRQ(irq))
+                        if (multi_timer_check(apic, irq))
                                 continue;
 
-                        if (IO_APIC_IRQ(irq)) {
-                                vector = assign_irq_vector(irq);
-                                entry.vector = vector;
-                                ioapic_register_intr(irq, vector, IOAPIC_AUTO);
-
-                                if (!apic && (irq < 16))
-                                        disable_8259A_irq(irq);
-                        }
-                        ioapic_write_entry(apic, pin, entry);
+                        add_pin_to_irq(irq, apic, pin);
+
+                        setup_IO_APIC_irq(apic, pin, irq,
+                                          irq_trigger(idx), irq_polarity(idx));
                 }
         }
 
@@ -1221,7 +1342,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
          * The timer IRQ doesn't have to know that behind the
          * scene we may have a 8259A-master in AEOI mode ...
          */
-        ioapic_register_intr(0, vector, IOAPIC_EDGE);
+        ioapic_register_intr(0, IOAPIC_EDGE);
 
         /*
          * Add it to the IO-APIC irq-routing table:
@@ -1805,8 +1926,10 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
         return was_pending;
 }
 
+static void irq_complete_move(unsigned int irq);
 static void ack_ioapic_irq(unsigned int irq)
 {
+        irq_complete_move(irq);
         move_native_irq(irq);
         ack_APIC_irq();
 }
@@ -1816,6 +1939,7 @@ static void ack_ioapic_quirk_irq(unsigned int irq)
         unsigned long v;
         int i;
 
+        irq_complete_move(irq);
         move_native_irq(irq);
 /*
  * It appears there is an erratum which affects at least version 0x11
@@ -1858,6 +1982,64 @@ static int ioapic_retrigger_irq(unsigned int irq)
         return 1;
 }
 
+#ifdef CONFIG_SMP
+asmlinkage void smp_irq_move_cleanup_interrupt(void)
+{
+        unsigned vector, me;
+        ack_APIC_irq();
+        irq_enter();
+
+        me = smp_processor_id();
+        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+                unsigned int irq;
+                struct irq_desc *desc;
+                struct irq_cfg *cfg;
+                irq = __get_cpu_var(vector_irq)[vector];
+
+                desc = irq_to_desc(irq);
+                if (!desc)
+                        continue;
+
+                cfg = irq_cfg(irq);
+                spin_lock(&desc->lock);
+                if (!cfg->move_cleanup_count)
+                        goto unlock;
+
+                if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
+                        goto unlock;
+
+                __get_cpu_var(vector_irq)[vector] = -1;
+                cfg->move_cleanup_count--;
+unlock:
+                spin_unlock(&desc->lock);
+        }
+
+        irq_exit();
+}
+
+static void irq_complete_move(unsigned int irq)
+{
+        struct irq_cfg *cfg = irq_cfg(irq);
+        unsigned vector, me;
+
+        if (likely(!cfg->move_in_progress))
+                return;
+
+        vector = ~get_irq_regs()->orig_ax;
+        me = smp_processor_id();
+        if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
+                cpumask_t cleanup_mask;
+
+                cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+                cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+                send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+                cfg->move_in_progress = 0;
+        }
+}
+#else
+static inline void irq_complete_move(unsigned int irq) {}
+#endif
+
 static struct irq_chip ioapic_chip __read_mostly = {
         .name           = "IO-APIC",
         .startup        = startup_ioapic_irq,
@@ -1940,7 +2122,7 @@ static struct irq_chip lapic_chip __read_mostly = {
         .ack            = ack_lapic_irq,
 };
 
-static void lapic_register_intr(int irq, int vector)
+static void lapic_register_intr(int irq)
 {
         struct irq_desc *desc;
 
@@ -1948,7 +2130,6 @@ static void lapic_register_intr(int irq, int vector)
         desc->status &= ~IRQ_LEVEL;
         set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
                                       "edge");
-        set_intr_gate(vector, interrupt[irq]);
 }
 
 static void __init setup_nmi(void)
@@ -2036,9 +2217,9 @@ static inline void __init unlock_ExtINT_logic(void)
  */
 static inline void __init check_timer(void)
 {
+        struct irq_cfg *cfg = irq_cfg(0);
         int apic1, pin1, apic2, pin2;
         int no_pin1 = 0;
-        int vector;
         unsigned int ver;
         unsigned long flags;
 
@@ -2051,8 +2232,7 @@ static inline void __init check_timer(void)
          * get/set the timer IRQ vector:
          */
         disable_8259A_irq(0);
-        vector = assign_irq_vector(0);
-        set_intr_gate(vector, interrupt[0]);
+        assign_irq_vector(0, TARGET_CPUS);
 
         /*
          * As IRQ0 is to be enabled in the 8259A, the virtual
@@ -2074,7 +2254,7 @@ static inline void __init check_timer(void)
 
         apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
                     "apic1=%d pin1=%d apic2=%d pin2=%d\n",
-                    vector, apic1, pin1, apic2, pin2);
+                    cfg->vector, apic1, pin1, apic2, pin2);
 
         /*
          * Some BIOS writers are clueless and report the ExtINTA
@@ -2098,7 +2278,7 @@ static inline void __init check_timer(void)
          */
         if (no_pin1) {
                 add_pin_to_irq(0, apic1, pin1);
-                setup_timer_IRQ0_pin(apic1, pin1, vector);
+                setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
         }
         unmask_IO_APIC_irq(0);
         if (timer_irq_works()) {
@@ -2123,7 +2303,7 @@ static inline void __init check_timer(void)
          * legacy devices should be connected to IO APIC #0
          */
         replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
-        setup_timer_IRQ0_pin(apic2, pin2, vector);
+        setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
         unmask_IO_APIC_irq(0);
         enable_8259A_irq(0);
         if (timer_irq_works()) {
@@ -2154,8 +2334,8 @@ static inline void __init check_timer(void)
         apic_printk(APIC_QUIET, KERN_INFO
                     "...trying to set up timer as Virtual Wire IRQ...\n");
 
-        lapic_register_intr(0, vector);
-        apic_write(APIC_LVT0, APIC_DM_FIXED | vector);  /* Fixed mode */
+        lapic_register_intr(0);
+        apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);     /* Fixed mode */
         enable_8259A_irq(0);
 
         if (timer_irq_works()) {
@@ -2163,7 +2343,7 @@ static inline void __init check_timer(void)
                 goto out;
         }
         disable_8259A_irq(0);
-        apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
+        apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
         apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
 
         apic_printk(APIC_QUIET, KERN_INFO
@@ -2207,12 +2387,6 @@ out:
 
 void __init setup_IO_APIC(void)
 {
-        int i;
-
-        /* Reserve all the system vectors. */
-        for (i = first_system_vector; i < NR_VECTORS; i++)
-                set_bit(i, used_vectors);
-
         enable_IO_APIC();
 
         io_apic_irqs = ~PIC_IRQS;
@@ -2334,12 +2508,14 @@ device_initcall(ioapic_init_sysfs);
 unsigned int create_irq_nr(unsigned int irq_want)
 {
         /* Allocate an unused irq */
-        unsigned int irq, new, vector = 0;
+        unsigned int irq, new;
         unsigned long flags;
         struct irq_cfg *cfg_new;
 
+#ifndef CONFIG_HAVE_SPARSE_IRQ
         /* only can use bus/dev/fn.. when per_cpu vector is used */
         irq_want = nr_irqs - 1;
+#endif
 
         irq = 0;
         spin_lock_irqsave(&vector_lock, flags);
@@ -2351,15 +2527,13 @@ unsigned int create_irq_nr(unsigned int irq_want)
                         continue;
                 if (!cfg_new)
                         cfg_new = irq_cfg_alloc(new);
-                vector = __assign_irq_vector(new);
-                if (likely(vector > 0))
+                if (__assign_irq_vector(new, TARGET_CPUS) == 0)
                         irq = new;
                 break;
         }
         spin_unlock_irqrestore(&vector_lock, flags);
 
         if (irq > 0) {
-                set_intr_gate(vector, interrupt[irq]);
                 dynamic_irq_init(irq);
         }
         return irq;
@@ -2377,8 +2551,7 @@ void destroy_irq(unsigned int irq)
         dynamic_irq_cleanup(irq);
 
         spin_lock_irqsave(&vector_lock, flags);
-        clear_bit(irq_cfg(irq)->vector, used_vectors);
-        irq_cfg(irq)->vector = 0;
+        __clear_irq_vector(irq);
         spin_unlock_irqrestore(&vector_lock, flags);
 }
 
@@ -2388,57 +2561,65 @@ void destroy_irq(unsigned int irq)
 #ifdef CONFIG_PCI_MSI
 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
 {
-        int vector;
+        struct irq_cfg *cfg;
+        int err;
         unsigned dest;
+        cpumask_t tmp;
 
-        vector = assign_irq_vector(irq);
-        if (vector >= 0) {
-                dest = cpu_mask_to_apicid(TARGET_CPUS);
-
-                msg->address_hi = MSI_ADDR_BASE_HI;
-                msg->address_lo =
-                        MSI_ADDR_BASE_LO |
-                        ((INT_DEST_MODE == 0) ?
-                                MSI_ADDR_DEST_MODE_PHYSICAL:
-                                MSI_ADDR_DEST_MODE_LOGICAL) |
-                        ((INT_DELIVERY_MODE != dest_LowestPrio) ?
-                                MSI_ADDR_REDIRECTION_CPU:
-                                MSI_ADDR_REDIRECTION_LOWPRI) |
-                        MSI_ADDR_DEST_ID(dest);
-
-                msg->data =
-                        MSI_DATA_TRIGGER_EDGE |
-                        MSI_DATA_LEVEL_ASSERT |
-                        ((INT_DELIVERY_MODE != dest_LowestPrio) ?
-                                MSI_DATA_DELIVERY_FIXED:
-                                MSI_DATA_DELIVERY_LOWPRI) |
-                        MSI_DATA_VECTOR(vector);
-        }
-        return vector;
+        tmp = TARGET_CPUS;
+        err = assign_irq_vector(irq, tmp);
+        if (err)
+                return err;
+
+        cfg = irq_cfg(irq);
+        cpus_and(tmp, cfg->domain, tmp);
+        dest = cpu_mask_to_apicid(tmp);
+
+        msg->address_hi = MSI_ADDR_BASE_HI;
+        msg->address_lo =
+                MSI_ADDR_BASE_LO |
+                ((INT_DEST_MODE == 0) ?
+                        MSI_ADDR_DEST_MODE_PHYSICAL:
+                        MSI_ADDR_DEST_MODE_LOGICAL) |
+                ((INT_DELIVERY_MODE != dest_LowestPrio) ?
+                        MSI_ADDR_REDIRECTION_CPU:
+                        MSI_ADDR_REDIRECTION_LOWPRI) |
+                MSI_ADDR_DEST_ID(dest);
+
+        msg->data =
+                MSI_DATA_TRIGGER_EDGE |
+                MSI_DATA_LEVEL_ASSERT |
+                ((INT_DELIVERY_MODE != dest_LowestPrio) ?
+                        MSI_DATA_DELIVERY_FIXED:
+                        MSI_DATA_DELIVERY_LOWPRI) |
+                MSI_DATA_VECTOR(cfg->vector);
+
+        return err;
 }
 
 #ifdef CONFIG_SMP
 static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 {
+        struct irq_cfg *cfg;
         struct msi_msg msg;
         unsigned int dest;
         cpumask_t tmp;
-        int vector;
 
         cpus_and(tmp, mask, cpu_online_map);
         if (cpus_empty(tmp))
-                tmp = TARGET_CPUS;
+                return;
 
-        vector = assign_irq_vector(irq);
-        if (vector < 0)
+        if (assign_irq_vector(irq, mask))
                 return;
 
-        dest = cpu_mask_to_apicid(mask);
+        cfg = irq_cfg(irq);
+        cpus_and(tmp, cfg->domain, mask);
+        dest = cpu_mask_to_apicid(tmp);
 
         read_msi_msg(irq, &msg);
 
         msg.data &= ~MSI_DATA_VECTOR_MASK;
-        msg.data |= MSI_DATA_VECTOR(vector);
+        msg.data |= MSI_DATA_VECTOR(cfg->vector);
         msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
         msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
@@ -2517,15 +2698,15 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_SMP
 
-static void target_ht_irq(unsigned int irq, unsigned int dest)
+static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
 {
         struct ht_irq_msg msg;
         fetch_ht_irq_msg(irq, &msg);
 
-        msg.address_lo &= ~(HT_IRQ_LOW_DEST_ID_MASK);
+        msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
         msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
 
-        msg.address_lo |= HT_IRQ_LOW_DEST_ID(dest);
+        msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
         msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
 
         write_ht_irq_msg(irq, &msg);
@@ -2533,18 +2714,22 @@ static void target_ht_irq(unsigned int irq, unsigned int dest)
 
 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 {
+        struct irq_cfg *cfg;
         unsigned int dest;
         cpumask_t tmp;
 
         cpus_and(tmp, mask, cpu_online_map);
         if (cpus_empty(tmp))
-                tmp = TARGET_CPUS;
+                return;
 
-        cpus_and(mask, tmp, CPU_MASK_ALL);
+        if (assign_irq_vector(irq, mask))
+                return;
 
-        dest = cpu_mask_to_apicid(mask);
+        cfg = irq_cfg(irq);
+        cpus_and(tmp, cfg->domain, mask);
+        dest = cpu_mask_to_apicid(tmp);
 
-        target_ht_irq(irq, dest);
+        target_ht_irq(irq, dest, cfg->vector);
         irq_to_desc(irq)->affinity = mask;
 }
 #endif
@@ -2562,16 +2747,18 @@ static struct irq_chip ht_irq_chip = {
 
 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 {
-        int vector;
+        struct irq_cfg *cfg;
+        int err;
+        cpumask_t tmp;
 
-        vector = assign_irq_vector(irq);
-        if (vector >= 0) {
+        tmp = TARGET_CPUS;
+        err = assign_irq_vector(irq, tmp);
+        if (!err) {
                 struct ht_irq_msg msg;
                 unsigned dest;
-                cpumask_t tmp;
 
-                cpus_clear(tmp);
-                cpu_set(vector >> 8, tmp);
-                dest = cpu_mask_to_apicid(tmp);
+                cfg = irq_cfg(irq);
+                cpus_and(tmp, cfg->domain, tmp);
+                dest = cpu_mask_to_apicid(tmp);
 
                 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
@@ -2579,7 +2766,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
                 msg.address_lo =
                         HT_IRQ_LOW_BASE |
                         HT_IRQ_LOW_DEST_ID(dest) |
-                        HT_IRQ_LOW_VECTOR(vector) |
+                        HT_IRQ_LOW_VECTOR(cfg->vector) |
                         ((INT_DEST_MODE == 0) ?
                                 HT_IRQ_LOW_DM_PHYSICAL :
                                 HT_IRQ_LOW_DM_LOGICAL) |
@@ -2594,7 +2781,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
                 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
                                               handle_edge_irq, "edge");
         }
-        return vector;
+        return err;
 }
 #endif /* CONFIG_HT_IRQ */
 
@@ -2705,10 +2892,8 @@ int __init io_apic_get_redir_entries(int ioapic)
 }
 
 
-int io_apic_set_pci_routing(int ioapic, int pin, int irq, int edge_level, int active_high_low)
+int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
 {
-        struct IO_APIC_route_entry entry;
-
         if (!IO_APIC_IRQ(irq)) {
                 printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
                         ioapic);
@@ -2716,39 +2901,12 @@ int io_apic_set_pci_routing(int ioapic, int pin, int irq, int edge_level, int ac
         }
 
         /*
-         * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
-         * Note that we mask (disable) IRQs now -- these get enabled when the
-         * corresponding device driver registers for this IRQ.
-         */
-
-        memset(&entry, 0, sizeof(entry));
-
-        entry.delivery_mode = INT_DELIVERY_MODE;
-        entry.dest_mode = INT_DEST_MODE;
-        entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-        entry.trigger = edge_level;
-        entry.polarity = active_high_low;
-        entry.mask = 1;
-
-        /*
          * IRQs < 16 are already in the irq_2_pin[] map
          */
         if (irq >= 16)
                 add_pin_to_irq(irq, ioapic, pin);
 
-        entry.vector = assign_irq_vector(irq);
-
-        apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
-                "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
-                mp_ioapics[ioapic].mp_apicid, pin, entry.vector, irq,
-                edge_level, active_high_low);
-
-        ioapic_register_intr(irq, entry.vector, edge_level);
-
-        if (!ioapic && (irq < 16))
-                disable_8259A_irq(irq);
-
-        ioapic_write_entry(ioapic, pin, entry);
+        setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
 
         return 0;
 }
@@ -2774,6 +2932,47 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
 
 #endif /* CONFIG_ACPI */
 
+/*
+ * This function currently is only a helper for the i386 smp boot process where
+ * we need to reprogram the ioredtbls to cater for the cpus which have come online
+ * so mask in all cases should simply be TARGET_CPUS
+ */
+#ifdef CONFIG_SMP
+void __init setup_ioapic_dest(void)
+{
+        int pin, ioapic, irq, irq_entry;
+        struct irq_cfg *cfg;
+        struct irq_desc *desc;
+
+        if (skip_ioapic_setup == 1)
+                return;
+
+        for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
+                for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
+                        irq_entry = find_irq_entry(ioapic, pin, mp_INT);
+                        if (irq_entry == -1)
+                                continue;
+                        irq = pin_2_irq(irq_entry, ioapic, pin);
+
+                        /* setup_IO_APIC_irqs could fail to get vector for some device
+                         * when you have too many devices, because at that time only boot
+                         * cpu is online.
+                         */
+                        cfg = irq_cfg(irq);
+                        if (!cfg->vector)
+                                setup_IO_APIC_irq(ioapic, pin, irq,
+                                                  irq_trigger(irq_entry),
+                                                  irq_polarity(irq_entry));
+                        else {
+                                desc = irq_to_desc(irq);
+                                set_ioapic_affinity_irq(irq, TARGET_CPUS);
+                        }
+                }
+
+        }
+}
+#endif
+
2777static int __init parse_disable_timer_pin_1(char *arg) 2976static int __init parse_disable_timer_pin_1(char *arg)
2778{ 2977{
2779 disable_timer_pin_1 = 1; 2978 disable_timer_pin_1 = 1;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 0a57e39159a8..b51ffdcfa31a 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -223,21 +223,25 @@ unsigned int do_IRQ(struct pt_regs *regs)
 {
         struct pt_regs *old_regs;
         /* high bit used in ret_from_ code */
-        int overflow, irq = ~regs->orig_ax;
+        int overflow;
+        unsigned vector = ~regs->orig_ax;
         struct irq_desc *desc;
+        unsigned irq;
 
-        desc = irq_to_desc(irq);
-        if (unlikely(!desc)) {
-                printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-                                        __func__, irq);
-                BUG();
-        }
 
         old_regs = set_irq_regs(regs);
         irq_enter();
+        irq = __get_cpu_var(vector_irq)[vector];
 
         overflow = check_stack_overflow();
 
+        desc = irq_to_desc(irq);
+        if (unlikely(!desc)) {
+                printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x\n",
+                                        __func__, irq, vector);
+                BUG();
+        }
+
         if (!execute_on_irq_stack(overflow, desc, irq)) {
                 if (unlikely(overflow))
                         print_stack_overflow();
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index ded09ac2642e..9092103a18eb 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -90,6 +90,27 @@ static struct irqaction irq2 = {
90 .name = "cascade", 90 .name = "cascade",
91}; 91};
92 92
93DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
94 [0 ... IRQ0_VECTOR - 1] = -1,
95 [IRQ0_VECTOR] = 0,
96 [IRQ1_VECTOR] = 1,
97 [IRQ2_VECTOR] = 2,
98 [IRQ3_VECTOR] = 3,
99 [IRQ4_VECTOR] = 4,
100 [IRQ5_VECTOR] = 5,
101 [IRQ6_VECTOR] = 6,
102 [IRQ7_VECTOR] = 7,
103 [IRQ8_VECTOR] = 8,
104 [IRQ9_VECTOR] = 9,
105 [IRQ10_VECTOR] = 10,
106 [IRQ11_VECTOR] = 11,
107 [IRQ12_VECTOR] = 12,
108 [IRQ13_VECTOR] = 13,
109 [IRQ14_VECTOR] = 14,
110 [IRQ15_VECTOR] = 15,
111 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
112};
113
93/* Overridden in paravirt.c */ 114/* Overridden in paravirt.c */
94void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); 115void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
95 116
@@ -105,22 +126,14 @@ void __init native_init_IRQ(void)
          * us. (some of these will be overridden and become
          * 'special' SMP interrupts)
          */
-        for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
-                int vector = FIRST_EXTERNAL_VECTOR + i;
-                if (i >= nr_irqs)
-                        break;
+        for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
                 /* SYSCALL_VECTOR was reserved in trap_init. */
-                if (!test_bit(vector, used_vectors))
-                        set_intr_gate(vector, interrupt[i]);
+                if (i != SYSCALL_VECTOR)
+                        set_intr_gate(i, interrupt[i]);
         }
 
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
-        /*
-         * IRQ0 must be given a fixed assignment and initialized,
-         * because it's used before the IO-APIC is set up.
-         */
-        set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
 
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
         /*
          * The reschedule interrupt is a CPU-to-CPU reschedule-helper
          * IPI, driven by wakeup.
@@ -135,6 +148,9 @@ void __init native_init_IRQ(void)
 
         /* IPI for single call function */
         set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt);
+
+        /* Low priority IPI to cleanup after moving an irq */
+        set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -168,3 +184,4 @@ void __init native_init_IRQ(void)
 
         irq_ctx_init(smp_processor_id());
 }
+
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 65f0b8a47bed..48ee4f9435f4 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -582,7 +582,7 @@ static void __init lguest_init_IRQ(void)
         for (i = 0; i < LGUEST_IRQS; i++) {
                 int vector = FIRST_EXTERNAL_VECTOR + i;
                 if (vector != SYSCALL_VECTOR) {
-                        set_intr_gate(vector, interrupt[i]);
+                        set_intr_gate(vector, interrupt[vector]);
                         set_irq_chip_and_handler_name(i, &lguest_irq_controller,
                                                       handle_level_irq,
                                                       "level");
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
index df37fc9d6a26..3c3b471ea496 100644
--- a/arch/x86/mach-generic/bigsmp.c
+++ b/arch/x86/mach-generic/bigsmp.c
@@ -41,6 +41,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
         { }
 };
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+        return cpumask_of_cpu(cpu);
+}
 
 static int probe_bigsmp(void)
 {
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c
index 6513d41ea21e..28459cab3ddb 100644
--- a/arch/x86/mach-generic/es7000.c
+++ b/arch/x86/mach-generic/es7000.c
@@ -75,4 +75,18 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 }
 #endif
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+        /* Careful. Some cpus do not strictly honor the set of cpus
+         * specified in the interrupt destination when using lowest
+         * priority interrupt delivery mode.
+         *
+         * In particular there was a hyperthreading cpu observed to
+         * deliver interrupts to the wrong hyperthread when only one
+         * hyperthread was specified in the interrupt desitination.
+         */
+        cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+        return domain;
+}
+
 struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c
index 8cf58394975e..71a309b122e6 100644
--- a/arch/x86/mach-generic/numaq.c
+++ b/arch/x86/mach-generic/numaq.c
@@ -38,4 +38,18 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
         return 0;
 }
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+        /* Careful. Some cpus do not strictly honor the set of cpus
+         * specified in the interrupt destination when using lowest
+         * priority interrupt delivery mode.
+         *
+         * In particular there was a hyperthreading cpu observed to
+         * deliver interrupts to the wrong hyperthread when only one
+         * hyperthread was specified in the interrupt desitination.
+         */
+        cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+        return domain;
+}
+
 struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c
index 6ad6b67a723d..6272b5e69da6 100644
--- a/arch/x86/mach-generic/summit.c
+++ b/arch/x86/mach-generic/summit.c
@@ -23,4 +23,18 @@ static int probe_summit(void)
         return 0;
 }
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+        /* Careful. Some cpus do not strictly honor the set of cpus
+         * specified in the interrupt destination when using lowest
+         * priority interrupt delivery mode.
+         *
+         * In particular there was a hyperthreading cpu observed to
+         * deliver interrupts to the wrong hyperthread when only one
+         * hyperthread was specified in the interrupt desitination.
+         */
+        cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+        return domain;
+}
+
 struct genapic apic_summit = APIC_INIT("summit", probe_summit);