aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/include/asm/irq.h1
-rw-r--r--arch/x86/include/asm/irq_vectors.h48
-rw-r--r--arch/x86/kernel/apic/io_apic.c156
-rw-r--r--arch/x86/kernel/apic/nmi.c6
-rw-r--r--arch/x86/kernel/irqinit.c35
-rw-r--r--arch/x86/kernel/smpboot.c6
-rw-r--r--arch/x86/kernel/vmiclock_32.c6
7 files changed, 129 insertions, 129 deletions
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 5458380b6ef8..262292729fc4 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -48,5 +48,6 @@ extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
48extern int vector_used_by_percpu_irq(unsigned int vector); 48extern int vector_used_by_percpu_irq(unsigned int vector);
49 49
50extern void init_ISA_irqs(void); 50extern void init_ISA_irqs(void);
51extern int nr_legacy_irqs;
51 52
52#endif /* _ASM_X86_IRQ_H */ 53#endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 4611f085cd43..8767d99c4f64 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -28,28 +28,33 @@
28#define MCE_VECTOR 0x12 28#define MCE_VECTOR 0x12
29 29
30/* 30/*
31 * IDT vectors usable for external interrupt sources start 31 * IDT vectors usable for external interrupt sources start at 0x20.
32 * at 0x20: 32 * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
33 */ 33 */
34#define FIRST_EXTERNAL_VECTOR 0x20 34#define FIRST_EXTERNAL_VECTOR 0x20
35 35/*
36#ifdef CONFIG_X86_32 36 * We start allocating at 0x21 to spread out vectors evenly between
37# define SYSCALL_VECTOR 0x80 37 * priority levels. (0x80 is the syscall vector)
38# define IA32_SYSCALL_VECTOR 0x80 38 */
39#else 39#define VECTOR_OFFSET_START 1
40# define IA32_SYSCALL_VECTOR 0x80
41#endif
42 40
43/* 41/*
44 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering 42 * Reserve the lowest usable vector (and hence lowest priority) 0x20 for
45 * cleanup after irq migration. 43 * triggering cleanup after irq migration. 0x21-0x2f will still be used
44 * for device interrupts.
46 */ 45 */
47#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR 46#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
48 47
48#define IA32_SYSCALL_VECTOR 0x80
49#ifdef CONFIG_X86_32
50# define SYSCALL_VECTOR 0x80
51#endif
52
49/* 53/*
50 * Vectors 0x30-0x3f are used for ISA interrupts. 54 * Vectors 0x30-0x3f are used for ISA interrupts.
55 * round up to the next 16-vector boundary
51 */ 56 */
52#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) 57#define IRQ0_VECTOR ((FIRST_EXTERNAL_VECTOR + 16) & ~15)
53 58
54#define IRQ1_VECTOR (IRQ0_VECTOR + 1) 59#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
55#define IRQ2_VECTOR (IRQ0_VECTOR + 2) 60#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
@@ -120,13 +125,6 @@
120 */ 125 */
121#define MCE_SELF_VECTOR 0xeb 126#define MCE_SELF_VECTOR 0xeb
122 127
123/*
124 * First APIC vector available to drivers: (vectors 0x30-0xee) we
125 * start at 0x31(0x41) to spread out vectors evenly between priority
126 * levels. (0x80 is the syscall vector)
127 */
128#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
129
130#define NR_VECTORS 256 128#define NR_VECTORS 256
131 129
132#define FPU_IRQ 13 130#define FPU_IRQ 13
@@ -154,21 +152,21 @@ static inline int invalid_vm86_irq(int irq)
154 152
155#define NR_IRQS_LEGACY 16 153#define NR_IRQS_LEGACY 16
156 154
157#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS )
158#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS ) 155#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
159 156
160#ifdef CONFIG_X86_IO_APIC 157#ifdef CONFIG_X86_IO_APIC
161# ifdef CONFIG_SPARSE_IRQ 158# ifdef CONFIG_SPARSE_IRQ
159# define CPU_VECTOR_LIMIT (64 * NR_CPUS)
162# define NR_IRQS \ 160# define NR_IRQS \
163 (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \ 161 (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
164 (NR_VECTORS + CPU_VECTOR_LIMIT) : \ 162 (NR_VECTORS + CPU_VECTOR_LIMIT) : \
165 (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) 163 (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
166# else 164# else
167# if NR_CPUS < MAX_IO_APICS 165# define CPU_VECTOR_LIMIT (32 * NR_CPUS)
168# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT) 166# define NR_IRQS \
169# else 167 (CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ? \
170# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT) 168 (NR_VECTORS + CPU_VECTOR_LIMIT) : \
171# endif 169 (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
172# endif 170# endif
173#else /* !CONFIG_X86_IO_APIC: */ 171#else /* !CONFIG_X86_IO_APIC: */
174# define NR_IRQS NR_IRQS_LEGACY 172# define NR_IRQS NR_IRQS_LEGACY
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 527390cd6115..979589881c80 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -73,8 +73,8 @@
73 */ 73 */
74int sis_apic_bug = -1; 74int sis_apic_bug = -1;
75 75
76static DEFINE_SPINLOCK(ioapic_lock); 76static DEFINE_RAW_SPINLOCK(ioapic_lock);
77static DEFINE_SPINLOCK(vector_lock); 77static DEFINE_RAW_SPINLOCK(vector_lock);
78 78
79/* 79/*
80 * # of IRQ routing registers 80 * # of IRQ routing registers
@@ -94,8 +94,6 @@ struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
94/* # of MP IRQ source entries */ 94/* # of MP IRQ source entries */
95int mp_irq_entries; 95int mp_irq_entries;
96 96
97/* Number of legacy interrupts */
98static int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
99/* GSI interrupts */ 97/* GSI interrupts */
100static int nr_irqs_gsi = NR_IRQS_LEGACY; 98static int nr_irqs_gsi = NR_IRQS_LEGACY;
101 99
@@ -140,27 +138,10 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int node)
140 138
141/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ 139/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
142#ifdef CONFIG_SPARSE_IRQ 140#ifdef CONFIG_SPARSE_IRQ
143static struct irq_cfg irq_cfgx[] = { 141static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
144#else 142#else
145static struct irq_cfg irq_cfgx[NR_IRQS] = { 143static struct irq_cfg irq_cfgx[NR_IRQS];
146#endif 144#endif
147 [0] = { .vector = IRQ0_VECTOR, },
148 [1] = { .vector = IRQ1_VECTOR, },
149 [2] = { .vector = IRQ2_VECTOR, },
150 [3] = { .vector = IRQ3_VECTOR, },
151 [4] = { .vector = IRQ4_VECTOR, },
152 [5] = { .vector = IRQ5_VECTOR, },
153 [6] = { .vector = IRQ6_VECTOR, },
154 [7] = { .vector = IRQ7_VECTOR, },
155 [8] = { .vector = IRQ8_VECTOR, },
156 [9] = { .vector = IRQ9_VECTOR, },
157 [10] = { .vector = IRQ10_VECTOR, },
158 [11] = { .vector = IRQ11_VECTOR, },
159 [12] = { .vector = IRQ12_VECTOR, },
160 [13] = { .vector = IRQ13_VECTOR, },
161 [14] = { .vector = IRQ14_VECTOR, },
162 [15] = { .vector = IRQ15_VECTOR, },
163};
164 145
165void __init io_apic_disable_legacy(void) 146void __init io_apic_disable_legacy(void)
166{ 147{
@@ -185,8 +166,14 @@ int __init arch_early_irq_init(void)
185 desc->chip_data = &cfg[i]; 166 desc->chip_data = &cfg[i];
186 zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); 167 zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
187 zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node); 168 zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
188 if (i < nr_legacy_irqs) 169 /*
189 cpumask_setall(cfg[i].domain); 170 * For legacy IRQ's, start with assigning irq0 to irq15 to
171 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
172 */
173 if (i < nr_legacy_irqs) {
174 cfg[i].vector = IRQ0_VECTOR + i;
175 cpumask_set_cpu(0, cfg[i].domain);
176 }
190 } 177 }
191 178
192 return 0; 179 return 0;
@@ -406,7 +393,7 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
406 struct irq_pin_list *entry; 393 struct irq_pin_list *entry;
407 unsigned long flags; 394 unsigned long flags;
408 395
409 spin_lock_irqsave(&ioapic_lock, flags); 396 raw_spin_lock_irqsave(&ioapic_lock, flags);
410 for_each_irq_pin(entry, cfg->irq_2_pin) { 397 for_each_irq_pin(entry, cfg->irq_2_pin) {
411 unsigned int reg; 398 unsigned int reg;
412 int pin; 399 int pin;
@@ -415,11 +402,11 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
415 reg = io_apic_read(entry->apic, 0x10 + pin*2); 402 reg = io_apic_read(entry->apic, 0x10 + pin*2);
416 /* Is the remote IRR bit set? */ 403 /* Is the remote IRR bit set? */
417 if (reg & IO_APIC_REDIR_REMOTE_IRR) { 404 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
418 spin_unlock_irqrestore(&ioapic_lock, flags); 405 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
419 return true; 406 return true;
420 } 407 }
421 } 408 }
422 spin_unlock_irqrestore(&ioapic_lock, flags); 409 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
423 410
424 return false; 411 return false;
425} 412}
@@ -433,10 +420,10 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
433{ 420{
434 union entry_union eu; 421 union entry_union eu;
435 unsigned long flags; 422 unsigned long flags;
436 spin_lock_irqsave(&ioapic_lock, flags); 423 raw_spin_lock_irqsave(&ioapic_lock, flags);
437 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); 424 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
438 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); 425 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
439 spin_unlock_irqrestore(&ioapic_lock, flags); 426 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
440 return eu.entry; 427 return eu.entry;
441} 428}
442 429
@@ -459,9 +446,9 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
459void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 446void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
460{ 447{
461 unsigned long flags; 448 unsigned long flags;
462 spin_lock_irqsave(&ioapic_lock, flags); 449 raw_spin_lock_irqsave(&ioapic_lock, flags);
463 __ioapic_write_entry(apic, pin, e); 450 __ioapic_write_entry(apic, pin, e);
464 spin_unlock_irqrestore(&ioapic_lock, flags); 451 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
465} 452}
466 453
467/* 454/*
@@ -474,10 +461,10 @@ static void ioapic_mask_entry(int apic, int pin)
474 unsigned long flags; 461 unsigned long flags;
475 union entry_union eu = { .entry.mask = 1 }; 462 union entry_union eu = { .entry.mask = 1 };
476 463
477 spin_lock_irqsave(&ioapic_lock, flags); 464 raw_spin_lock_irqsave(&ioapic_lock, flags);
478 io_apic_write(apic, 0x10 + 2*pin, eu.w1); 465 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
479 io_apic_write(apic, 0x11 + 2*pin, eu.w2); 466 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
480 spin_unlock_irqrestore(&ioapic_lock, flags); 467 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
481} 468}
482 469
483/* 470/*
@@ -604,9 +591,9 @@ static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
604 591
605 BUG_ON(!cfg); 592 BUG_ON(!cfg);
606 593
607 spin_lock_irqsave(&ioapic_lock, flags); 594 raw_spin_lock_irqsave(&ioapic_lock, flags);
608 __mask_IO_APIC_irq(cfg); 595 __mask_IO_APIC_irq(cfg);
609 spin_unlock_irqrestore(&ioapic_lock, flags); 596 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
610} 597}
611 598
612static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) 599static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
@@ -614,9 +601,9 @@ static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
614 struct irq_cfg *cfg = desc->chip_data; 601 struct irq_cfg *cfg = desc->chip_data;
615 unsigned long flags; 602 unsigned long flags;
616 603
617 spin_lock_irqsave(&ioapic_lock, flags); 604 raw_spin_lock_irqsave(&ioapic_lock, flags);
618 __unmask_IO_APIC_irq(cfg); 605 __unmask_IO_APIC_irq(cfg);
619 spin_unlock_irqrestore(&ioapic_lock, flags); 606 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
620} 607}
621 608
622static void mask_IO_APIC_irq(unsigned int irq) 609static void mask_IO_APIC_irq(unsigned int irq)
@@ -1140,12 +1127,12 @@ void lock_vector_lock(void)
1140 /* Used to ensure the online set of cpus does not change 1127 /* Used to ensure the online set of cpus does not change
1141 * during assign_irq_vector. 1128 * during assign_irq_vector.
1142 */ 1129 */
1143 spin_lock(&vector_lock); 1130 raw_spin_lock(&vector_lock);
1144} 1131}
1145 1132
1146void unlock_vector_lock(void) 1133void unlock_vector_lock(void)
1147{ 1134{
1148 spin_unlock(&vector_lock); 1135 raw_spin_unlock(&vector_lock);
1149} 1136}
1150 1137
1151static int 1138static int
@@ -1162,7 +1149,8 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1162 * Also, we've got to be careful not to trash gate 1149 * Also, we've got to be careful not to trash gate
1163 * 0x80, because int 0x80 is hm, kind of importantish. ;) 1150 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1164 */ 1151 */
1165 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; 1152 static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
1153 static int current_offset = VECTOR_OFFSET_START % 8;
1166 unsigned int old_vector; 1154 unsigned int old_vector;
1167 int cpu, err; 1155 int cpu, err;
1168 cpumask_var_t tmp_mask; 1156 cpumask_var_t tmp_mask;
@@ -1198,7 +1186,7 @@ next:
1198 if (vector >= first_system_vector) { 1186 if (vector >= first_system_vector) {
1199 /* If out of vectors on large boxen, must share them. */ 1187 /* If out of vectors on large boxen, must share them. */
1200 offset = (offset + 1) % 8; 1188 offset = (offset + 1) % 8;
1201 vector = FIRST_DEVICE_VECTOR + offset; 1189 vector = FIRST_EXTERNAL_VECTOR + offset;
1202 } 1190 }
1203 if (unlikely(current_vector == vector)) 1191 if (unlikely(current_vector == vector))
1204 continue; 1192 continue;
@@ -1232,9 +1220,9 @@ int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1232 int err; 1220 int err;
1233 unsigned long flags; 1221 unsigned long flags;
1234 1222
1235 spin_lock_irqsave(&vector_lock, flags); 1223 raw_spin_lock_irqsave(&vector_lock, flags);
1236 err = __assign_irq_vector(irq, cfg, mask); 1224 err = __assign_irq_vector(irq, cfg, mask);
1237 spin_unlock_irqrestore(&vector_lock, flags); 1225 raw_spin_unlock_irqrestore(&vector_lock, flags);
1238 return err; 1226 return err;
1239} 1227}
1240 1228
@@ -1268,11 +1256,16 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1268void __setup_vector_irq(int cpu) 1256void __setup_vector_irq(int cpu)
1269{ 1257{
1270 /* Initialize vector_irq on a new cpu */ 1258 /* Initialize vector_irq on a new cpu */
1271 /* This function must be called with vector_lock held */
1272 int irq, vector; 1259 int irq, vector;
1273 struct irq_cfg *cfg; 1260 struct irq_cfg *cfg;
1274 struct irq_desc *desc; 1261 struct irq_desc *desc;
1275 1262
1263 /*
1264 * vector_lock will make sure that we don't run into irq vector
1265 * assignments that might be happening on another cpu in parallel,
1266 * while we setup our initial vector to irq mappings.
1267 */
1268 raw_spin_lock(&vector_lock);
1276 /* Mark the inuse vectors */ 1269 /* Mark the inuse vectors */
1277 for_each_irq_desc(irq, desc) { 1270 for_each_irq_desc(irq, desc) {
1278 cfg = desc->chip_data; 1271 cfg = desc->chip_data;
@@ -1291,6 +1284,7 @@ void __setup_vector_irq(int cpu)
1291 if (!cpumask_test_cpu(cpu, cfg->domain)) 1284 if (!cpumask_test_cpu(cpu, cfg->domain))
1292 per_cpu(vector_irq, cpu)[vector] = -1; 1285 per_cpu(vector_irq, cpu)[vector] = -1;
1293 } 1286 }
1287 raw_spin_unlock(&vector_lock);
1294} 1288}
1295 1289
1296static struct irq_chip ioapic_chip; 1290static struct irq_chip ioapic_chip;
@@ -1440,6 +1434,14 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
1440 1434
1441 cfg = desc->chip_data; 1435 cfg = desc->chip_data;
1442 1436
1437 /*
1438 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
1439 * controllers like 8259. Now that IO-APIC can handle this irq, update
1440 * the cfg->domain.
1441 */
1442 if (irq < nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
1443 apic->vector_allocation_domain(0, cfg->domain);
1444
1443 if (assign_irq_vector(irq, cfg, apic->target_cpus())) 1445 if (assign_irq_vector(irq, cfg, apic->target_cpus()))
1444 return; 1446 return;
1445 1447
@@ -1651,14 +1653,14 @@ __apicdebuginit(void) print_IO_APIC(void)
1651 1653
1652 for (apic = 0; apic < nr_ioapics; apic++) { 1654 for (apic = 0; apic < nr_ioapics; apic++) {
1653 1655
1654 spin_lock_irqsave(&ioapic_lock, flags); 1656 raw_spin_lock_irqsave(&ioapic_lock, flags);
1655 reg_00.raw = io_apic_read(apic, 0); 1657 reg_00.raw = io_apic_read(apic, 0);
1656 reg_01.raw = io_apic_read(apic, 1); 1658 reg_01.raw = io_apic_read(apic, 1);
1657 if (reg_01.bits.version >= 0x10) 1659 if (reg_01.bits.version >= 0x10)
1658 reg_02.raw = io_apic_read(apic, 2); 1660 reg_02.raw = io_apic_read(apic, 2);
1659 if (reg_01.bits.version >= 0x20) 1661 if (reg_01.bits.version >= 0x20)
1660 reg_03.raw = io_apic_read(apic, 3); 1662 reg_03.raw = io_apic_read(apic, 3);
1661 spin_unlock_irqrestore(&ioapic_lock, flags); 1663 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1662 1664
1663 printk("\n"); 1665 printk("\n");
1664 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid); 1666 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
@@ -1953,9 +1955,9 @@ void __init enable_IO_APIC(void)
1953 * The number of IO-APIC IRQ registers (== #pins): 1955 * The number of IO-APIC IRQ registers (== #pins):
1954 */ 1956 */
1955 for (apic = 0; apic < nr_ioapics; apic++) { 1957 for (apic = 0; apic < nr_ioapics; apic++) {
1956 spin_lock_irqsave(&ioapic_lock, flags); 1958 raw_spin_lock_irqsave(&ioapic_lock, flags);
1957 reg_01.raw = io_apic_read(apic, 1); 1959 reg_01.raw = io_apic_read(apic, 1);
1958 spin_unlock_irqrestore(&ioapic_lock, flags); 1960 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1959 nr_ioapic_registers[apic] = reg_01.bits.entries+1; 1961 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1960 } 1962 }
1961 1963
@@ -2095,9 +2097,9 @@ void __init setup_ioapic_ids_from_mpc(void)
2095 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { 2097 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
2096 2098
2097 /* Read the register 0 value */ 2099 /* Read the register 0 value */
2098 spin_lock_irqsave(&ioapic_lock, flags); 2100 raw_spin_lock_irqsave(&ioapic_lock, flags);
2099 reg_00.raw = io_apic_read(apic_id, 0); 2101 reg_00.raw = io_apic_read(apic_id, 0);
2100 spin_unlock_irqrestore(&ioapic_lock, flags); 2102 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2101 2103
2102 old_id = mp_ioapics[apic_id].apicid; 2104 old_id = mp_ioapics[apic_id].apicid;
2103 2105
@@ -2156,16 +2158,16 @@ void __init setup_ioapic_ids_from_mpc(void)
2156 mp_ioapics[apic_id].apicid); 2158 mp_ioapics[apic_id].apicid);
2157 2159
2158 reg_00.bits.ID = mp_ioapics[apic_id].apicid; 2160 reg_00.bits.ID = mp_ioapics[apic_id].apicid;
2159 spin_lock_irqsave(&ioapic_lock, flags); 2161 raw_spin_lock_irqsave(&ioapic_lock, flags);
2160 io_apic_write(apic_id, 0, reg_00.raw); 2162 io_apic_write(apic_id, 0, reg_00.raw);
2161 spin_unlock_irqrestore(&ioapic_lock, flags); 2163 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2162 2164
2163 /* 2165 /*
2164 * Sanity check 2166 * Sanity check
2165 */ 2167 */
2166 spin_lock_irqsave(&ioapic_lock, flags); 2168 raw_spin_lock_irqsave(&ioapic_lock, flags);
2167 reg_00.raw = io_apic_read(apic_id, 0); 2169 reg_00.raw = io_apic_read(apic_id, 0);
2168 spin_unlock_irqrestore(&ioapic_lock, flags); 2170 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2169 if (reg_00.bits.ID != mp_ioapics[apic_id].apicid) 2171 if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
2170 printk("could not set ID!\n"); 2172 printk("could not set ID!\n");
2171 else 2173 else
@@ -2248,7 +2250,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
2248 unsigned long flags; 2250 unsigned long flags;
2249 struct irq_cfg *cfg; 2251 struct irq_cfg *cfg;
2250 2252
2251 spin_lock_irqsave(&ioapic_lock, flags); 2253 raw_spin_lock_irqsave(&ioapic_lock, flags);
2252 if (irq < nr_legacy_irqs) { 2254 if (irq < nr_legacy_irqs) {
2253 disable_8259A_irq(irq); 2255 disable_8259A_irq(irq);
2254 if (i8259A_irq_pending(irq)) 2256 if (i8259A_irq_pending(irq))
@@ -2256,7 +2258,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
2256 } 2258 }
2257 cfg = irq_cfg(irq); 2259 cfg = irq_cfg(irq);
2258 __unmask_IO_APIC_irq(cfg); 2260 __unmask_IO_APIC_irq(cfg);
2259 spin_unlock_irqrestore(&ioapic_lock, flags); 2261 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2260 2262
2261 return was_pending; 2263 return was_pending;
2262} 2264}
@@ -2267,9 +2269,9 @@ static int ioapic_retrigger_irq(unsigned int irq)
2267 struct irq_cfg *cfg = irq_cfg(irq); 2269 struct irq_cfg *cfg = irq_cfg(irq);
2268 unsigned long flags; 2270 unsigned long flags;
2269 2271
2270 spin_lock_irqsave(&vector_lock, flags); 2272 raw_spin_lock_irqsave(&vector_lock, flags);
2271 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); 2273 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
2272 spin_unlock_irqrestore(&vector_lock, flags); 2274 raw_spin_unlock_irqrestore(&vector_lock, flags);
2273 2275
2274 return 1; 2276 return 1;
2275} 2277}
@@ -2362,14 +2364,14 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2362 irq = desc->irq; 2364 irq = desc->irq;
2363 cfg = desc->chip_data; 2365 cfg = desc->chip_data;
2364 2366
2365 spin_lock_irqsave(&ioapic_lock, flags); 2367 raw_spin_lock_irqsave(&ioapic_lock, flags);
2366 ret = set_desc_affinity(desc, mask, &dest); 2368 ret = set_desc_affinity(desc, mask, &dest);
2367 if (!ret) { 2369 if (!ret) {
2368 /* Only the high 8 bits are valid. */ 2370 /* Only the high 8 bits are valid. */
2369 dest = SET_APIC_LOGICAL_ID(dest); 2371 dest = SET_APIC_LOGICAL_ID(dest);
2370 __target_IO_APIC_irq(irq, dest, cfg); 2372 __target_IO_APIC_irq(irq, dest, cfg);
2371 } 2373 }
2372 spin_unlock_irqrestore(&ioapic_lock, flags); 2374 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2373 2375
2374 return ret; 2376 return ret;
2375} 2377}
@@ -2604,9 +2606,9 @@ static void eoi_ioapic_irq(struct irq_desc *desc)
2604 irq = desc->irq; 2606 irq = desc->irq;
2605 cfg = desc->chip_data; 2607 cfg = desc->chip_data;
2606 2608
2607 spin_lock_irqsave(&ioapic_lock, flags); 2609 raw_spin_lock_irqsave(&ioapic_lock, flags);
2608 __eoi_ioapic_irq(irq, cfg); 2610 __eoi_ioapic_irq(irq, cfg);
2609 spin_unlock_irqrestore(&ioapic_lock, flags); 2611 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2610} 2612}
2611 2613
2612static void ack_apic_level(unsigned int irq) 2614static void ack_apic_level(unsigned int irq)
@@ -3188,13 +3190,13 @@ static int ioapic_resume(struct sys_device *dev)
3188 data = container_of(dev, struct sysfs_ioapic_data, dev); 3190 data = container_of(dev, struct sysfs_ioapic_data, dev);
3189 entry = data->entry; 3191 entry = data->entry;
3190 3192
3191 spin_lock_irqsave(&ioapic_lock, flags); 3193 raw_spin_lock_irqsave(&ioapic_lock, flags);
3192 reg_00.raw = io_apic_read(dev->id, 0); 3194 reg_00.raw = io_apic_read(dev->id, 0);
3193 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) { 3195 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
3194 reg_00.bits.ID = mp_ioapics[dev->id].apicid; 3196 reg_00.bits.ID = mp_ioapics[dev->id].apicid;
3195 io_apic_write(dev->id, 0, reg_00.raw); 3197 io_apic_write(dev->id, 0, reg_00.raw);
3196 } 3198 }
3197 spin_unlock_irqrestore(&ioapic_lock, flags); 3199 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3198 for (i = 0; i < nr_ioapic_registers[dev->id]; i++) 3200 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
3199 ioapic_write_entry(dev->id, i, entry[i]); 3201 ioapic_write_entry(dev->id, i, entry[i]);
3200 3202
@@ -3257,7 +3259,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
3257 if (irq_want < nr_irqs_gsi) 3259 if (irq_want < nr_irqs_gsi)
3258 irq_want = nr_irqs_gsi; 3260 irq_want = nr_irqs_gsi;
3259 3261
3260 spin_lock_irqsave(&vector_lock, flags); 3262 raw_spin_lock_irqsave(&vector_lock, flags);
3261 for (new = irq_want; new < nr_irqs; new++) { 3263 for (new = irq_want; new < nr_irqs; new++) {
3262 desc_new = irq_to_desc_alloc_node(new, node); 3264 desc_new = irq_to_desc_alloc_node(new, node);
3263 if (!desc_new) { 3265 if (!desc_new) {
@@ -3276,7 +3278,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
3276 irq = new; 3278 irq = new;
3277 break; 3279 break;
3278 } 3280 }
3279 spin_unlock_irqrestore(&vector_lock, flags); 3281 raw_spin_unlock_irqrestore(&vector_lock, flags);
3280 3282
3281 if (irq > 0) 3283 if (irq > 0)
3282 dynamic_irq_init_keep_chip_data(irq); 3284 dynamic_irq_init_keep_chip_data(irq);
@@ -3306,9 +3308,9 @@ void destroy_irq(unsigned int irq)
3306 dynamic_irq_cleanup_keep_chip_data(irq); 3308 dynamic_irq_cleanup_keep_chip_data(irq);
3307 3309
3308 free_irte(irq); 3310 free_irte(irq);
3309 spin_lock_irqsave(&vector_lock, flags); 3311 raw_spin_lock_irqsave(&vector_lock, flags);
3310 __clear_irq_vector(irq, get_irq_chip_data(irq)); 3312 __clear_irq_vector(irq, get_irq_chip_data(irq));
3311 spin_unlock_irqrestore(&vector_lock, flags); 3313 raw_spin_unlock_irqrestore(&vector_lock, flags);
3312} 3314}
3313 3315
3314/* 3316/*
@@ -3845,9 +3847,9 @@ int __init io_apic_get_redir_entries (int ioapic)
3845 union IO_APIC_reg_01 reg_01; 3847 union IO_APIC_reg_01 reg_01;
3846 unsigned long flags; 3848 unsigned long flags;
3847 3849
3848 spin_lock_irqsave(&ioapic_lock, flags); 3850 raw_spin_lock_irqsave(&ioapic_lock, flags);
3849 reg_01.raw = io_apic_read(ioapic, 1); 3851 reg_01.raw = io_apic_read(ioapic, 1);
3850 spin_unlock_irqrestore(&ioapic_lock, flags); 3852 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3851 3853
3852 return reg_01.bits.entries; 3854 return reg_01.bits.entries;
3853} 3855}
@@ -3987,9 +3989,9 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
3987 if (physids_empty(apic_id_map)) 3989 if (physids_empty(apic_id_map))
3988 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); 3990 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
3989 3991
3990 spin_lock_irqsave(&ioapic_lock, flags); 3992 raw_spin_lock_irqsave(&ioapic_lock, flags);
3991 reg_00.raw = io_apic_read(ioapic, 0); 3993 reg_00.raw = io_apic_read(ioapic, 0);
3992 spin_unlock_irqrestore(&ioapic_lock, flags); 3994 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3993 3995
3994 if (apic_id >= get_physical_broadcast()) { 3996 if (apic_id >= get_physical_broadcast()) {
3995 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " 3997 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
@@ -4023,10 +4025,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
4023 if (reg_00.bits.ID != apic_id) { 4025 if (reg_00.bits.ID != apic_id) {
4024 reg_00.bits.ID = apic_id; 4026 reg_00.bits.ID = apic_id;
4025 4027
4026 spin_lock_irqsave(&ioapic_lock, flags); 4028 raw_spin_lock_irqsave(&ioapic_lock, flags);
4027 io_apic_write(ioapic, 0, reg_00.raw); 4029 io_apic_write(ioapic, 0, reg_00.raw);
4028 reg_00.raw = io_apic_read(ioapic, 0); 4030 reg_00.raw = io_apic_read(ioapic, 0);
4029 spin_unlock_irqrestore(&ioapic_lock, flags); 4031 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
4030 4032
4031 /* Sanity check */ 4033 /* Sanity check */
4032 if (reg_00.bits.ID != apic_id) { 4034 if (reg_00.bits.ID != apic_id) {
@@ -4047,9 +4049,9 @@ int __init io_apic_get_version(int ioapic)
4047 union IO_APIC_reg_01 reg_01; 4049 union IO_APIC_reg_01 reg_01;
4048 unsigned long flags; 4050 unsigned long flags;
4049 4051
4050 spin_lock_irqsave(&ioapic_lock, flags); 4052 raw_spin_lock_irqsave(&ioapic_lock, flags);
4051 reg_01.raw = io_apic_read(ioapic, 1); 4053 reg_01.raw = io_apic_read(ioapic, 1);
4052 spin_unlock_irqrestore(&ioapic_lock, flags); 4054 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
4053 4055
4054 return reg_01.bits.version; 4056 return reg_01.bits.version;
4055} 4057}
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 0159a69396cb..24e7742d633a 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -416,13 +416,13 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
416 416
417 /* We can be called before check_nmi_watchdog, hence NULL check. */ 417 /* We can be called before check_nmi_watchdog, hence NULL check. */
418 if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { 418 if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
419 static DEFINE_SPINLOCK(lock); /* Serialise the printks */ 419 static DEFINE_RAW_SPINLOCK(lock); /* Serialise the printks */
420 420
421 spin_lock(&lock); 421 raw_spin_lock(&lock);
422 printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); 422 printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
423 show_regs(regs); 423 show_regs(regs);
424 dump_stack(); 424 dump_stack();
425 spin_unlock(&lock); 425 raw_spin_unlock(&lock);
426 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); 426 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
427 427
428 rc = 1; 428 rc = 1;
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index d5932226614f..fce55d532631 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -84,24 +84,7 @@ static struct irqaction irq2 = {
84}; 84};
85 85
86DEFINE_PER_CPU(vector_irq_t, vector_irq) = { 86DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
87 [0 ... IRQ0_VECTOR - 1] = -1, 87 [0 ... NR_VECTORS - 1] = -1,
88 [IRQ0_VECTOR] = 0,
89 [IRQ1_VECTOR] = 1,
90 [IRQ2_VECTOR] = 2,
91 [IRQ3_VECTOR] = 3,
92 [IRQ4_VECTOR] = 4,
93 [IRQ5_VECTOR] = 5,
94 [IRQ6_VECTOR] = 6,
95 [IRQ7_VECTOR] = 7,
96 [IRQ8_VECTOR] = 8,
97 [IRQ9_VECTOR] = 9,
98 [IRQ10_VECTOR] = 10,
99 [IRQ11_VECTOR] = 11,
100 [IRQ12_VECTOR] = 12,
101 [IRQ13_VECTOR] = 13,
102 [IRQ14_VECTOR] = 14,
103 [IRQ15_VECTOR] = 15,
104 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
105}; 88};
106 89
107int vector_used_by_percpu_irq(unsigned int vector) 90int vector_used_by_percpu_irq(unsigned int vector)
@@ -116,6 +99,9 @@ int vector_used_by_percpu_irq(unsigned int vector)
116 return 0; 99 return 0;
117} 100}
118 101
102/* Number of legacy interrupts */
103int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
104
119void __init init_ISA_irqs(void) 105void __init init_ISA_irqs(void)
120{ 106{
121 int i; 107 int i;
@@ -142,6 +128,19 @@ void __init init_ISA_irqs(void)
142 128
143void __init init_IRQ(void) 129void __init init_IRQ(void)
144{ 130{
131 int i;
132
133 /*
 134 * On cpu 0, assign IRQ0_VECTOR..IRQ15_VECTOR's to IRQ 0..15.
135 * If these IRQ's are handled by legacy interrupt-controllers like PIC,
136 * then this configuration will likely be static after the boot. If
 137 * these IRQ's are handled by more modern controllers like IO-APIC,
138 * then this vector space can be freed and re-used dynamically as the
139 * irq's migrate etc.
140 */
141 for (i = 0; i < nr_legacy_irqs; i++)
142 per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;
143
145 x86_init.irqs.intr_init(); 144 x86_init.irqs.intr_init();
146} 145}
147 146
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 96f5f40a5c29..da99eef1f0dc 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -241,6 +241,11 @@ static void __cpuinit smp_callin(void)
241 map_cpu_to_logical_apicid(); 241 map_cpu_to_logical_apicid();
242 242
243 notify_cpu_starting(cpuid); 243 notify_cpu_starting(cpuid);
244
245 /*
246 * Need to setup vector mappings before we enable interrupts.
247 */
248 __setup_vector_irq(smp_processor_id());
244 /* 249 /*
245 * Get our bogomips. 250 * Get our bogomips.
246 * 251 *
@@ -315,7 +320,6 @@ notrace static void __cpuinit start_secondary(void *unused)
315 */ 320 */
316 ipi_call_lock(); 321 ipi_call_lock();
317 lock_vector_lock(); 322 lock_vector_lock();
318 __setup_vector_irq(smp_processor_id());
319 set_cpu_online(smp_processor_id(), true); 323 set_cpu_online(smp_processor_id(), true);
320 unlock_vector_lock(); 324 unlock_vector_lock();
321 ipi_call_unlock(); 325 ipi_call_unlock();
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 74c92bb194df..2f1ca5614292 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -79,11 +79,7 @@ unsigned long vmi_tsc_khz(void)
79 79
80static inline unsigned int vmi_get_timer_vector(void) 80static inline unsigned int vmi_get_timer_vector(void)
81{ 81{
82#ifdef CONFIG_X86_IO_APIC 82 return IRQ0_VECTOR;
83 return FIRST_DEVICE_VECTOR;
84#else
85 return FIRST_EXTERNAL_VECTOR;
86#endif
87} 83}
88 84
89/** vmi clockchip */ 85/** vmi clockchip */