Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/ia32/ia32_aout.c	1
-rw-r--r--	arch/x86/include/asm/i8259.h	2
-rw-r--r--	arch/x86/include/asm/io_apic.h	1
-rw-r--r--	arch/x86/include/asm/irq.h	1
-rw-r--r--	arch/x86/include/asm/irq_vectors.h	48
-rw-r--r--	arch/x86/kernel/acpi/boot.c	9
-rw-r--r--	arch/x86/kernel/apic/io_apic.c	229
-rw-r--r--	arch/x86/kernel/apic/nmi.c	6
-rw-r--r--	arch/x86/kernel/i8259.c	30
-rw-r--r--	arch/x86/kernel/irqinit.c	35
-rw-r--r--	arch/x86/kernel/reboot.c	8
-rw-r--r--	arch/x86/kernel/smpboot.c	13
-rw-r--r--	arch/x86/kernel/time.c	4
-rw-r--r--	arch/x86/kernel/visws_quirks.c	6
-rw-r--r--	arch/x86/kernel/vmiclock_32.c	6
15 files changed, 226 insertions(+), 173 deletions(-)
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index f9f472462753..14531abdd0ce 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -327,7 +327,6 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	current->mm->free_area_cache = TASK_UNMAPPED_BASE;
 	current->mm->cached_hole_size = 0;
 
-	current->mm->mmap = NULL;
 	install_exec_creds(bprm);
 	current->flags &= ~PF_FORKNOEXEC;
 
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 2832babd91fc..1655147646aa 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -24,7 +24,7 @@ extern unsigned int cached_irq_mask;
 #define SLAVE_ICW4_DEFAULT	0x01
 #define PIC_ICW4_AEOI		2
 
-extern spinlock_t i8259A_lock;
+extern raw_spinlock_t i8259A_lock;
 
 /* the PIC may need a careful delay on some platforms, hence specific calls */
 static inline unsigned char inb_pic(unsigned int port)
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 84fdd5110948..31dfb42d8649 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -158,6 +158,7 @@ extern int io_apic_get_redir_entries(int ioapic);
 struct io_apic_irq_attr;
 extern int io_apic_set_pci_routing(struct device *dev, int irq,
 		 struct io_apic_irq_attr *irq_attr);
+void setup_IO_APIC_irq_extra(u32 gsi);
 extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 extern void ioapic_init_mappings(void);
 extern void ioapic_insert_resources(void);
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 5458380b6ef8..262292729fc4 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -48,5 +48,6 @@ extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
 extern int vector_used_by_percpu_irq(unsigned int vector);
 
 extern void init_ISA_irqs(void);
+extern int nr_legacy_irqs;
 
 #endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 4611f085cd43..8767d99c4f64 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -28,28 +28,33 @@
 #define MCE_VECTOR			0x12
 
 /*
- * IDT vectors usable for external interrupt sources start
- * at 0x20:
+ * IDT vectors usable for external interrupt sources start at 0x20.
+ * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
  */
 #define FIRST_EXTERNAL_VECTOR		0x20
-
-#ifdef CONFIG_X86_32
-# define SYSCALL_VECTOR			0x80
-# define IA32_SYSCALL_VECTOR		0x80
-#else
-# define IA32_SYSCALL_VECTOR		0x80
-#endif
+/*
+ * We start allocating at 0x21 to spread out vectors evenly between
+ * priority levels. (0x80 is the syscall vector)
+ */
+#define VECTOR_OFFSET_START		1
 
 /*
- * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
- * cleanup after irq migration.
+ * Reserve the lowest usable vector (and hence lowest priority) 0x20 for
+ * triggering cleanup after irq migration. 0x21-0x2f will still be used
+ * for device interrupts.
  */
 #define IRQ_MOVE_CLEANUP_VECTOR		FIRST_EXTERNAL_VECTOR
 
+#define IA32_SYSCALL_VECTOR		0x80
+#ifdef CONFIG_X86_32
+# define SYSCALL_VECTOR			0x80
+#endif
+
 /*
  * Vectors 0x30-0x3f are used for ISA interrupts.
+ *   round up to the next 16-vector boundary
  */
-#define IRQ0_VECTOR			(FIRST_EXTERNAL_VECTOR + 0x10)
+#define IRQ0_VECTOR			((FIRST_EXTERNAL_VECTOR + 16) & ~15)
 
 #define IRQ1_VECTOR			(IRQ0_VECTOR + 1)
 #define IRQ2_VECTOR			(IRQ0_VECTOR + 2)
@@ -120,13 +125,6 @@
  */
 #define MCE_SELF_VECTOR			0xeb
 
-/*
- * First APIC vector available to drivers: (vectors 0x30-0xee) we
- * start at 0x31(0x41) to spread out vectors evenly between priority
- * levels. (0x80 is the syscall vector)
- */
-#define FIRST_DEVICE_VECTOR		(IRQ15_VECTOR + 2)
-
 #define NR_VECTORS			256
 
 #define FPU_IRQ				13
@@ -154,21 +152,21 @@ static inline int invalid_vm86_irq(int irq)
 
 #define NR_IRQS_LEGACY			16
 
-#define CPU_VECTOR_LIMIT		(  8 * NR_CPUS      )
 #define IO_APIC_VECTOR_LIMIT		( 32 * MAX_IO_APICS )
 
 #ifdef CONFIG_X86_IO_APIC
 # ifdef CONFIG_SPARSE_IRQ
+#  define CPU_VECTOR_LIMIT		(64 * NR_CPUS)
 #  define NR_IRQS					\
 	(CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?	\
 		(NR_VECTORS + CPU_VECTOR_LIMIT)  :	\
 		(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
 # else
-#  if NR_CPUS < MAX_IO_APICS
-#   define NR_IRQS			(NR_VECTORS + 4*CPU_VECTOR_LIMIT)
-#  else
-#   define NR_IRQS			(NR_VECTORS + IO_APIC_VECTOR_LIMIT)
-#  endif
+#  define CPU_VECTOR_LIMIT		(32 * NR_CPUS)
+#  define NR_IRQS					\
+	(CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ?	\
+		(NR_VECTORS + CPU_VECTOR_LIMIT)  :	\
+		(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
 # endif
 #else /* !CONFIG_X86_IO_APIC: */
 # define NR_IRQS			NR_IRQS_LEGACY
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 054a5f5548b0..db2773c6defd 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -447,6 +447,12 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
 {
 	*irq = gsi;
+
+#ifdef CONFIG_X86_IO_APIC
+	if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
+		setup_IO_APIC_irq_extra(gsi);
+#endif
+
 	return 0;
 }
 
@@ -474,7 +480,8 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
 		plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
 	}
 #endif
-	acpi_gsi_to_irq(plat_gsi, &irq);
+	irq = plat_gsi;
+
 	return irq;
 }
 
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 3592a72f3f0a..b34854358ee6 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -73,8 +73,8 @@
  */
 int sis_apic_bug = -1;
 
-static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+static DEFINE_RAW_SPINLOCK(ioapic_lock);
+static DEFINE_RAW_SPINLOCK(vector_lock);
 
 /*
  * # of IRQ routing registers
@@ -167,8 +167,14 @@ int __init arch_early_irq_init(void)
 		desc->chip_data = &cfg[i];
 		zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
 		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
-		if (i < legacy_pic->nr_legacy_irqs)
-			cpumask_setall(cfg[i].domain);
+		/*
+		 * For legacy IRQ's, start with assigning irq0 to irq15 to
+		 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
+		 */
+		if (i < legacy_pic->nr_legacy_irqs) {
+			cfg[i].vector = IRQ0_VECTOR + i;
+			cpumask_set_cpu(0, cfg[i].domain);
+		}
 	}
 
 	return 0;
@@ -388,7 +394,7 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
 	struct irq_pin_list *entry;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	for_each_irq_pin(entry, cfg->irq_2_pin) {
 		unsigned int reg;
 		int pin;
@@ -397,11 +403,11 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
 		reg = io_apic_read(entry->apic, 0x10 + pin*2);
 		/* Is the remote IRR bit set? */
 		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
-			spin_unlock_irqrestore(&ioapic_lock, flags);
+			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 			return true;
 		}
 	}
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return false;
 }
@@ -415,10 +421,10 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
 {
 	union entry_union eu;
 	unsigned long flags;
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
 	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 	return eu.entry;
 }
 
@@ -441,9 +447,9 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	__ioapic_write_entry(apic, pin, e);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 /*
@@ -456,10 +462,10 @@ static void ioapic_mask_entry(int apic, int pin)
 	unsigned long flags;
 	union entry_union eu = { .entry.mask = 1 };
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
 	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 /*
@@ -586,9 +592,9 @@ static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
 
 	BUG_ON(!cfg);
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	__mask_IO_APIC_irq(cfg);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
@@ -596,9 +602,9 @@ static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
 	struct irq_cfg *cfg = desc->chip_data;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	__unmask_IO_APIC_irq(cfg);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 static void mask_IO_APIC_irq(unsigned int irq)
@@ -1122,12 +1128,12 @@ void lock_vector_lock(void)
 	/* Used to the online set of cpus does not change
 	 * during assign_irq_vector.
 	 */
-	spin_lock(&vector_lock);
+	raw_spin_lock(&vector_lock);
 }
 
 void unlock_vector_lock(void)
 {
-	spin_unlock(&vector_lock);
+	raw_spin_unlock(&vector_lock);
 }
 
 static int
@@ -1144,7 +1150,8 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	 * Also, we've got to be careful not to trash gate
 	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 	 */
-	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
+	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+	static int current_offset = VECTOR_OFFSET_START % 8;
 	unsigned int old_vector;
 	int cpu, err;
 	cpumask_var_t tmp_mask;
@@ -1180,7 +1187,7 @@ next:
 		if (vector >= first_system_vector) {
 			/* If out of vectors on large boxen, must share them. */
 			offset = (offset + 1) % 8;
-			vector = FIRST_DEVICE_VECTOR + offset;
+			vector = FIRST_EXTERNAL_VECTOR + offset;
 		}
 		if (unlikely(current_vector == vector))
 			continue;
@@ -1214,9 +1221,9 @@ int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	int err;
 	unsigned long flags;
 
-	spin_lock_irqsave(&vector_lock, flags);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	err = __assign_irq_vector(irq, cfg, mask);
-	spin_unlock_irqrestore(&vector_lock, flags);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 	return err;
 }
 
@@ -1250,11 +1257,16 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 void __setup_vector_irq(int cpu)
 {
 	/* Initialize vector_irq on a new cpu */
-	/* This function must be called with vector_lock held */
 	int irq, vector;
 	struct irq_cfg *cfg;
 	struct irq_desc *desc;
 
+	/*
+	 * vector_lock will make sure that we don't run into irq vector
+	 * assignments that might be happening on another cpu in parallel,
+	 * while we setup our initial vector to irq mappings.
+	 */
+	raw_spin_lock(&vector_lock);
 	/* Mark the inuse vectors */
 	for_each_irq_desc(irq, desc) {
 		cfg = desc->chip_data;
@@ -1273,6 +1285,7 @@ void __setup_vector_irq(int cpu)
 		if (!cpumask_test_cpu(cpu, cfg->domain))
 			per_cpu(vector_irq, cpu)[vector] = -1;
 	}
+	raw_spin_unlock(&vector_lock);
 }
 
 static struct irq_chip ioapic_chip;
@@ -1422,6 +1435,14 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
 
 	cfg = desc->chip_data;
 
+	/*
+	 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
+	 * controllers like 8259. Now that IO-APIC can handle this irq, update
+	 * the cfg->domain.
+	 */
+	if (irq < nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
+		apic->vector_allocation_domain(0, cfg->domain);
+
 	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
 		return;
 
@@ -1521,6 +1542,56 @@ static void __init setup_IO_APIC_irqs(void)
 }
 
 /*
+ * for the gsit that is not in first ioapic
+ * but could not use acpi_register_gsi()
+ * like some special sci in IBM x3330
+ */
+void setup_IO_APIC_irq_extra(u32 gsi)
+{
+	int apic_id = 0, pin, idx, irq;
+	int node = cpu_to_node(boot_cpu_id);
+	struct irq_desc *desc;
+	struct irq_cfg *cfg;
+
+	/*
+	 * Convert 'gsi' to 'ioapic.pin'.
+	 */
+	apic_id = mp_find_ioapic(gsi);
+	if (apic_id < 0)
+		return;
+
+	pin = mp_find_ioapic_pin(apic_id, gsi);
+	idx = find_irq_entry(apic_id, pin, mp_INT);
+	if (idx == -1)
+		return;
+
+	irq = pin_2_irq(idx, apic_id, pin);
+#ifdef CONFIG_SPARSE_IRQ
+	desc = irq_to_desc(irq);
+	if (desc)
+		return;
+#endif
+	desc = irq_to_desc_alloc_node(irq, node);
+	if (!desc) {
+		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+		return;
+	}
+
+	cfg = desc->chip_data;
+	add_pin_to_irq_node(cfg, node, apic_id, pin);
+
+	if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
+		pr_debug("Pin %d-%d already programmed\n",
+			 mp_ioapics[apic_id].apicid, pin);
+		return;
+	}
+	set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);
+
+	setup_IO_APIC_irq(apic_id, pin, irq, desc,
+			irq_trigger(idx), irq_polarity(idx));
+}
+
+/*
  * Set up the timer pin, possibly with the 8259A-master behind.
  */
 static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
@@ -1583,14 +1654,14 @@ __apicdebuginit(void) print_IO_APIC(void)
 
 	for (apic = 0; apic < nr_ioapics; apic++) {
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_00.raw = io_apic_read(apic, 0);
 	reg_01.raw = io_apic_read(apic, 1);
 	if (reg_01.bits.version >= 0x10)
 		reg_02.raw = io_apic_read(apic, 2);
 	if (reg_01.bits.version >= 0x20)
 		reg_03.raw = io_apic_read(apic, 3);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	printk("\n");
 	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
@@ -1812,7 +1883,7 @@ __apicdebuginit(void) print_PIC(void)
 
 	printk(KERN_DEBUG "\nprinting PIC contents\n");
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	v = inb(0xa1) << 8 | inb(0x21);
 	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
@@ -1826,7 +1897,7 @@ __apicdebuginit(void) print_PIC(void)
 	outb(0x0a,0xa0);
 	outb(0x0a,0x20);
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 
 	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
 
@@ -1885,9 +1956,9 @@ void __init enable_IO_APIC(void)
 	 * The number of IO-APIC IRQ registers (== #pins):
 	 */
 	for (apic = 0; apic < nr_ioapics; apic++) {
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		reg_01.raw = io_apic_read(apic, 1);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
 	}
 
@@ -2027,9 +2098,9 @@ void __init setup_ioapic_ids_from_mpc(void)
 	for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
 
 		/* Read the register 0 value */
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		reg_00.raw = io_apic_read(apic_id, 0);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 		old_id = mp_ioapics[apic_id].apicid;
 
@@ -2088,16 +2159,16 @@ void __init setup_ioapic_ids_from_mpc(void)
 					mp_ioapics[apic_id].apicid);
 
 		reg_00.bits.ID = mp_ioapics[apic_id].apicid;
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		io_apic_write(apic_id, 0, reg_00.raw);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 		/*
 		 * Sanity check
 		 */
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		reg_00.raw = io_apic_read(apic_id, 0);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 		if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
 			printk("could not set ID!\n");
 		else
@@ -2180,7 +2251,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
 	unsigned long flags;
 	struct irq_cfg *cfg;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	if (irq < legacy_pic->nr_legacy_irqs) {
 		legacy_pic->chip->mask(irq);
 		if (legacy_pic->irq_pending(irq))
@@ -2188,7 +2259,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
 	}
 	cfg = irq_cfg(irq);
 	__unmask_IO_APIC_irq(cfg);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return was_pending;
 }
@@ -2199,9 +2270,9 @@ static int ioapic_retrigger_irq(unsigned int irq)
 	struct irq_cfg *cfg = irq_cfg(irq);
 	unsigned long flags;
 
-	spin_lock_irqsave(&vector_lock, flags);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
-	spin_unlock_irqrestore(&vector_lock, flags);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 
 	return 1;
 }
@@ -2294,14 +2365,14 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 	irq = desc->irq;
 	cfg = desc->chip_data;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	ret = set_desc_affinity(desc, mask, &dest);
 	if (!ret) {
 		/* Only the high 8 bits are valid. */
 		dest = SET_APIC_LOGICAL_ID(dest);
 		__target_IO_APIC_irq(irq, dest, cfg);
 	}
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return ret;
 }
@@ -2536,9 +2607,9 @@ static void eoi_ioapic_irq(struct irq_desc *desc)
 	irq = desc->irq;
 	cfg = desc->chip_data;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	__eoi_ioapic_irq(irq, cfg);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 static void ack_apic_level(unsigned int irq)
@@ -3120,13 +3191,13 @@ static int ioapic_resume(struct sys_device *dev)
 	data = container_of(dev, struct sysfs_ioapic_data, dev);
 	entry = data->entry;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_00.raw = io_apic_read(dev->id, 0);
 	if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
 		reg_00.bits.ID = mp_ioapics[dev->id].apicid;
 		io_apic_write(dev->id, 0, reg_00.raw);
 	}
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
 		ioapic_write_entry(dev->id, i, entry[i]);
 
@@ -3189,7 +3260,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
 	if (irq_want < nr_irqs_gsi)
 		irq_want = nr_irqs_gsi;
 
-	spin_lock_irqsave(&vector_lock, flags);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	for (new = irq_want; new < nr_irqs; new++) {
 		desc_new = irq_to_desc_alloc_node(new, node);
 		if (!desc_new) {
@@ -3208,14 +3279,11 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
 		irq = new;
 		break;
 	}
-	spin_unlock_irqrestore(&vector_lock, flags);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
+
+	if (irq > 0)
+		dynamic_irq_init_keep_chip_data(irq);
 
-	if (irq > 0) {
-		dynamic_irq_init(irq);
-		/* restore it, in case dynamic_irq_init clear it */
-		if (desc_new)
-			desc_new->chip_data = cfg_new;
-	}
 	return irq;
 }
 
@@ -3237,20 +3305,13 @@ int create_irq(void)
 void destroy_irq(unsigned int irq)
 {
 	unsigned long flags;
-	struct irq_cfg *cfg;
-	struct irq_desc *desc;
 
-	/* store it, in case dynamic_irq_cleanup clear it */
-	desc = irq_to_desc(irq);
-	cfg = desc->chip_data;
-	dynamic_irq_cleanup(irq);
-	/* connect back irq_cfg */
-	desc->chip_data = cfg;
+	dynamic_irq_cleanup_keep_chip_data(irq);
 
 	free_irte(irq);
-	spin_lock_irqsave(&vector_lock, flags);
-	__clear_irq_vector(irq, cfg);
-	spin_unlock_irqrestore(&vector_lock, flags);
+	raw_spin_lock_irqsave(&vector_lock, flags);
+	__clear_irq_vector(irq, get_irq_chip_data(irq));
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 /*
@@ -3787,9 +3848,9 @@ int __init io_apic_get_redir_entries (int ioapic)
 	union IO_APIC_reg_01	reg_01;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_01.raw = io_apic_read(ioapic, 1);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return reg_01.bits.entries;
 }
@@ -3816,28 +3877,6 @@ void __init probe_nr_irqs_gsi(void)
 	printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
 }
 
-#ifdef CONFIG_SPARSE_IRQ
-int __init arch_probe_nr_irqs(void)
-{
-	int nr;
-
-	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
-		nr_irqs = NR_VECTORS * nr_cpu_ids;
-
-	nr = nr_irqs_gsi + 8 * nr_cpu_ids;
-#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
-	/*
-	 * for MSI and HT dyn irq
-	 */
-	nr += nr_irqs_gsi * 16;
-#endif
-	if (nr < nr_irqs)
-		nr_irqs = nr;
-
-	return 0;
-}
-#endif
-
 static int __io_apic_set_pci_routing(struct device *dev, int irq,
 				struct io_apic_irq_attr *irq_attr)
 {
@@ -3951,9 +3990,9 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
 	if (physids_empty(apic_id_map))
 		apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_00.raw = io_apic_read(ioapic, 0);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	if (apic_id >= get_physical_broadcast()) {
 		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
@@ -3987,10 +4026,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
 	if (reg_00.bits.ID != apic_id) {
 		reg_00.bits.ID = apic_id;
 
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		io_apic_write(ioapic, 0, reg_00.raw);
 		reg_00.raw = io_apic_read(ioapic, 0);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 		/* Sanity check */
 		if (reg_00.bits.ID != apic_id) {
@@ -4011,9 +4050,9 @@ int __init io_apic_get_version(int ioapic)
 	union IO_APIC_reg_01	reg_01;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_01.raw = io_apic_read(ioapic, 1);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return reg_01.bits.version;
 }
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 3817739acee9..f72b5f0f388e 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -416,13 +416,13 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 
 	/* We can be called before check_nmi_watchdog, hence NULL check. */
 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
-		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
+		static DEFINE_RAW_SPINLOCK(lock); /* Serialise the printks */
 
-		spin_lock(&lock);
+		raw_spin_lock(&lock);
 		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
 		show_regs(regs);
 		dump_stack();
-		spin_unlock(&lock);
+		raw_spin_unlock(&lock);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 
 		rc = 1;
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 1c790e75f7a0..9bac6817456f 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -32,7 +32,7 @@
  */
 
 static int i8259A_auto_eoi;
-DEFINE_SPINLOCK(i8259A_lock);
+DEFINE_RAW_SPINLOCK(i8259A_lock);
 static void mask_and_ack_8259A(unsigned int);
 static void mask_8259A(void);
 static void unmask_8259A(void);
@@ -74,13 +74,13 @@ static void disable_8259A_irq(unsigned int irq)
 	unsigned int mask = 1 << irq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask |= mask;
 	if (irq & 8)
 		outb(cached_slave_mask, PIC_SLAVE_IMR);
 	else
 		outb(cached_master_mask, PIC_MASTER_IMR);
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
 static void enable_8259A_irq(unsigned int irq)
@@ -88,13 +88,13 @@ static void enable_8259A_irq(unsigned int irq)
 	unsigned int mask = ~(1 << irq);
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask &= mask;
 	if (irq & 8)
 		outb(cached_slave_mask, PIC_SLAVE_IMR);
 	else
 		outb(cached_master_mask, PIC_MASTER_IMR);
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
 static int i8259A_irq_pending(unsigned int irq)
@@ -103,12 +103,12 @@ static int i8259A_irq_pending(unsigned int irq)
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	if (irq < 8)
 		ret = inb(PIC_MASTER_CMD) & mask;
 	else
 		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 
 	return ret;
 }
@@ -156,7 +156,7 @@ static void mask_and_ack_8259A(unsigned int irq)
 	unsigned int irqmask = 1 << irq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	/*
 	 * Lightweight spurious IRQ detection. We do not want
 	 * to overdo spurious IRQ handling - it's usually a sign
@@ -189,7 +189,7 @@ handle_real_irq:
 		outb(cached_master_mask, PIC_MASTER_IMR);
 		outb(0x60+irq, PIC_MASTER_CMD);	/* 'Specific EOI to master */
 	}
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 	return;
 
 spurious_8259A_irq:
@@ -291,24 +291,24 @@ static void mask_8259A(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
 	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
 static void unmask_8259A(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
 	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
 static void init_8259A(int auto_eoi)
@@ -317,7 +317,7 @@ static void init_8259A(int auto_eoi)
 
 	i8259A_auto_eoi = auto_eoi;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
 	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
@@ -362,7 +362,7 @@ static void init_8259A(int auto_eoi)
 	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
 	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
 /*
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 89b9510e8030..d2f787b3de56 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -84,24 +84,7 @@ static struct irqaction irq2 = {
 };
 
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
-	[0 ... IRQ0_VECTOR - 1] = -1,
-	[IRQ0_VECTOR] = 0,
-	[IRQ1_VECTOR] = 1,
-	[IRQ2_VECTOR] = 2,
-	[IRQ3_VECTOR] = 3,
-	[IRQ4_VECTOR] = 4,
-	[IRQ5_VECTOR] = 5,
-	[IRQ6_VECTOR] = 6,
-	[IRQ7_VECTOR] = 7,
-	[IRQ8_VECTOR] = 8,
-	[IRQ9_VECTOR] = 9,
-	[IRQ10_VECTOR] = 10,
-	[IRQ11_VECTOR] = 11,
-	[IRQ12_VECTOR] = 12,
-	[IRQ13_VECTOR] = 13,
-	[IRQ14_VECTOR] = 14,
-	[IRQ15_VECTOR] = 15,
-	[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
+	[0 ... NR_VECTORS - 1] = -1,
 };
 
 int vector_used_by_percpu_irq(unsigned int vector)
@@ -116,6 +99,9 @@ int vector_used_by_percpu_irq(unsigned int vector)
 	return 0;
 }
 
+/* Number of legacy interrupts */
+int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
+
 void __init init_ISA_irqs(void)
 {
 	int i;
@@ -142,6 +128,19 @@ void __init init_ISA_irqs(void)
 
 void __init init_IRQ(void)
 {
+	int i;
+
+	/*
+	 * On cpu 0, Assign IRQ0_VECTOR..IRQ15_VECTOR's to IRQ 0..15.
+	 * If these IRQ's are handled by legacy interrupt-controllers like PIC,
+	 * then this configuration will likely be static after the boot. If
+	 * these IRQ's are handled by more mordern controllers like IO-APIC,
+	 * then this vector space can be freed and re-used dynamically as the
+	 * irq's migrate etc.
+	 */
+	for (i = 0; i < nr_legacy_irqs; i++)
+		per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;
+
 	x86_init.irqs.intr_init();
 }
 
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 704bddcdf64d..8e1aac86b50c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -461,6 +461,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
 		},
 	},
+	{	/* Handle problems with rebooting on the iMac9,1. */
+		.callback = set_pci_reboot,
+		.ident = "Apple iMac9,1",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+		},
+	},
 	{ }
 };
 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f7a52f4a21a5..86f7edcd0438 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -243,6 +243,11 @@ static void __cpuinit smp_callin(void)
 	map_cpu_to_logical_apicid();
 
 	notify_cpu_starting(cpuid);
+
+	/*
+	 * Need to setup vector mappings before we enable interrupts.
+	 */
+	__setup_vector_irq(smp_processor_id());
 	/*
 	 * Get our bogomips.
 	 *
@@ -317,7 +322,6 @@ notrace static void __cpuinit start_secondary(void *unused)
 	 */
 	ipi_call_lock();
 	lock_vector_lock();
-	__setup_vector_irq(smp_processor_id());
 	set_cpu_online(smp_processor_id(), true);
 	unlock_vector_lock();
 	ipi_call_unlock();
@@ -1216,11 +1220,12 @@ __init void prefill_possible_map(void)
 
 	total_cpus = max_t(int, possible, num_processors + disabled_cpus);
 
-	if (possible > CONFIG_NR_CPUS) {
+	/* nr_cpu_ids could be reduced via nr_cpus= */
+	if (possible > nr_cpu_ids) {
 		printk(KERN_WARNING
 			"%d Processors exceeds NR_CPUS limit of %d\n",
-			possible, CONFIG_NR_CPUS);
-		possible = CONFIG_NR_CPUS;
+			possible, nr_cpu_ids);
+		possible = nr_cpu_ids;
 	}
 
 	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index be2573448ed9..fb5cc5e14cfa 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -70,11 +70,11 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 		 * manually to deassert NMI lines for the watchdog if run
 		 * on an 82489DX-based system.
 		 */
-		spin_lock(&i8259A_lock);
+		raw_spin_lock(&i8259A_lock);
 		outb(0x0c, PIC_MASTER_OCW3);
 		/* Ack the IRQ; AEOI will end it automatically. */
 		inb(PIC_MASTER_POLL);
-		spin_unlock(&i8259A_lock);
+		raw_spin_unlock(&i8259A_lock);
 	}
 
 	global_clock_event->event_handler(global_clock_event);
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index f067e9556a47..e680ea52db9b 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -553,7 +553,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 	struct irq_desc *desc;
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	/* Find out what's interrupting in the PIIX4 master 8259 */
 	outb(0x0c, 0x20);		/* OCW3 Poll command */
@@ -590,7 +590,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 		outb(0x60 + realirq, 0x20);
 	}
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 
 	desc = irq_to_desc(realirq);
 
@@ -608,7 +608,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 
 out_unlock:
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 	return IRQ_NONE;
 }
 
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 74c92bb194df..2f1ca5614292 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -79,11 +79,7 @@ unsigned long vmi_tsc_khz(void)
 
 static inline unsigned int vmi_get_timer_vector(void)
 {
-#ifdef CONFIG_X86_IO_APIC
-	return FIRST_DEVICE_VECTOR;
-#else
-	return FIRST_EXTERNAL_VECTOR;
-#endif
+	return IRQ0_VECTOR;
 }
 
 /** vmi clockchip */