author    Ingo Molnar <mingo@elte.hu>    2009-05-01 13:02:50 -0400
committer Ingo Molnar <mingo@elte.hu>    2009-05-01 13:02:50 -0400
commit    4420471f14b79f2a42e4603be7794ea49b68bca4 (patch)
tree      d391d25458bc0aa86dcf2823fd6c1464883b0533 /arch
parent    15e957d08dd4a841359cfec59ecb74041e0097aa (diff)
parent    e0e42142bab96404de535cceb85d6533d5ad7942 (diff)
Merge branch 'x86/apic' into irq/numa
Conflicts:
	arch/x86/kernel/apic/io_apic.c

Merge reason: non-trivial interaction between ongoing work in
io_apic.c and the NUMA migration feature in the irq tree.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/Kconfig | 2
-rw-r--r--  arch/x86/include/asm/apic.h | 28
-rw-r--r--  arch/x86/include/asm/i8259.h | 4
-rw-r--r--  arch/x86/include/asm/io_apic.h | 4
-rw-r--r--  arch/x86/include/asm/irq_remapping.h | 2
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 1
-rw-r--r--  arch/x86/include/asm/setup.h | 1
-rw-r--r--  arch/x86/kernel/Makefile | 2
-rw-r--r--  arch/x86/kernel/apic/apic.c | 253
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c | 2
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 341
-rw-r--r--  arch/x86/kernel/apic/probe_64.c | 2
-rw-r--r--  arch/x86/kernel/apic/summit_32.c | 7
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 2
-rw-r--r--  arch/x86/kernel/irq.c | 19
-rw-r--r--  arch/x86/kernel/irqinit.c (renamed from arch/x86/kernel/irqinit_32.c) | 149
-rw-r--r--  arch/x86/kernel/irqinit_64.c | 177
-rw-r--r--  arch/x86/kernel/setup.c | 18
-rw-r--r--  arch/x86/kernel/smp.c | 20
-rw-r--r--  arch/x86/kernel/smpboot.c | 14
-rw-r--r--  arch/x86/kernel/traps.c | 5
21 files changed, 455 insertions, 598 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 674e21e9f0a0..e03485b2828a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -349,7 +349,7 @@ config X86_UV
349 depends on X86_64 349 depends on X86_64
350 depends on X86_EXTENDED_PLATFORM 350 depends on X86_EXTENDED_PLATFORM
351 depends on NUMA 351 depends on NUMA
352 select X86_X2APIC 352 depends on X86_X2APIC
353 ---help--- 353 ---help---
354 This option is needed in order to support SGI Ultraviolet systems. 354 This option is needed in order to support SGI Ultraviolet systems.
355 If you don't have one of these, you should say N here. 355 If you don't have one of these, you should say N here.
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 42f2f8377422..3738438a91f5 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -107,8 +107,7 @@ extern u32 native_safe_apic_wait_icr_idle(void);
107extern void native_apic_icr_write(u32 low, u32 id); 107extern void native_apic_icr_write(u32 low, u32 id);
108extern u64 native_apic_icr_read(void); 108extern u64 native_apic_icr_read(void);
109 109
110#define EIM_8BIT_APIC_ID 0 110extern int x2apic_mode;
111#define EIM_32BIT_APIC_ID 1
112 111
113#ifdef CONFIG_X86_X2APIC 112#ifdef CONFIG_X86_X2APIC
114/* 113/*
@@ -166,10 +165,9 @@ static inline u64 native_x2apic_icr_read(void)
166 return val; 165 return val;
167} 166}
168 167
169extern int x2apic, x2apic_phys; 168extern int x2apic_phys;
170extern void check_x2apic(void); 169extern void check_x2apic(void);
171extern void enable_x2apic(void); 170extern void enable_x2apic(void);
172extern void enable_IR_x2apic(void);
173extern void x2apic_icr_write(u32 low, u32 id); 171extern void x2apic_icr_write(u32 low, u32 id);
174static inline int x2apic_enabled(void) 172static inline int x2apic_enabled(void)
175{ 173{
@@ -183,6 +181,8 @@ static inline int x2apic_enabled(void)
183 return 1; 181 return 1;
184 return 0; 182 return 0;
185} 183}
184
185#define x2apic_supported() (cpu_has_x2apic)
186#else 186#else
187static inline void check_x2apic(void) 187static inline void check_x2apic(void)
188{ 188{
@@ -190,28 +190,20 @@ static inline void check_x2apic(void)
190static inline void enable_x2apic(void) 190static inline void enable_x2apic(void)
191{ 191{
192} 192}
193static inline void enable_IR_x2apic(void)
194{
195}
196static inline int x2apic_enabled(void) 193static inline int x2apic_enabled(void)
197{ 194{
198 return 0; 195 return 0;
199} 196}
200 197
201#define x2apic 0 198#define x2apic_preenabled 0
202 199#define x2apic_supported() 0
203#endif 200#endif
204 201
205extern int get_physical_broadcast(void); 202extern void enable_IR_x2apic(void);
206 203
207#ifdef CONFIG_X86_X2APIC 204extern int get_physical_broadcast(void);
208static inline void ack_x2APIC_irq(void)
209{
210 /* Docs say use 0 for future compatibility */
211 native_apic_msr_write(APIC_EOI, 0);
212}
213#endif
214 205
206extern void apic_disable(void);
215extern int lapic_get_maxlvt(void); 207extern int lapic_get_maxlvt(void);
216extern void clear_local_APIC(void); 208extern void clear_local_APIC(void);
217extern void connect_bsp_APIC(void); 209extern void connect_bsp_APIC(void);
@@ -252,7 +244,7 @@ static inline void lapic_shutdown(void) { }
252#define local_apic_timer_c2_ok 1 244#define local_apic_timer_c2_ok 1
253static inline void init_apic_mappings(void) { } 245static inline void init_apic_mappings(void) { }
254static inline void disable_local_APIC(void) { } 246static inline void disable_local_APIC(void) { }
255 247static inline void apic_disable(void) { }
256#endif /* !CONFIG_X86_LOCAL_APIC */ 248#endif /* !CONFIG_X86_LOCAL_APIC */
257 249
258#ifdef CONFIG_X86_64 250#ifdef CONFIG_X86_64
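
For reference, and not part of this diff: the body of x2apic_enabled() that sits between the two apic.h hunks above is unchanged and roughly reads as below, so a BIOS-pre-enabled x2apic is still detected via the APICBASE MSR before any of the new x2apic_mode logic runs.

static inline int x2apic_enabled(void)
{
	u64 msr;

	if (!cpu_has_x2apic)
		return 0;

	rdmsrl(MSR_IA32_APICBASE, msr);
	if (msr & X2APIC_ENABLE)	/* firmware (or kexec) already switched modes */
		return 1;
	return 0;
}
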
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 1a99e6c092af..58d7091eeb1f 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -60,8 +60,4 @@ extern struct irq_chip i8259A_chip;
60extern void mask_8259A(void); 60extern void mask_8259A(void);
61extern void unmask_8259A(void); 61extern void unmask_8259A(void);
62 62
63#ifdef CONFIG_X86_32
64extern void init_ISA_irqs(void);
65#endif
66
67#endif /* _ASM_X86_I8259_H */ 63#endif /* _ASM_X86_I8259_H */
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 07f2913ba5de..27bd2fdd00ae 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -161,15 +161,11 @@ extern int io_apic_set_pci_routing(struct device *dev, int ioapic, int pin,
161extern int (*ioapic_renumber_irq)(int ioapic, int irq); 161extern int (*ioapic_renumber_irq)(int ioapic, int irq);
162extern void ioapic_init_mappings(void); 162extern void ioapic_init_mappings(void);
163 163
164#ifdef CONFIG_X86_64
165extern struct IO_APIC_route_entry **alloc_ioapic_entries(void); 164extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
166extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries); 165extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
167extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); 166extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
168extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); 167extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
169extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); 168extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
170extern void reinit_intr_remapped_IO_APIC(int intr_remapping,
171 struct IO_APIC_route_entry **ioapic_entries);
172#endif
173 169
174extern void probe_nr_irqs_gsi(void); 170extern void probe_nr_irqs_gsi(void);
175 171
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 0396760fccb8..f275e2244505 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -1,6 +1,6 @@
1#ifndef _ASM_X86_IRQ_REMAPPING_H 1#ifndef _ASM_X86_IRQ_REMAPPING_H
2#define _ASM_X86_IRQ_REMAPPING_H 2#define _ASM_X86_IRQ_REMAPPING_H
3 3
4#define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8) 4#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
5 5
6#endif /* _ASM_X86_IRQ_REMAPPING_H */ 6#endif /* _ASM_X86_IRQ_REMAPPING_H */
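
To make the renamed flag concrete, here is an illustrative helper (irte_dest_example is a made-up name, not in the patch): with a destination APIC ID of 0x12, legacy xAPIC mode yields 0x1200 because the 8-bit ID occupies bits 15:8 of the IRTE destination field, while x2apic mode uses the full 32-bit ID verbatim.

static inline u32 irte_dest_example(u32 dest)
{
	/* xAPIC:  IRTE_DEST(0x12) == 0x12 << 8 == 0x1200 */
	/* x2APIC: IRTE_DEST(0x12) == 0x12                */
	return x2apic_mode ? dest : dest << 8;
}
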
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 3cbd79bbb47c..910b5a3d6751 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -34,6 +34,7 @@
34 34
35#ifdef CONFIG_X86_32 35#ifdef CONFIG_X86_32
36# define SYSCALL_VECTOR 0x80 36# define SYSCALL_VECTOR 0x80
37# define IA32_SYSCALL_VECTOR 0x80
37#else 38#else
38# define IA32_SYSCALL_VECTOR 0x80 39# define IA32_SYSCALL_VECTOR 0x80
39#endif 40#endif
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index bdc2ada05ae0..4093d1ed6db2 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -33,7 +33,6 @@ struct x86_quirks {
33 int (*setup_ioapic_ids)(void); 33 int (*setup_ioapic_ids)(void);
34}; 34};
35 35
36extern void x86_quirk_pre_intr_init(void);
37extern void x86_quirk_intr_init(void); 36extern void x86_quirk_intr_init(void);
38 37
39extern void x86_quirk_trap_init(void); 38extern void x86_quirk_trap_init(void);
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 145cce75cda7..16e3acfe19e6 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -28,7 +28,7 @@ CFLAGS_paravirt.o := $(nostackp)
28obj-y := process_$(BITS).o signal.o entry_$(BITS).o 28obj-y := process_$(BITS).o signal.o entry_$(BITS).o
29obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o 29obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
30obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o 30obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o
31obj-y += setup.o i8259.o irqinit_$(BITS).o 31obj-y += setup.o i8259.o irqinit.o
32obj-$(CONFIG_X86_VISWS) += visws_quirks.o 32obj-$(CONFIG_X86_VISWS) += visws_quirks.o
33obj-$(CONFIG_X86_32) += probe_roms_32.o 33obj-$(CONFIG_X86_32) += probe_roms_32.o
34obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o 34obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f2870920f246..28f747d61d78 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -98,6 +98,29 @@ early_param("lapic", parse_lapic);
98/* Local APIC was disabled by the BIOS and enabled by the kernel */ 98/* Local APIC was disabled by the BIOS and enabled by the kernel */
99static int enabled_via_apicbase; 99static int enabled_via_apicbase;
100 100
101/*
102 * Handle interrupt mode configuration register (IMCR).
103 * This register controls whether the interrupt signals
104 * that reach the BSP come from the master PIC or from the
105 * local APIC. Before entering Symmetric I/O Mode, either
106 * the BIOS or the operating system must switch out of
107 * PIC Mode by changing the IMCR.
108 */
109static inline void imcr_pic_to_apic(void)
110{
111 /* select IMCR register */
112 outb(0x70, 0x22);
113 /* NMI and 8259 INTR go through APIC */
114 outb(0x01, 0x23);
115}
116
117static inline void imcr_apic_to_pic(void)
118{
119 /* select IMCR register */
120 outb(0x70, 0x22);
121 /* NMI and 8259 INTR go directly to BSP */
122 outb(0x00, 0x23);
123}
101#endif 124#endif
102 125
103#ifdef CONFIG_X86_64 126#ifdef CONFIG_X86_64
@@ -111,13 +134,19 @@ static __init int setup_apicpmtimer(char *s)
111__setup("apicpmtimer", setup_apicpmtimer); 134__setup("apicpmtimer", setup_apicpmtimer);
112#endif 135#endif
113 136
137int x2apic_mode;
114#ifdef CONFIG_X86_X2APIC 138#ifdef CONFIG_X86_X2APIC
115int x2apic;
116/* x2apic enabled before OS handover */ 139/* x2apic enabled before OS handover */
117static int x2apic_preenabled; 140static int x2apic_preenabled;
118static int disable_x2apic; 141static int disable_x2apic;
119static __init int setup_nox2apic(char *str) 142static __init int setup_nox2apic(char *str)
120{ 143{
144 if (x2apic_enabled()) {
145 pr_warning("Bios already enabled x2apic, "
146 "can't enforce nox2apic");
147 return 0;
148 }
149
121 disable_x2apic = 1; 150 disable_x2apic = 1;
122 setup_clear_cpu_cap(X86_FEATURE_X2APIC); 151 setup_clear_cpu_cap(X86_FEATURE_X2APIC);
123 return 0; 152 return 0;
@@ -209,6 +238,24 @@ static int modern_apic(void)
209 return lapic_get_version() >= 0x14; 238 return lapic_get_version() >= 0x14;
210} 239}
211 240
241/*
242 * bare function to substitute write operation
243 * and it's _that_ fast :)
244 */
245void native_apic_write_dummy(u32 reg, u32 v)
246{
247 WARN_ON_ONCE((cpu_has_apic || !disable_apic));
248}
249
250/*
251 * right after this call apic->write doesn't do anything
252 * note that there is no restore operation it works one way
253 */
254void apic_disable(void)
255{
256 apic->write = native_apic_write_dummy;
257}
258
212void native_apic_wait_icr_idle(void) 259void native_apic_wait_icr_idle(void)
213{ 260{
214 while (apic_read(APIC_ICR) & APIC_ICR_BUSY) 261 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
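
For context (pre-existing code, shown here as a minimal sketch): apic_write() is only an indirect call through the apic driver structure, so pointing ->write at native_apic_write_dummy() is enough to turn every later local-APIC register write into a warned no-op once apic_disable() has run.

static inline void apic_write(u32 reg, u32 val)
{
	apic->write(reg, val);	/* becomes native_apic_write_dummy() after apic_disable() */
}
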
@@ -815,7 +862,7 @@ void clear_local_APIC(void)
815 u32 v; 862 u32 v;
816 863
817 /* APIC hasn't been mapped yet */ 864 /* APIC hasn't been mapped yet */
818 if (!x2apic && !apic_phys) 865 if (!x2apic_mode && !apic_phys)
819 return; 866 return;
820 867
821 maxlvt = lapic_get_maxlvt(); 868 maxlvt = lapic_get_maxlvt();
@@ -1287,7 +1334,7 @@ void check_x2apic(void)
1287{ 1334{
1288 if (x2apic_enabled()) { 1335 if (x2apic_enabled()) {
1289 pr_info("x2apic enabled by BIOS, switching to x2apic ops\n"); 1336 pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
1290 x2apic_preenabled = x2apic = 1; 1337 x2apic_preenabled = x2apic_mode = 1;
1291 } 1338 }
1292} 1339}
1293 1340
@@ -1295,7 +1342,7 @@ void enable_x2apic(void)
1295{ 1342{
1296 int msr, msr2; 1343 int msr, msr2;
1297 1344
1298 if (!x2apic) 1345 if (!x2apic_mode)
1299 return; 1346 return;
1300 1347
1301 rdmsr(MSR_IA32_APICBASE, msr, msr2); 1348 rdmsr(MSR_IA32_APICBASE, msr, msr2);
@@ -1304,6 +1351,7 @@ void enable_x2apic(void)
1304 wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0); 1351 wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
1305 } 1352 }
1306} 1353}
1354#endif /* CONFIG_X86_X2APIC */
1307 1355
1308void __init enable_IR_x2apic(void) 1356void __init enable_IR_x2apic(void)
1309{ 1357{
@@ -1312,32 +1360,21 @@ void __init enable_IR_x2apic(void)
1312 unsigned long flags; 1360 unsigned long flags;
1313 struct IO_APIC_route_entry **ioapic_entries = NULL; 1361 struct IO_APIC_route_entry **ioapic_entries = NULL;
1314 1362
1315 if (!cpu_has_x2apic) 1363 ret = dmar_table_init();
1316 return; 1364 if (ret) {
1317 1365 pr_debug("dmar_table_init() failed with %d:\n", ret);
1318 if (!x2apic_preenabled && disable_x2apic) { 1366 goto ir_failed;
1319 pr_info("Skipped enabling x2apic and Interrupt-remapping "
1320 "because of nox2apic\n");
1321 return;
1322 } 1367 }
1323 1368
1324 if (x2apic_preenabled && disable_x2apic) 1369 if (!intr_remapping_supported()) {
1325 panic("Bios already enabled x2apic, can't enforce nox2apic"); 1370 pr_debug("intr-remapping not supported\n");
1326 1371 goto ir_failed;
1327 if (!x2apic_preenabled && skip_ioapic_setup) {
1328 pr_info("Skipped enabling x2apic and Interrupt-remapping "
1329 "because of skipping io-apic setup\n");
1330 return;
1331 } 1372 }
1332 1373
1333 ret = dmar_table_init();
1334 if (ret) {
1335 pr_info("dmar_table_init() failed with %d:\n", ret);
1336 1374
1337 if (x2apic_preenabled) 1375 if (!x2apic_preenabled && skip_ioapic_setup) {
1338 panic("x2apic enabled by bios. But IR enabling failed"); 1376 pr_info("Skipped enabling intr-remap because of skipping "
1339 else 1377 "io-apic setup\n");
1340 pr_info("Not enabling x2apic,Intr-remapping\n");
1341 return; 1378 return;
1342 } 1379 }
1343 1380
@@ -1357,19 +1394,16 @@ void __init enable_IR_x2apic(void)
1357 mask_IO_APIC_setup(ioapic_entries); 1394 mask_IO_APIC_setup(ioapic_entries);
1358 mask_8259A(); 1395 mask_8259A();
1359 1396
1360 ret = enable_intr_remapping(EIM_32BIT_APIC_ID); 1397 ret = enable_intr_remapping(x2apic_supported());
1361
1362 if (ret && x2apic_preenabled) {
1363 local_irq_restore(flags);
1364 panic("x2apic enabled by bios. But IR enabling failed");
1365 }
1366
1367 if (ret) 1398 if (ret)
1368 goto end_restore; 1399 goto end_restore;
1369 1400
1370 if (!x2apic) { 1401 pr_info("Enabled Interrupt-remapping\n");
1371 x2apic = 1; 1402
1403 if (x2apic_supported() && !x2apic_mode) {
1404 x2apic_mode = 1;
1372 enable_x2apic(); 1405 enable_x2apic();
1406 pr_info("Enabled x2apic\n");
1373 } 1407 }
1374 1408
1375end_restore: 1409end_restore:
@@ -1378,37 +1412,34 @@ end_restore:
1378 * IR enabling failed 1412 * IR enabling failed
1379 */ 1413 */
1380 restore_IO_APIC_setup(ioapic_entries); 1414 restore_IO_APIC_setup(ioapic_entries);
1381 else
1382 reinit_intr_remapped_IO_APIC(x2apic_preenabled, ioapic_entries);
1383 1415
1384 unmask_8259A(); 1416 unmask_8259A();
1385 local_irq_restore(flags); 1417 local_irq_restore(flags);
1386 1418
1387end: 1419end:
1388 if (!ret) {
1389 if (!x2apic_preenabled)
1390 pr_info("Enabled x2apic and interrupt-remapping\n");
1391 else
1392 pr_info("Enabled Interrupt-remapping\n");
1393 } else
1394 pr_err("Failed to enable Interrupt-remapping and x2apic\n");
1395 if (ioapic_entries) 1420 if (ioapic_entries)
1396 free_ioapic_entries(ioapic_entries); 1421 free_ioapic_entries(ioapic_entries);
1422
1423 if (!ret)
1424 return;
1425
1426ir_failed:
1427 if (x2apic_preenabled)
1428 panic("x2apic enabled by bios. But IR enabling failed");
1429 else if (cpu_has_x2apic)
1430 pr_info("Not enabling x2apic,Intr-remapping\n");
1397#else 1431#else
1398 if (!cpu_has_x2apic) 1432 if (!cpu_has_x2apic)
1399 return; 1433 return;
1400 1434
1401 if (x2apic_preenabled) 1435 if (x2apic_preenabled)
1402 panic("x2apic enabled prior OS handover," 1436 panic("x2apic enabled prior OS handover,"
1403 " enable CONFIG_INTR_REMAP"); 1437 " enable CONFIG_X86_X2APIC, CONFIG_INTR_REMAP");
1404
1405 pr_info("Enable CONFIG_INTR_REMAP for enabling intr-remapping "
1406 " and x2apic\n");
1407#endif 1438#endif
1408 1439
1409 return; 1440 return;
1410} 1441}
1411#endif /* CONFIG_X86_X2APIC */ 1442
1412 1443
1413#ifdef CONFIG_X86_64 1444#ifdef CONFIG_X86_64
1414/* 1445/*
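
Read together, the reworked enable_IR_x2apic() follows roughly this flow (summary sketch, not literal patch text):

/*
 *  dmar_table_init() failure or !intr_remapping_supported()
 *      -> ir_failed: panic if the BIOS pre-enabled x2apic,
 *         otherwise log and keep running without IR/x2apic
 *  skip_ioapic_setup (and x2apic not pre-enabled) -> return early
 *  save + mask IO-APIC entries, mask_8259A()
 *  enable_intr_remapping(x2apic_supported())
 *      -> on success: switch to x2apic_mode when the CPU supports it
 *      -> on failure: restore the saved IO-APIC entries
 *  unmask_8259A(), restore irqs, free the saved entries
 */
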
@@ -1539,7 +1570,7 @@ void __init early_init_lapic_mapping(void)
1539 */ 1570 */
1540void __init init_apic_mappings(void) 1571void __init init_apic_mappings(void)
1541{ 1572{
1542 if (x2apic) { 1573 if (x2apic_mode) {
1543 boot_cpu_physical_apicid = read_apic_id(); 1574 boot_cpu_physical_apicid = read_apic_id();
1544 return; 1575 return;
1545 } 1576 }
@@ -1565,6 +1596,12 @@ void __init init_apic_mappings(void)
1565 */ 1596 */
1566 if (boot_cpu_physical_apicid == -1U) 1597 if (boot_cpu_physical_apicid == -1U)
1567 boot_cpu_physical_apicid = read_apic_id(); 1598 boot_cpu_physical_apicid = read_apic_id();
1599
1600 /* lets check if we may to NOP'ify apic operations */
1601 if (!cpu_has_apic) {
1602 pr_info("APIC: disable apic facility\n");
1603 apic_disable();
1604 }
1568} 1605}
1569 1606
1570/* 1607/*
@@ -1733,8 +1770,7 @@ void __init connect_bsp_APIC(void)
1733 */ 1770 */
1734 apic_printk(APIC_VERBOSE, "leaving PIC mode, " 1771 apic_printk(APIC_VERBOSE, "leaving PIC mode, "
1735 "enabling APIC mode.\n"); 1772 "enabling APIC mode.\n");
1736 outb(0x70, 0x22); 1773 imcr_pic_to_apic();
1737 outb(0x01, 0x23);
1738 } 1774 }
1739#endif 1775#endif
1740 if (apic->enable_apic_mode) 1776 if (apic->enable_apic_mode)
@@ -1762,8 +1798,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
1762 */ 1798 */
1763 apic_printk(APIC_VERBOSE, "disabling APIC mode, " 1799 apic_printk(APIC_VERBOSE, "disabling APIC mode, "
1764 "entering PIC mode.\n"); 1800 "entering PIC mode.\n");
1765 outb(0x70, 0x22); 1801 imcr_apic_to_pic();
1766 outb(0x00, 0x23);
1767 return; 1802 return;
1768 } 1803 }
1769#endif 1804#endif
@@ -1969,10 +2004,10 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
1969 2004
1970 local_irq_save(flags); 2005 local_irq_save(flags);
1971 disable_local_APIC(); 2006 disable_local_APIC();
1972#ifdef CONFIG_INTR_REMAP 2007
1973 if (intr_remapping_enabled) 2008 if (intr_remapping_enabled)
1974 disable_intr_remapping(); 2009 disable_intr_remapping();
1975#endif 2010
1976 local_irq_restore(flags); 2011 local_irq_restore(flags);
1977 return 0; 2012 return 0;
1978} 2013}
@@ -1982,8 +2017,6 @@ static int lapic_resume(struct sys_device *dev)
1982 unsigned int l, h; 2017 unsigned int l, h;
1983 unsigned long flags; 2018 unsigned long flags;
1984 int maxlvt; 2019 int maxlvt;
1985
1986#ifdef CONFIG_INTR_REMAP
1987 int ret; 2020 int ret;
1988 struct IO_APIC_route_entry **ioapic_entries = NULL; 2021 struct IO_APIC_route_entry **ioapic_entries = NULL;
1989 2022
@@ -1991,7 +2024,7 @@ static int lapic_resume(struct sys_device *dev)
1991 return 0; 2024 return 0;
1992 2025
1993 local_irq_save(flags); 2026 local_irq_save(flags);
1994 if (x2apic) { 2027 if (intr_remapping_enabled) {
1995 ioapic_entries = alloc_ioapic_entries(); 2028 ioapic_entries = alloc_ioapic_entries();
1996 if (!ioapic_entries) { 2029 if (!ioapic_entries) {
1997 WARN(1, "Alloc ioapic_entries in lapic resume failed."); 2030 WARN(1, "Alloc ioapic_entries in lapic resume failed.");
@@ -2007,17 +2040,10 @@ static int lapic_resume(struct sys_device *dev)
2007 2040
2008 mask_IO_APIC_setup(ioapic_entries); 2041 mask_IO_APIC_setup(ioapic_entries);
2009 mask_8259A(); 2042 mask_8259A();
2010 enable_x2apic();
2011 } 2043 }
2012#else
2013 if (!apic_pm_state.active)
2014 return 0;
2015 2044
2016 local_irq_save(flags); 2045 if (x2apic_mode)
2017 if (x2apic)
2018 enable_x2apic(); 2046 enable_x2apic();
2019#endif
2020
2021 else { 2047 else {
2022 /* 2048 /*
2023 * Make sure the APICBASE points to the right address 2049 * Make sure the APICBASE points to the right address
@@ -2055,20 +2081,15 @@ static int lapic_resume(struct sys_device *dev)
2055 apic_write(APIC_ESR, 0); 2081 apic_write(APIC_ESR, 0);
2056 apic_read(APIC_ESR); 2082 apic_read(APIC_ESR);
2057 2083
2058#ifdef CONFIG_INTR_REMAP 2084 if (intr_remapping_enabled) {
2059 if (intr_remapping_enabled) 2085 reenable_intr_remapping(x2apic_mode);
2060 reenable_intr_remapping(EIM_32BIT_APIC_ID);
2061
2062 if (x2apic) {
2063 unmask_8259A(); 2086 unmask_8259A();
2064 restore_IO_APIC_setup(ioapic_entries); 2087 restore_IO_APIC_setup(ioapic_entries);
2065 free_ioapic_entries(ioapic_entries); 2088 free_ioapic_entries(ioapic_entries);
2066 } 2089 }
2067#endif
2068 2090
2069 local_irq_restore(flags); 2091 local_irq_restore(flags);
2070 2092
2071
2072 return 0; 2093 return 0;
2073} 2094}
2074 2095
@@ -2117,31 +2138,14 @@ static void apic_pm_activate(void) { }
2117#endif /* CONFIG_PM */ 2138#endif /* CONFIG_PM */
2118 2139
2119#ifdef CONFIG_X86_64 2140#ifdef CONFIG_X86_64
2120/* 2141
2121 * apic_is_clustered_box() -- Check if we can expect good TSC 2142static int __cpuinit apic_cluster_num(void)
2122 *
2123 * Thus far, the major user of this is IBM's Summit2 series:
2124 *
2125 * Clustered boxes may have unsynced TSC problems if they are
2126 * multi-chassis. Use available data to take a good guess.
2127 * If in doubt, go HPET.
2128 */
2129__cpuinit int apic_is_clustered_box(void)
2130{ 2143{
2131 int i, clusters, zeros; 2144 int i, clusters, zeros;
2132 unsigned id; 2145 unsigned id;
2133 u16 *bios_cpu_apicid; 2146 u16 *bios_cpu_apicid;
2134 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS); 2147 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
2135 2148
2136 /*
2137 * there is not this kind of box with AMD CPU yet.
2138 * Some AMD box with quadcore cpu and 8 sockets apicid
2139 * will be [4, 0x23] or [8, 0x27] could be thought to
2140 * vsmp box still need checking...
2141 */
2142 if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
2143 return 0;
2144
2145 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); 2149 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
2146 bitmap_zero(clustermap, NUM_APIC_CLUSTERS); 2150 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
2147 2151
@@ -2177,18 +2181,67 @@ __cpuinit int apic_is_clustered_box(void)
2177 ++zeros; 2181 ++zeros;
2178 } 2182 }
2179 2183
2180 /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are 2184 return clusters;
2181 * not guaranteed to be synced between boards 2185}
2182 */ 2186
2183 if (is_vsmp_box() && clusters > 1) 2187static int __cpuinitdata multi_checked;
2188static int __cpuinitdata multi;
2189
2190static int __cpuinit set_multi(const struct dmi_system_id *d)
2191{
2192 if (multi)
2193 return 0;
2194 printk(KERN_INFO "APIC: %s detected, Multi Chassis\n", d->ident);
2195 multi = 1;
2196 return 0;
2197}
2198
2199static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
2200 {
2201 .callback = set_multi,
2202 .ident = "IBM System Summit2",
2203 .matches = {
2204 DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
2205 DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
2206 },
2207 },
2208 {}
2209};
2210
2211static void __cpuinit dmi_check_multi(void)
2212{
2213 if (multi_checked)
2214 return;
2215
2216 dmi_check_system(multi_dmi_table);
2217 multi_checked = 1;
2218}
2219
2220/*
2221 * apic_is_clustered_box() -- Check if we can expect good TSC
2222 *
2223 * Thus far, the major user of this is IBM's Summit2 series:
2224 * Clustered boxes may have unsynced TSC problems if they are
2225 * multi-chassis.
2226 * Use DMI to check them
2227 */
2228__cpuinit int apic_is_clustered_box(void)
2229{
2230 dmi_check_multi();
2231 if (multi)
2184 return 1; 2232 return 1;
2185 2233
2234 if (!is_vsmp_box())
2235 return 0;
2236
2186 /* 2237 /*
2187 * If clusters > 2, then should be multi-chassis. 2238 * ScaleMP vSMPowered boxes have one cluster per board and TSCs are
2188 * May have to revisit this when multi-core + hyperthreaded CPUs come 2239 * not guaranteed to be synced between boards
2189 * out, but AFAIK this will work even for them.
2190 */ 2240 */
2191 return (clusters > 2); 2241 if (apic_cluster_num() > 1)
2242 return 1;
2243
2244 return 0;
2192} 2245}
2193#endif 2246#endif
2194 2247
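
The net effect of the apic_is_clustered_box() rework above, summarised (illustrative comment, not patch text):

/*
 *  IBM Summit2 (matched via DMI)              -> clustered, don't trust TSC
 *  not a ScaleMP vSMPowered box               -> not clustered
 *  vSMP box with more than one APIC cluster   -> clustered, don't trust TSC
 *  everything else                            -> not clustered
 *
 *  The old AMD special case is gone; apic_cluster_num() is now only
 *  consulted for the vSMP case.
 */
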
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 1c11b819f245..8e07c1418661 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -145,7 +145,7 @@ es7000_rename_gsi(int ioapic, int gsi)
145 return gsi; 145 return gsi;
146} 146}
147 147
148static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) 148static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
149{ 149{
150 unsigned long vect = 0, psaival = 0; 150 unsigned long vect = 0, psaival = 0;
151 151
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index e583291fe6c3..21c30e1121ee 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -489,121 +489,6 @@ static void ioapic_mask_entry(int apic, int pin)
489 spin_unlock_irqrestore(&ioapic_lock, flags); 489 spin_unlock_irqrestore(&ioapic_lock, flags);
490} 490}
491 491
492#ifdef CONFIG_SMP
493static void send_cleanup_vector(struct irq_cfg *cfg)
494{
495 cpumask_var_t cleanup_mask;
496
497 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
498 unsigned int i;
499 cfg->move_cleanup_count = 0;
500 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
501 cfg->move_cleanup_count++;
502 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
503 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
504 } else {
505 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
506 cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
507 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
508 free_cpumask_var(cleanup_mask);
509 }
510 cfg->move_in_progress = 0;
511}
512
513static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
514{
515 int apic, pin;
516 struct irq_pin_list *entry;
517 u8 vector = cfg->vector;
518
519 entry = cfg->irq_2_pin;
520 for (;;) {
521 unsigned int reg;
522
523 if (!entry)
524 break;
525
526 apic = entry->apic;
527 pin = entry->pin;
528 /*
529 * With interrupt-remapping, destination information comes
530 * from interrupt-remapping table entry.
531 */
532 if (!irq_remapped(irq))
533 io_apic_write(apic, 0x11 + pin*2, dest);
534 reg = io_apic_read(apic, 0x10 + pin*2);
535 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
536 reg |= vector;
537 io_apic_modify(apic, 0x10 + pin*2, reg);
538 if (!entry->next)
539 break;
540 entry = entry->next;
541 }
542}
543
544static int
545assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
546
547/*
548 * Either sets desc->affinity to a valid value, and returns
549 * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
550 * leaves desc->affinity untouched.
551 */
552static unsigned int
553set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
554{
555 struct irq_cfg *cfg;
556 unsigned int irq;
557
558 if (!cpumask_intersects(mask, cpu_online_mask))
559 return BAD_APICID;
560
561 irq = desc->irq;
562 cfg = desc->chip_data;
563 if (assign_irq_vector(irq, cfg, mask))
564 return BAD_APICID;
565
566 cpumask_copy(desc->affinity, mask);
567
568 return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
569}
570
571static int
572set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
573{
574 struct irq_cfg *cfg;
575 unsigned long flags;
576 unsigned int dest;
577 unsigned int irq;
578 int ret = -1;
579
580 irq = desc->irq;
581 cfg = desc->chip_data;
582
583 spin_lock_irqsave(&ioapic_lock, flags);
584 dest = set_desc_affinity(desc, mask);
585 if (dest != BAD_APICID) {
586 /* Only the high 8 bits are valid. */
587 dest = SET_APIC_LOGICAL_ID(dest);
588 __target_IO_APIC_irq(irq, dest, cfg);
589 ret = 0;
590 }
591 spin_unlock_irqrestore(&ioapic_lock, flags);
592
593 return ret;
594}
595
596static int
597set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
598{
599 struct irq_desc *desc;
600
601 desc = irq_to_desc(irq);
602
603 return set_ioapic_affinity_irq_desc(desc, mask);
604}
605#endif /* CONFIG_SMP */
606
607/* 492/*
608 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are 493 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
609 * shared ISA-space IRQs, so we have to support them. We are super 494 * shared ISA-space IRQs, so we have to support them. We are super
@@ -822,7 +707,6 @@ static int __init ioapic_pirq_setup(char *str)
822__setup("pirq=", ioapic_pirq_setup); 707__setup("pirq=", ioapic_pirq_setup);
823#endif /* CONFIG_X86_32 */ 708#endif /* CONFIG_X86_32 */
824 709
825#ifdef CONFIG_INTR_REMAP
826struct IO_APIC_route_entry **alloc_ioapic_entries(void) 710struct IO_APIC_route_entry **alloc_ioapic_entries(void)
827{ 711{
828 int apic; 712 int apic;
@@ -920,20 +804,6 @@ int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
920 return 0; 804 return 0;
921} 805}
922 806
923void reinit_intr_remapped_IO_APIC(int intr_remapping,
924 struct IO_APIC_route_entry **ioapic_entries)
925
926{
927 /*
928 * for now plain restore of previous settings.
929 * TBD: In the case of OS enabling interrupt-remapping,
930 * IO-APIC RTE's need to be setup to point to interrupt-remapping
931 * table entries. for now, do a plain restore, and wait for
932 * the setup_IO_APIC_irqs() to do proper initialization.
933 */
934 restore_IO_APIC_setup(ioapic_entries);
935}
936
937void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries) 807void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
938{ 808{
939 int apic; 809 int apic;
@@ -943,7 +813,6 @@ void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
943 813
944 kfree(ioapic_entries); 814 kfree(ioapic_entries);
945} 815}
946#endif
947 816
948/* 817/*
949 * Find the IRQ entry number of a certain pin. 818 * Find the IRQ entry number of a certain pin.
@@ -2332,6 +2201,118 @@ static int ioapic_retrigger_irq(unsigned int irq)
2332 */ 2201 */
2333 2202
2334#ifdef CONFIG_SMP 2203#ifdef CONFIG_SMP
2204static void send_cleanup_vector(struct irq_cfg *cfg)
2205{
2206 cpumask_var_t cleanup_mask;
2207
2208 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
2209 unsigned int i;
2210 cfg->move_cleanup_count = 0;
2211 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
2212 cfg->move_cleanup_count++;
2213 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
2214 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
2215 } else {
2216 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
2217 cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
2218 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2219 free_cpumask_var(cleanup_mask);
2220 }
2221 cfg->move_in_progress = 0;
2222}
2223
2224static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
2225{
2226 int apic, pin;
2227 struct irq_pin_list *entry;
2228 u8 vector = cfg->vector;
2229
2230 entry = cfg->irq_2_pin;
2231 for (;;) {
2232 unsigned int reg;
2233
2234 if (!entry)
2235 break;
2236
2237 apic = entry->apic;
2238 pin = entry->pin;
2239 /*
2240 * With interrupt-remapping, destination information comes
2241 * from interrupt-remapping table entry.
2242 */
2243 if (!irq_remapped(irq))
2244 io_apic_write(apic, 0x11 + pin*2, dest);
2245 reg = io_apic_read(apic, 0x10 + pin*2);
2246 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
2247 reg |= vector;
2248 io_apic_modify(apic, 0x10 + pin*2, reg);
2249 if (!entry->next)
2250 break;
2251 entry = entry->next;
2252 }
2253}
2254
2255static int
2256assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
2257
2258/*
2259 * Either sets desc->affinity to a valid value, and returns
2260 * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
2261 * leaves desc->affinity untouched.
2262 */
2263static unsigned int
2264set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
2265{
2266 struct irq_cfg *cfg;
2267 unsigned int irq;
2268
2269 if (!cpumask_intersects(mask, cpu_online_mask))
2270 return BAD_APICID;
2271
2272 irq = desc->irq;
2273 cfg = desc->chip_data;
2274 if (assign_irq_vector(irq, cfg, mask))
2275 return BAD_APICID;
2276
2277 cpumask_copy(desc->affinity, mask);
2278
2279 return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
2280}
2281
2282static int
2283set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2284{
2285 struct irq_cfg *cfg;
2286 unsigned long flags;
2287 unsigned int dest;
2288 unsigned int irq;
2289 int ret = -1;
2290
2291 irq = desc->irq;
2292 cfg = desc->chip_data;
2293
2294 spin_lock_irqsave(&ioapic_lock, flags);
2295 dest = set_desc_affinity(desc, mask);
2296 if (dest != BAD_APICID) {
2297 /* Only the high 8 bits are valid. */
2298 dest = SET_APIC_LOGICAL_ID(dest);
2299 __target_IO_APIC_irq(irq, dest, cfg);
2300 ret = 0;
2301 }
2302 spin_unlock_irqrestore(&ioapic_lock, flags);
2303
2304 return ret;
2305}
2306
2307static int
2308set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
2309{
2310 struct irq_desc *desc;
2311
2312 desc = irq_to_desc(irq);
2313
2314 return set_ioapic_affinity_irq_desc(desc, mask);
2315}
2335 2316
2336#ifdef CONFIG_INTR_REMAP 2317#ifdef CONFIG_INTR_REMAP
2337 2318
@@ -2478,53 +2459,6 @@ static void irq_complete_move(struct irq_desc **descp)
2478static inline void irq_complete_move(struct irq_desc **descp) {} 2459static inline void irq_complete_move(struct irq_desc **descp) {}
2479#endif 2460#endif
2480 2461
2481static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
2482{
2483 int apic, pin;
2484 struct irq_pin_list *entry;
2485
2486 entry = cfg->irq_2_pin;
2487 for (;;) {
2488
2489 if (!entry)
2490 break;
2491
2492 apic = entry->apic;
2493 pin = entry->pin;
2494 io_apic_eoi(apic, pin);
2495 entry = entry->next;
2496 }
2497}
2498
2499static void
2500eoi_ioapic_irq(struct irq_desc *desc)
2501{
2502 struct irq_cfg *cfg;
2503 unsigned long flags;
2504 unsigned int irq;
2505
2506 irq = desc->irq;
2507 cfg = desc->chip_data;
2508
2509 spin_lock_irqsave(&ioapic_lock, flags);
2510 __eoi_ioapic_irq(irq, cfg);
2511 spin_unlock_irqrestore(&ioapic_lock, flags);
2512}
2513
2514#ifdef CONFIG_X86_X2APIC
2515static void ack_x2apic_level(unsigned int irq)
2516{
2517 struct irq_desc *desc = irq_to_desc(irq);
2518 ack_x2APIC_irq();
2519 eoi_ioapic_irq(desc);
2520}
2521
2522static void ack_x2apic_edge(unsigned int irq)
2523{
2524 ack_x2APIC_irq();
2525}
2526#endif
2527
2528static void ack_apic_edge(unsigned int irq) 2462static void ack_apic_edge(unsigned int irq)
2529{ 2463{
2530 struct irq_desc *desc = irq_to_desc(irq); 2464 struct irq_desc *desc = irq_to_desc(irq);
@@ -2588,9 +2522,6 @@ static void ack_apic_level(unsigned int irq)
2588 */ 2522 */
2589 ack_APIC_irq(); 2523 ack_APIC_irq();
2590 2524
2591 if (irq_remapped(irq))
2592 eoi_ioapic_irq(desc);
2593
2594 /* Now we can move and renable the irq */ 2525 /* Now we can move and renable the irq */
2595 if (unlikely(do_unmask_irq)) { 2526 if (unlikely(do_unmask_irq)) {
2596 /* Only migrate the irq if the ack has been received. 2527 /* Only migrate the irq if the ack has been received.
@@ -2637,22 +2568,50 @@ static void ack_apic_level(unsigned int irq)
2637} 2568}
2638 2569
2639#ifdef CONFIG_INTR_REMAP 2570#ifdef CONFIG_INTR_REMAP
2571static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
2572{
2573 int apic, pin;
2574 struct irq_pin_list *entry;
2575
2576 entry = cfg->irq_2_pin;
2577 for (;;) {
2578
2579 if (!entry)
2580 break;
2581
2582 apic = entry->apic;
2583 pin = entry->pin;
2584 io_apic_eoi(apic, pin);
2585 entry = entry->next;
2586 }
2587}
2588
2589static void
2590eoi_ioapic_irq(struct irq_desc *desc)
2591{
2592 struct irq_cfg *cfg;
2593 unsigned long flags;
2594 unsigned int irq;
2595
2596 irq = desc->irq;
2597 cfg = desc->chip_data;
2598
2599 spin_lock_irqsave(&ioapic_lock, flags);
2600 __eoi_ioapic_irq(irq, cfg);
2601 spin_unlock_irqrestore(&ioapic_lock, flags);
2602}
2603
2640static void ir_ack_apic_edge(unsigned int irq) 2604static void ir_ack_apic_edge(unsigned int irq)
2641{ 2605{
2642#ifdef CONFIG_X86_X2APIC 2606 ack_APIC_irq();
2643 if (x2apic_enabled())
2644 return ack_x2apic_edge(irq);
2645#endif
2646 return ack_apic_edge(irq);
2647} 2607}
2648 2608
2649static void ir_ack_apic_level(unsigned int irq) 2609static void ir_ack_apic_level(unsigned int irq)
2650{ 2610{
2651#ifdef CONFIG_X86_X2APIC 2611 struct irq_desc *desc = irq_to_desc(irq);
2652 if (x2apic_enabled()) 2612
2653 return ack_x2apic_level(irq); 2613 ack_APIC_irq();
2654#endif 2614 eoi_ioapic_irq(desc);
2655 return ack_apic_level(irq);
2656} 2615}
2657#endif /* CONFIG_INTR_REMAP */ 2616#endif /* CONFIG_INTR_REMAP */
2658 2617
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 1783652bb0e5..bc3e880f9b82 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -50,7 +50,7 @@ static struct apic *apic_probe[] __initdata = {
50void __init default_setup_apic_routing(void) 50void __init default_setup_apic_routing(void)
51{ 51{
52#ifdef CONFIG_X86_X2APIC 52#ifdef CONFIG_X86_X2APIC
53 if (x2apic && (apic != &apic_x2apic_phys && 53 if (x2apic_mode && (apic != &apic_x2apic_phys &&
54#ifdef CONFIG_X86_UV 54#ifdef CONFIG_X86_UV
55 apic != &apic_x2apic_uv_x && 55 apic != &apic_x2apic_uv_x &&
56#endif 56#endif
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index 9cfe1f415d81..344eee4ac0a4 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -173,13 +173,6 @@ static inline int is_WPEG(struct rio_detail *rio){
173 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); 173 rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
174} 174}
175 175
176
177/* In clustered mode, the high nibble of APIC ID is a cluster number.
178 * The low nibble is a 4-bit bitmap. */
179#define XAPIC_DEST_CPUS_SHIFT 4
180#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
181#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
182
183#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) 176#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
184 177
185static const struct cpumask *summit_target_cpus(void) 178static const struct cpumask *summit_target_cpus(void)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 2bda69352976..93d604dee9b7 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -105,7 +105,7 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
105 cpumask_set_cpu(cpu, retmask); 105 cpumask_set_cpu(cpu, retmask);
106} 106}
107 107
108static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) 108static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
109{ 109{
110#ifdef CONFIG_SMP 110#ifdef CONFIG_SMP
111 unsigned long val; 111 unsigned long val;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index c3fe010d74c8..c1739ac29708 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -24,9 +24,9 @@ void (*generic_interrupt_extension)(void) = NULL;
24 */ 24 */
25void ack_bad_irq(unsigned int irq) 25void ack_bad_irq(unsigned int irq)
26{ 26{
27 printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq); 27 if (printk_ratelimit())
28 pr_err("unexpected IRQ trap at vector %02x\n", irq);
28 29
29#ifdef CONFIG_X86_LOCAL_APIC
30 /* 30 /*
31 * Currently unexpected vectors happen only on SMP and APIC. 31 * Currently unexpected vectors happen only on SMP and APIC.
32 * We _must_ ack these because every local APIC has only N 32 * We _must_ ack these because every local APIC has only N
@@ -36,9 +36,7 @@ void ack_bad_irq(unsigned int irq)
36 * completely. 36 * completely.
37 * But only ack when the APIC is enabled -AK 37 * But only ack when the APIC is enabled -AK
38 */ 38 */
39 if (cpu_has_apic) 39 ack_APIC_irq();
40 ack_APIC_irq();
41#endif
42} 40}
43 41
44#define irq_stats(x) (&per_cpu(irq_stat, x)) 42#define irq_stats(x) (&per_cpu(irq_stat, x))
@@ -178,7 +176,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
178 sum += irq_stats(cpu)->irq_thermal_count; 176 sum += irq_stats(cpu)->irq_thermal_count;
179# ifdef CONFIG_X86_64 177# ifdef CONFIG_X86_64
180 sum += irq_stats(cpu)->irq_threshold_count; 178 sum += irq_stats(cpu)->irq_threshold_count;
181#endif 179# endif
182#endif 180#endif
183 return sum; 181 return sum;
184} 182}
@@ -213,14 +211,11 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
213 irq = __get_cpu_var(vector_irq)[vector]; 211 irq = __get_cpu_var(vector_irq)[vector];
214 212
215 if (!handle_irq(irq, regs)) { 213 if (!handle_irq(irq, regs)) {
216#ifdef CONFIG_X86_64 214 ack_APIC_irq();
217 if (!disable_apic)
218 ack_APIC_irq();
219#endif
220 215
221 if (printk_ratelimit()) 216 if (printk_ratelimit())
222 printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n", 217 pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
223 __func__, smp_processor_id(), vector, irq); 218 __func__, smp_processor_id(), vector, irq);
224 } 219 }
225 220
226 irq_exit(); 221 irq_exit();
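
The dropped cpu_has_apic/disable_apic guards around the acks lean on the apic_disable() change earlier in this merge: ack_APIC_irq() is roughly the sketch below, and once apic->write has been replaced by native_apic_write_dummy() the EOI write is a harmless no-op even when no usable local APIC is present.

static inline void ack_APIC_irq(void)
{
	/* 0 per the docs, as in the removed ack_x2APIC_irq() */
	apic_write(APIC_EOI, 0);
}
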
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit.c
index 368b0a8836f9..2e08b10ad51a 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit.c
@@ -1,20 +1,25 @@
1#include <linux/linkage.h>
1#include <linux/errno.h> 2#include <linux/errno.h>
2#include <linux/signal.h> 3#include <linux/signal.h>
3#include <linux/sched.h> 4#include <linux/sched.h>
4#include <linux/ioport.h> 5#include <linux/ioport.h>
5#include <linux/interrupt.h> 6#include <linux/interrupt.h>
7#include <linux/timex.h>
6#include <linux/slab.h> 8#include <linux/slab.h>
7#include <linux/random.h> 9#include <linux/random.h>
10#include <linux/kprobes.h>
8#include <linux/init.h> 11#include <linux/init.h>
9#include <linux/kernel_stat.h> 12#include <linux/kernel_stat.h>
10#include <linux/sysdev.h> 13#include <linux/sysdev.h>
11#include <linux/bitops.h> 14#include <linux/bitops.h>
15#include <linux/acpi.h>
12#include <linux/io.h> 16#include <linux/io.h>
13#include <linux/delay.h> 17#include <linux/delay.h>
14 18
15#include <asm/atomic.h> 19#include <asm/atomic.h>
16#include <asm/system.h> 20#include <asm/system.h>
17#include <asm/timer.h> 21#include <asm/timer.h>
22#include <asm/hw_irq.h>
18#include <asm/pgtable.h> 23#include <asm/pgtable.h>
19#include <asm/desc.h> 24#include <asm/desc.h>
20#include <asm/apic.h> 25#include <asm/apic.h>
@@ -22,7 +27,23 @@
22#include <asm/i8259.h> 27#include <asm/i8259.h>
23#include <asm/traps.h> 28#include <asm/traps.h>
24 29
30/*
31 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
32 * (these are usually mapped to vectors 0x30-0x3f)
33 */
34
35/*
36 * The IO-APIC gives us many more interrupt sources. Most of these
37 * are unused but an SMP system is supposed to have enough memory ...
38 * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
39 * across the spectrum, so we really want to be prepared to get all
40 * of these. Plus, more powerful systems might have more than 64
41 * IO-APIC registers.
42 *
43 * (these are usually mapped into the 0x30-0xff vector range)
44 */
25 45
46#ifdef CONFIG_X86_32
26/* 47/*
27 * Note that on a 486, we don't want to do a SIGFPE on an irq13 48 * Note that on a 486, we don't want to do a SIGFPE on an irq13
28 * as the irq is unreliable, and exception 16 works correctly 49 * as the irq is unreliable, and exception 16 works correctly
@@ -52,30 +73,7 @@ static struct irqaction fpu_irq = {
52 .handler = math_error_irq, 73 .handler = math_error_irq,
53 .name = "fpu", 74 .name = "fpu",
54}; 75};
55
56void __init init_ISA_irqs(void)
57{
58 int i;
59
60#ifdef CONFIG_X86_LOCAL_APIC
61 init_bsp_APIC();
62#endif 76#endif
63 init_8259A(0);
64
65 /*
66 * 16 old-style INTA-cycle interrupts:
67 */
68 for (i = 0; i < NR_IRQS_LEGACY; i++) {
69 struct irq_desc *desc = irq_to_desc(i);
70
71 desc->status = IRQ_DISABLED;
72 desc->action = NULL;
73 desc->depth = 1;
74
75 set_irq_chip_and_handler_name(i, &i8259A_chip,
76 handle_level_irq, "XT");
77 }
78}
79 77
80/* 78/*
81 * IRQ2 is cascade interrupt to second interrupt controller 79 * IRQ2 is cascade interrupt to second interrupt controller
@@ -118,29 +116,37 @@ int vector_used_by_percpu_irq(unsigned int vector)
118 return 0; 116 return 0;
119} 117}
120 118
121/* Overridden in paravirt.c */ 119static void __init init_ISA_irqs(void)
122void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
123
124void __init native_init_IRQ(void)
125{ 120{
126 int i; 121 int i;
127 122
128 /* Execute any quirks before the call gates are initialised: */ 123#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
129 x86_quirk_pre_intr_init(); 124 init_bsp_APIC();
125#endif
126 init_8259A(0);
130 127
131 /* 128 /*
132 * Cover the whole vector space, no vector can escape 129 * 16 old-style INTA-cycle interrupts:
133 * us. (some of these will be overridden and become
134 * 'special' SMP interrupts)
135 */ 130 */
136 for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { 131 for (i = 0; i < NR_IRQS_LEGACY; i++) {
137 /* SYSCALL_VECTOR was reserved in trap_init. */ 132 struct irq_desc *desc = irq_to_desc(i);
138 if (i != SYSCALL_VECTOR) 133
139 set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]); 134 desc->status = IRQ_DISABLED;
135 desc->action = NULL;
136 desc->depth = 1;
137
138 set_irq_chip_and_handler_name(i, &i8259A_chip,
139 handle_level_irq, "XT");
140 } 140 }
141}
141 142
143/* Overridden in paravirt.c */
144void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
142 145
143#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP) 146static void __init smp_intr_init(void)
147{
148#ifdef CONFIG_SMP
149#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
144 /* 150 /*
145 * The reschedule interrupt is a CPU-to-CPU reschedule-helper 151 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
146 * IPI, driven by wakeup. 152 * IPI, driven by wakeup.
@@ -160,16 +166,27 @@ void __init native_init_IRQ(void)
160 /* IPI for generic function call */ 166 /* IPI for generic function call */
161 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); 167 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
162 168
163 /* IPI for single call function */ 169 /* IPI for generic single function call */
164 alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, 170 alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
165 call_function_single_interrupt); 171 call_function_single_interrupt);
166 172
167 /* Low priority IPI to cleanup after moving an irq */ 173 /* Low priority IPI to cleanup after moving an irq */
168 set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); 174 set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
169 set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); 175 set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
170#endif 176#endif
177#endif /* CONFIG_SMP */
178}
179
180static void __init apic_intr_init(void)
181{
182 smp_intr_init();
183
184#ifdef CONFIG_X86_64
185 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
186 alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
187#endif
171 188
172#ifdef CONFIG_X86_LOCAL_APIC 189#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
173 /* self generated IPI for local APIC timer */ 190 /* self generated IPI for local APIC timer */
174 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); 191 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
175 192
@@ -179,16 +196,67 @@ void __init native_init_IRQ(void)
179 /* IPI vectors for APIC spurious and error interrupts */ 196 /* IPI vectors for APIC spurious and error interrupts */
180 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); 197 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
181 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); 198 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
199
200 /* Performance monitoring interrupts: */
201# ifdef CONFIG_PERF_COUNTERS
202 alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt);
203 alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
204# endif
205
182#endif 206#endif
183 207
208#ifdef CONFIG_X86_32
184#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL) 209#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
185 /* thermal monitor LVT interrupt */ 210 /* thermal monitor LVT interrupt */
186 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); 211 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
187#endif 212#endif
213#endif
214}
215
216/**
217 * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
218 *
219 * Description:
220 * Perform any necessary interrupt initialisation prior to setting up
221 * the "ordinary" interrupt call gates. For legacy reasons, the ISA
222 * interrupts should be initialised here if the machine emulates a PC
223 * in any way.
224 **/
225static void __init x86_quirk_pre_intr_init(void)
226{
227#ifdef CONFIG_X86_32
228 if (x86_quirks->arch_pre_intr_init) {
229 if (x86_quirks->arch_pre_intr_init())
230 return;
231 }
232#endif
233 init_ISA_irqs();
234}
235
236void __init native_init_IRQ(void)
237{
238 int i;
239
240 /* Execute any quirks before the call gates are initialised: */
241 x86_quirk_pre_intr_init();
242
243 apic_intr_init();
244
245 /*
246 * Cover the whole vector space, no vector can escape
247 * us. (some of these will be overridden and become
248 * 'special' SMP interrupts)
249 */
250 for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
251 /* IA32_SYSCALL_VECTOR could be used in trap_init already. */
252 if (!test_bit(i, used_vectors))
253 set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
254 }
188 255
189 if (!acpi_ioapic) 256 if (!acpi_ioapic)
190 setup_irq(2, &irq2); 257 setup_irq(2, &irq2);
191 258
259#ifdef CONFIG_X86_32
192 /* 260 /*
193 * Call quirks after call gates are initialised (usually add in 261 * Call quirks after call gates are initialised (usually add in
194 * the architecture specific gates): 262 * the architecture specific gates):
@@ -203,4 +271,5 @@ void __init native_init_IRQ(void)
203 setup_irq(FPU_IRQ, &fpu_irq); 271 setup_irq(FPU_IRQ, &fpu_irq);
204 272
205 irq_ctx_init(smp_processor_id()); 273 irq_ctx_init(smp_processor_id());
274#endif
206} 275}
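
As a worked example of the unified gate-setup loop in native_init_IRQ() (assuming the usual values FIRST_EXTERNAL_VECTOR == 0x20 and NR_VECTORS == 256, which are not spelled out in this diff): vectors 0x00-0x1f stay reserved for exceptions, 0x80 is skipped on both 32-bit and 64-bit because trap_init() already marked it in used_vectors, and every remaining vector gets the common external-interrupt stub.

	int i;

	for (i = 0x20; i < 256; i++) {
		if (test_bit(i, used_vectors))		/* e.g. 0x80, the syscall gate */
			continue;
		set_intr_gate(i, interrupt[i - 0x20]);	/* common interrupt stub */
	}
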
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
deleted file mode 100644
index 8cd10537fd46..000000000000
--- a/arch/x86/kernel/irqinit_64.c
+++ /dev/null
@@ -1,177 +0,0 @@
1#include <linux/linkage.h>
2#include <linux/errno.h>
3#include <linux/signal.h>
4#include <linux/sched.h>
5#include <linux/ioport.h>
6#include <linux/interrupt.h>
7#include <linux/timex.h>
8#include <linux/slab.h>
9#include <linux/random.h>
10#include <linux/init.h>
11#include <linux/kernel_stat.h>
12#include <linux/sysdev.h>
13#include <linux/bitops.h>
14#include <linux/acpi.h>
15#include <linux/io.h>
16#include <linux/delay.h>
17
18#include <asm/atomic.h>
19#include <asm/system.h>
20#include <asm/hw_irq.h>
21#include <asm/pgtable.h>
22#include <asm/desc.h>
23#include <asm/apic.h>
24#include <asm/i8259.h>
25
26/*
27 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
28 * (these are usually mapped to vectors 0x30-0x3f)
29 */
30
31/*
32 * The IO-APIC gives us many more interrupt sources. Most of these
33 * are unused but an SMP system is supposed to have enough memory ...
34 * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
35 * across the spectrum, so we really want to be prepared to get all
36 * of these. Plus, more powerful systems might have more than 64
37 * IO-APIC registers.
38 *
39 * (these are usually mapped into the 0x30-0xff vector range)
40 */
41
42/*
43 * IRQ2 is cascade interrupt to second interrupt controller
44 */
45
46static struct irqaction irq2 = {
47 .handler = no_action,
48 .name = "cascade",
49};
50DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
51 [0 ... IRQ0_VECTOR - 1] = -1,
52 [IRQ0_VECTOR] = 0,
53 [IRQ1_VECTOR] = 1,
54 [IRQ2_VECTOR] = 2,
55 [IRQ3_VECTOR] = 3,
56 [IRQ4_VECTOR] = 4,
57 [IRQ5_VECTOR] = 5,
58 [IRQ6_VECTOR] = 6,
59 [IRQ7_VECTOR] = 7,
60 [IRQ8_VECTOR] = 8,
61 [IRQ9_VECTOR] = 9,
62 [IRQ10_VECTOR] = 10,
63 [IRQ11_VECTOR] = 11,
64 [IRQ12_VECTOR] = 12,
65 [IRQ13_VECTOR] = 13,
66 [IRQ14_VECTOR] = 14,
67 [IRQ15_VECTOR] = 15,
68 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
69};
70
71int vector_used_by_percpu_irq(unsigned int vector)
72{
73 int cpu;
74
75 for_each_online_cpu(cpu) {
76 if (per_cpu(vector_irq, cpu)[vector] != -1)
77 return 1;
78 }
79
80 return 0;
81}
82
83static void __init init_ISA_irqs(void)
84{
85 int i;
86
87 init_bsp_APIC();
88 init_8259A(0);
89
90 for (i = 0; i < NR_IRQS_LEGACY; i++) {
91 struct irq_desc *desc = irq_to_desc(i);
92
93 desc->status = IRQ_DISABLED;
94 desc->action = NULL;
95 desc->depth = 1;
96
97 /*
98 * 16 old-style INTA-cycle interrupts:
99 */
100 set_irq_chip_and_handler_name(i, &i8259A_chip,
101 handle_level_irq, "XT");
102 }
103}
104
105void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
106
107static void __init smp_intr_init(void)
108{
109#ifdef CONFIG_SMP
110 /*
111 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
112 * IPI, driven by wakeup.
113 */
114 alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
115
116 /* IPIs for invalidation */
117 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
118 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
119 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
120 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
121 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
122 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
123 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
124 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
125
126 /* IPI for generic function call */
127 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
128
129 /* IPI for generic single function call */
130 alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
131 call_function_single_interrupt);
132
133 /* Low priority IPI to cleanup after moving an irq */
134 set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
135 set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
136#endif
137}
138
139static void __init apic_intr_init(void)
140{
141 smp_intr_init();
142
143 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
144 alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
145
146 /* self generated IPI for local APIC timer */
147 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
148
149 /* generic IPI for platform specific use */
150 alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
151
152 /* IPI vectors for APIC spurious and error interrupts */
153 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
154 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
155}
156
157void __init native_init_IRQ(void)
158{
159 int i;
160
161 init_ISA_irqs();
162 /*
163 * Cover the whole vector space, no vector can escape
164 * us. (some of these will be overridden and become
165 * 'special' SMP interrupts)
166 */
167 for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
168 int vector = FIRST_EXTERNAL_VECTOR + i;
169 if (vector != IA32_SYSCALL_VECTOR)
170 set_intr_gate(vector, interrupt[i]);
171 }
172
173 apic_intr_init();
174
175 if (!acpi_ioapic)
176 setup_irq(2, &irq2);
177}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b4158439bf63..523bb697120d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -997,24 +997,6 @@ void __init setup_arch(char **cmdline_p)
997#ifdef CONFIG_X86_32 997#ifdef CONFIG_X86_32
998 998
999/** 999/**
1000 * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
1001 *
1002 * Description:
1003 * Perform any necessary interrupt initialisation prior to setting up
1004 * the "ordinary" interrupt call gates. For legacy reasons, the ISA
1005 * interrupts should be initialised here if the machine emulates a PC
1006 * in any way.
1007 **/
1008void __init x86_quirk_pre_intr_init(void)
1009{
1010 if (x86_quirks->arch_pre_intr_init) {
1011 if (x86_quirks->arch_pre_intr_init())
1012 return;
1013 }
1014 init_ISA_irqs();
1015}
1016
1017/**
1018 * x86_quirk_intr_init - post gate setup interrupt initialisation 1000 * x86_quirk_intr_init - post gate setup interrupt initialisation
1019 * 1001 *
1020 * Description: 1002 * Description:
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 13f33ea8ccaa..f6db48c405b8 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -193,19 +193,19 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
193} 193}
194 194
195struct smp_ops smp_ops = { 195struct smp_ops smp_ops = {
196 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, 196 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
197 .smp_prepare_cpus = native_smp_prepare_cpus, 197 .smp_prepare_cpus = native_smp_prepare_cpus,
198 .smp_cpus_done = native_smp_cpus_done, 198 .smp_cpus_done = native_smp_cpus_done,
199 199
200 .smp_send_stop = native_smp_send_stop, 200 .smp_send_stop = native_smp_send_stop,
201 .smp_send_reschedule = native_smp_send_reschedule, 201 .smp_send_reschedule = native_smp_send_reschedule,
202 202
203 .cpu_up = native_cpu_up, 203 .cpu_up = native_cpu_up,
204 .cpu_die = native_cpu_die, 204 .cpu_die = native_cpu_die,
205 .cpu_disable = native_cpu_disable, 205 .cpu_disable = native_cpu_disable,
206 .play_dead = native_play_dead, 206 .play_dead = native_play_dead,
207 207
208 .send_call_func_ipi = native_send_call_func_ipi, 208 .send_call_func_ipi = native_send_call_func_ipi,
209 .send_call_func_single_ipi = native_send_call_func_single_ipi, 209 .send_call_func_single_ipi = native_send_call_func_single_ipi,
210}; 210};
211EXPORT_SYMBOL_GPL(smp_ops); 211EXPORT_SYMBOL_GPL(smp_ops);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 58d24ef917d8..d2e8de958156 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -504,7 +504,7 @@ void __inquire_remote_apic(int apicid)
504 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this 504 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
505 * won't ... remember to clear down the APIC, etc later. 505 * won't ... remember to clear down the APIC, etc later.
506 */ 506 */
507int __devinit 507int __cpuinit
508wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip) 508wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
509{ 509{
510 unsigned long send_status, accept_status = 0; 510 unsigned long send_status, accept_status = 0;
@@ -538,7 +538,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
538 return (send_status | accept_status); 538 return (send_status | accept_status);
539} 539}
540 540
541int __devinit 541static int __cpuinit
542wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) 542wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
543{ 543{
544 unsigned long send_status, accept_status = 0; 544 unsigned long send_status, accept_status = 0;
@@ -822,10 +822,12 @@ do_rest:
822 /* mark "stuck" area as not stuck */ 822 /* mark "stuck" area as not stuck */
823 *((volatile unsigned long *)trampoline_base) = 0; 823 *((volatile unsigned long *)trampoline_base) = 0;
824 824
825 /* 825 if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
826 * Cleanup possible dangling ends... 826 /*
827 */ 827 * Cleanup possible dangling ends...
828 smpboot_restore_warm_reset_vector(); 828 */
829 smpboot_restore_warm_reset_vector();
830 }
829 831
830 return boot_error; 832 return boot_error;
831} 833}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a1d288327ff0..2310700faca5 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -969,11 +969,8 @@ void __init trap_init(void)
969 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) 969 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
970 set_bit(i, used_vectors); 970 set_bit(i, used_vectors);
971 971
972#ifdef CONFIG_X86_64
973 set_bit(IA32_SYSCALL_VECTOR, used_vectors); 972 set_bit(IA32_SYSCALL_VECTOR, used_vectors);
974#else 973
975 set_bit(SYSCALL_VECTOR, used_vectors);
976#endif
977 /* 974 /*
978 * Should be a barrier for any external CPU state: 975 * Should be a barrier for any external CPU state:
979 */ 976 */
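
This #ifdef removal relies on the irq_vectors.h hunk earlier in the merge: on 32-bit, IA32_SYSCALL_VECTOR is now defined next to SYSCALL_VECTOR and both are 0x80, so the single call below reserves the system-call gate on either bitness (illustration only):

/* with the irq_vectors.h change, on 32 bit:
 *   SYSCALL_VECTOR == IA32_SYSCALL_VECTOR == 0x80
 * so this one call replaces the old 32/64-bit pair. */
set_bit(IA32_SYSCALL_VECTOR, used_vectors);
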