Diffstat (limited to 'arch')
 arch/x86/Kconfig | 21
 arch/x86/include/asm/apic.h | 1
 arch/x86/include/asm/bigsmp/apic.h | 2
 arch/x86/include/asm/emergency-restart.h | 4
 arch/x86/include/asm/es7000/apic.h | 79
 arch/x86/include/asm/es7000/wakecpu.h | 41
 arch/x86/include/asm/genapic_32.h | 19
 arch/x86/include/asm/genapic_64.h | 2
 arch/x86/include/asm/io_apic.h | 9
 arch/x86/include/asm/irq_vectors.h | 11
 arch/x86/include/asm/mach-default/mach_apic.h | 2
 arch/x86/include/asm/mach-default/mach_wakecpu.h | 24
 arch/x86/include/asm/mach-default/smpboot_hooks.h | 8
 arch/x86/include/asm/mach-generic/mach_apic.h | 1
 arch/x86/include/asm/mach-generic/mach_wakecpu.h | 12
 arch/x86/include/asm/numaq/wakecpu.h | 24
 arch/x86/include/asm/setup.h | 3
 arch/x86/include/asm/system.h | 2
 arch/x86/kernel/acpi/boot.c | 11
 arch/x86/kernel/apm_32.c | 4
 arch/x86/kernel/es7000_32.c | 62
 arch/x86/kernel/genapic_64.c | 4
 arch/x86/kernel/io_apic.c | 625
 arch/x86/kernel/irq.c | 3
 arch/x86/kernel/irq_32.c | 2
 arch/x86/kernel/irq_64.c | 2
 arch/x86/kernel/irqinit_32.c | 3
 arch/x86/kernel/irqinit_64.c | 3
 arch/x86/kernel/mpparse.c | 25
 arch/x86/kernel/numaq_32.c | 10
 arch/x86/kernel/process.c | 16
 arch/x86/kernel/reboot.c | 31
 arch/x86/kernel/setup.c | 17
 arch/x86/kernel/smp.c | 13
 arch/x86/kernel/smpboot.c | 17
 arch/x86/mach-generic/bigsmp.c | 1
 arch/x86/mach-generic/default.c | 1
 arch/x86/mach-generic/es7000.c | 14
 arch/x86/mach-generic/probe.c | 16
 arch/x86/mach-generic/summit.c | 1
 arch/x86/pci/direct.c | 4
 arch/x86/pci/pci.h | 1
 42 files changed, 724 insertions(+), 427 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c7235e643aff..d99eeb7915c6 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -241,6 +241,16 @@ config X86_HAS_BOOT_CPU_ID
 	def_bool y
 	depends on X86_VOYAGER
 
+config SPARSE_IRQ
+	bool "Support sparse irq numbering"
+	depends on (PCI_MSI || HT_IRQ) && SMP
+	default y
+	help
+	  This enables support for sparse irqs, especially for MSI/MSI-X.
+	  You may need it if you have lots of MSI-X capable cards installed.
+
+	  If you don't know what to do here, say Y.
+
 config X86_FIND_SMP_CONFIG
 	def_bool y
 	depends on X86_MPPARSE || X86_VOYAGER
@@ -468,10 +478,6 @@ config X86_CYCLONE_TIMER
 	def_bool y
 	depends on X86_GENERICARCH
 
-config ES7000_CLUSTERED_APIC
-	def_bool y
-	depends on SMP && X86_ES7000 && MPENTIUMIII
-
 source "arch/x86/Kconfig.cpu"
 
 config HPET_TIMER
@@ -1635,13 +1641,6 @@ config APM_ALLOW_INTS
 	  many of the newer IBM Thinkpads. If you experience hangs when you
 	  suspend, try setting this to Y. Otherwise, say N.
 
-config APM_REAL_MODE_POWER_OFF
-	bool "Use real mode APM BIOS call to power off"
-	help
-	  Use real mode APM BIOS calls to switch off the computer. This is
-	  a work-around for a number of buggy BIOSes. Switch this option on if
-	  your computer crashes instead of powering off properly.
-
 endif # APM
 
 source "arch/x86/kernel/cpu/cpufreq/Kconfig"
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 3b1510b4fc57..25caa0738af5 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -193,6 +193,7 @@ extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask);
 static inline void lapic_shutdown(void) { }
 #define local_apic_timer_c2_ok 1
 static inline void init_apic_mappings(void) { }
+static inline void disable_local_APIC(void) { }
 
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
index 1d9543b9d358..ce547f24a1cd 100644
--- a/arch/x86/include/asm/bigsmp/apic.h
+++ b/arch/x86/include/asm/bigsmp/apic.h
@@ -24,8 +24,6 @@ static inline cpumask_t target_cpus(void)
 #define INT_DELIVERY_MODE (dest_Fixed)
 #define INT_DEST_MODE (0) /* phys delivery to target proc */
 #define NO_BALANCE_IRQ (0)
-#define WAKE_SECONDARY_VIA_INIT
-
 
 static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
 {
diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
index 94826cf87455..cc70c1c78ca4 100644
--- a/arch/x86/include/asm/emergency-restart.h
+++ b/arch/x86/include/asm/emergency-restart.h
@@ -8,7 +8,9 @@ enum reboot_type {
 	BOOT_BIOS = 'b',
 #endif
 	BOOT_ACPI = 'a',
-	BOOT_EFI = 'e'
+	BOOT_EFI = 'e',
+	BOOT_CF9 = 'p',
+	BOOT_CF9_COND = 'q',
 };
 
 extern enum reboot_type reboot_type;
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
index 380f0b4f17ed..e24ef876915f 100644
--- a/arch/x86/include/asm/es7000/apic.h
+++ b/arch/x86/include/asm/es7000/apic.h
@@ -9,31 +9,27 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-static inline cpumask_t target_cpus(void)
+static inline cpumask_t target_cpus_cluster(void)
 {
-#if defined CONFIG_ES7000_CLUSTERED_APIC
 	return CPU_MASK_ALL;
-#else
+}
+
+static inline cpumask_t target_cpus(void)
+{
 	return cpumask_of_cpu(smp_processor_id());
-#endif
 }
 
-#if defined CONFIG_ES7000_CLUSTERED_APIC
-#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
-#define INT_DELIVERY_MODE (dest_LowestPrio)
-#define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */
-#define NO_BALANCE_IRQ (1)
-#undef WAKE_SECONDARY_VIA_INIT
-#define WAKE_SECONDARY_VIA_MIP
-#else
+#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
+#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
+#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
+#define NO_BALANCE_IRQ_CLUSTER (1)
+
 #define APIC_DFR_VALUE (APIC_DFR_FLAT)
 #define INT_DELIVERY_MODE (dest_Fixed)
 #define INT_DEST_MODE (0) /* phys delivery to target procs */
 #define NO_BALANCE_IRQ (0)
 #undef APIC_DEST_LOGICAL
 #define APIC_DEST_LOGICAL 0x0
-#define WAKE_SECONDARY_VIA_INIT
-#endif
 
 static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
 {
@@ -60,6 +56,16 @@ static inline unsigned long calculate_ldr(int cpu)
  * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
  * document number 292116). So here it goes...
  */
+static inline void init_apic_ldr_cluster(void)
+{
+	unsigned long val;
+	int cpu = smp_processor_id();
+
+	apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
+	val = calculate_ldr(cpu);
+	apic_write(APIC_LDR, val);
+}
+
 static inline void init_apic_ldr(void)
 {
 	unsigned long val;
@@ -70,10 +76,6 @@ static inline void init_apic_ldr(void)
 	apic_write(APIC_LDR, val);
 }
 
-#ifndef CONFIG_X86_GENERICARCH
-extern void enable_apic_mode(void);
-#endif
-
 extern int apic_version [MAX_APICS];
 static inline void setup_apic_routing(void)
 {
@@ -144,7 +146,7 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid)
 	return (1);
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
@@ -154,11 +156,7 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	num_bits_set = cpus_weight(cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
-#if defined CONFIG_ES7000_CLUSTERED_APIC
 		return 0xFF;
-#else
-		return cpu_to_logical_apicid(0);
-#endif
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
@@ -171,11 +169,40 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 		if (apicid_cluster(apicid) !=
 				apicid_cluster(new_apicid)){
 			printk ("%s: Not a valid mask!\n", __func__);
-#if defined CONFIG_ES7000_CLUSTERED_APIC
 			return 0xFF;
-#else
+			}
+			apicid = new_apicid;
+			cpus_found++;
+		}
+		cpu++;
+	}
+	return apicid;
+}
+
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+	int num_bits_set;
+	int cpus_found = 0;
+	int cpu;
+	int apicid;
+
+	num_bits_set = cpus_weight(cpumask);
+	/* Return id to all */
+	if (num_bits_set == NR_CPUS)
+		return cpu_to_logical_apicid(0);
+	/*
+	 * The cpus in the mask must all be on the apic cluster. If are not
+	 * on the same apicid cluster return default value of TARGET_CPUS.
+	 */
+	cpu = first_cpu(cpumask);
+	apicid = cpu_to_logical_apicid(cpu);
+	while (cpus_found < num_bits_set) {
+		if (cpu_isset(cpu, cpumask)) {
+			int new_apicid = cpu_to_logical_apicid(cpu);
+			if (apicid_cluster(apicid) !=
+					apicid_cluster(new_apicid)){
+				printk ("%s: Not a valid mask!\n", __func__);
 				return cpu_to_logical_apicid(0);
-#endif
 			}
 			apicid = new_apicid;
 			cpus_found++;
diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h
index 398493461913..78f0daaee436 100644
--- a/arch/x86/include/asm/es7000/wakecpu.h
+++ b/arch/x86/include/asm/es7000/wakecpu.h
@@ -1,36 +1,12 @@
 #ifndef __ASM_ES7000_WAKECPU_H
 #define __ASM_ES7000_WAKECPU_H
 
-/*
- * This file copes with machines that wakeup secondary CPUs by the
- * INIT, INIT, STARTUP sequence.
- */
-
-#ifdef CONFIG_ES7000_CLUSTERED_APIC
-#define WAKE_SECONDARY_VIA_MIP
-#else
-#define WAKE_SECONDARY_VIA_INIT
-#endif
-
-#ifdef WAKE_SECONDARY_VIA_MIP
-extern int es7000_start_cpu(int cpu, unsigned long eip);
-static inline int
-wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
-{
-	int boot_error = 0;
-	boot_error = es7000_start_cpu(phys_apicid, start_eip);
-	return boot_error;
-}
-#endif
-
-#define TRAMPOLINE_LOW phys_to_virt(0x467)
-#define TRAMPOLINE_HIGH phys_to_virt(0x469)
-
-#define boot_cpu_apicid boot_cpu_physical_apicid
+#define TRAMPOLINE_PHYS_LOW	0x467
+#define TRAMPOLINE_PHYS_HIGH	0x469
 
 static inline void wait_for_init_deassert(atomic_t *deassert)
 {
-#ifdef WAKE_SECONDARY_VIA_INIT
+#ifndef CONFIG_ES7000_CLUSTERED_APIC
 	while (!atomic_read(deassert))
 		cpu_relax();
 #endif
@@ -50,9 +26,12 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
 {
 }
 
-#define inquire_remote_apic(apicid) do {		\
-	if (apic_verbosity >= APIC_DEBUG)		\
-		__inquire_remote_apic(apicid);		\
-	} while (0)
+extern void __inquire_remote_apic(int apicid);
+
+static inline void inquire_remote_apic(int apicid)
+{
+	if (apic_verbosity >= APIC_DEBUG)
+		__inquire_remote_apic(apicid);
+}
 
 #endif /* __ASM_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
index 5cbd4fcc06fd..0ac17d33a8c7 100644
--- a/arch/x86/include/asm/genapic_32.h
+++ b/arch/x86/include/asm/genapic_32.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_GENAPIC_32_H
 
 #include <asm/mpspec.h>
+#include <asm/atomic.h>
 
 /*
  * Generic APIC driver interface.
@@ -65,6 +66,14 @@ struct genapic {
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 #endif
+	int (*wakeup_cpu)(int apicid, unsigned long start_eip);
+	int trampoline_phys_low;
+	int trampoline_phys_high;
+	void (*wait_for_init_deassert)(atomic_t *deassert);
+	void (*smp_callin_clear_local_apic)(void);
+	void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
+	void (*restore_NMI_vector)(unsigned short *high, unsigned short *low);
+	void (*inquire_remote_apic)(int apicid);
 };
 
 #define APICFUNC(x) .x = x,
@@ -105,16 +114,24 @@ struct genapic {
 	APICFUNC(get_apic_id)				\
 	.apic_id_mask = APIC_ID_MASK,			\
 	APICFUNC(cpu_mask_to_apicid)			\
 	APICFUNC(vector_allocation_domain)		\
 	APICFUNC(acpi_madt_oem_check)			\
 	IPIFUNC(send_IPI_mask)				\
 	IPIFUNC(send_IPI_allbutself)			\
 	IPIFUNC(send_IPI_all)				\
 	APICFUNC(enable_apic_mode)			\
 	APICFUNC(phys_pkg_id)				\
+	.trampoline_phys_low = TRAMPOLINE_PHYS_LOW,	\
+	.trampoline_phys_high = TRAMPOLINE_PHYS_HIGH,	\
+	APICFUNC(wait_for_init_deassert)		\
+	APICFUNC(smp_callin_clear_local_apic)		\
+	APICFUNC(store_NMI_vector)			\
+	APICFUNC(restore_NMI_vector)			\
+	APICFUNC(inquire_remote_apic)			\
 }
 
 extern struct genapic *genapic;
+extern void es7000_update_genapic_to_cluster(void);
 
 enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
 #define get_uv_system_type() UV_NONE
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
index 13c4e96199ea..2cae011668b7 100644
--- a/arch/x86/include/asm/genapic_64.h
+++ b/arch/x86/include/asm/genapic_64.h
@@ -32,6 +32,8 @@ struct genapic {
 	unsigned int (*get_apic_id)(unsigned long x);
 	unsigned long (*set_apic_id)(unsigned int id);
 	unsigned long apic_id_mask;
+	/* wakeup_secondary_cpu */
+	int (*wakeup_cpu)(int apicid, unsigned long start_eip);
 };
 
 extern struct genapic *genapic;
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 6afd9933a7dd..25d527ca1362 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -188,17 +188,14 @@ extern void restore_IO_APIC_setup(void);
 extern void reinit_intr_remapped_IO_APIC(int);
 #endif
 
-extern int probe_nr_irqs(void);
+extern void probe_nr_irqs_gsi(void);
 
 #else /* !CONFIG_X86_IO_APIC */
 #define io_apic_assign_pci_irqs 0
 static const int timer_through_8259 = 0;
 static inline void ioapic_init_mappings(void) { }
 
-static inline int probe_nr_irqs(void)
-{
-	return NR_IRQS;
-}
+static inline void probe_nr_irqs_gsi(void) { }
 #endif
 
 #endif /* _ASM_X86_IO_APIC_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 0005adb0f941..f7ff65032b9d 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -101,12 +101,23 @@
 #define LAST_VM86_IRQ		15
 #define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
 
+#define NR_IRQS_LEGACY 16
+
 #if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
+
+#ifndef CONFIG_SPARSE_IRQ
 # if NR_CPUS < MAX_IO_APICS
 #  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
 # else
 #  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
 # endif
+#else
+# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
+#  define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
+# else
+#  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
+# endif
+#endif
 
 #elif defined(CONFIG_X86_VOYAGER)
 
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
index ff3a6c236c00..6cb3a467e067 100644
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ b/arch/x86/include/asm/mach-default/mach_apic.h
@@ -32,11 +32,13 @@ static inline cpumask_t target_cpus(void)
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
 #define send_IPI_self (genapic->send_IPI_self)
+#define wakeup_secondary_cpu (genapic->wakeup_cpu)
 extern void setup_apic_routing(void);
 #else
 #define INT_DELIVERY_MODE dest_LowestPrio
 #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
 #define TARGET_CPUS (target_cpus())
+#define wakeup_secondary_cpu wakeup_secondary_cpu_via_init
 /*
  * Set up the logical destination ID.
  *
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
index 9d80db91e992..ceb013660146 100644
--- a/arch/x86/include/asm/mach-default/mach_wakecpu.h
+++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h
@@ -1,17 +1,8 @@
 #ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
 #define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
 
-/*
- * This file copes with machines that wakeup secondary CPUs by the
- * INIT, INIT, STARTUP sequence.
- */
-
-#define WAKE_SECONDARY_VIA_INIT
-
-#define TRAMPOLINE_LOW phys_to_virt(0x467)
-#define TRAMPOLINE_HIGH phys_to_virt(0x469)
-
-#define boot_cpu_apicid boot_cpu_physical_apicid
+#define TRAMPOLINE_PHYS_LOW (0x467)
+#define TRAMPOLINE_PHYS_HIGH (0x469)
 
 static inline void wait_for_init_deassert(atomic_t *deassert)
 {
@@ -33,9 +24,12 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
 {
 }
 
-#define inquire_remote_apic(apicid) do {		\
-	if (apic_verbosity >= APIC_DEBUG)		\
-		__inquire_remote_apic(apicid);		\
-	} while (0)
+extern void __inquire_remote_apic(int apicid);
+
+static inline void inquire_remote_apic(int apicid)
+{
+	if (apic_verbosity >= APIC_DEBUG)
+		__inquire_remote_apic(apicid);
+}
 
 #endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/mach-default/smpboot_hooks.h b/arch/x86/include/asm/mach-default/smpboot_hooks.h
index dbab36d64d48..23bf52103b89 100644
--- a/arch/x86/include/asm/mach-default/smpboot_hooks.h
+++ b/arch/x86/include/asm/mach-default/smpboot_hooks.h
@@ -13,9 +13,11 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
 	CMOS_WRITE(0xa, 0xf);
 	local_flush_tlb();
 	pr_debug("1.\n");
-	*((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
+	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
+							 start_eip >> 4;
 	pr_debug("2.\n");
-	*((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
+	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
+							 start_eip & 0xf;
 	pr_debug("3.\n");
 }
 
@@ -32,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
 	 */
 	CMOS_WRITE(0, 0xf);
 
-	*((volatile long *) phys_to_virt(0x467)) = 0;
+	*((volatile long *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
 }
 
 static inline void __init smpboot_setup_io_apic(void)
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h
index 5180bd7478fb..e430f47df667 100644
--- a/arch/x86/include/asm/mach-generic/mach_apic.h
+++ b/arch/x86/include/asm/mach-generic/mach_apic.h
@@ -27,6 +27,7 @@
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define enable_apic_mode (genapic->enable_apic_mode)
 #define phys_pkg_id (genapic->phys_pkg_id)
+#define wakeup_secondary_cpu (genapic->wakeup_cpu)
 
 extern void generic_bigsmp_probe(void);
 
diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h
new file mode 100644
index 000000000000..1ab16b168c8a
--- /dev/null
+++ b/arch/x86/include/asm/mach-generic/mach_wakecpu.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
+#define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
+
+#define TRAMPOLINE_PHYS_LOW (genapic->trampoline_phys_low)
+#define TRAMPOLINE_PHYS_HIGH (genapic->trampoline_phys_high)
+#define wait_for_init_deassert (genapic->wait_for_init_deassert)
+#define smp_callin_clear_local_apic (genapic->smp_callin_clear_local_apic)
+#define store_NMI_vector (genapic->store_NMI_vector)
+#define restore_NMI_vector (genapic->restore_NMI_vector)
+#define inquire_remote_apic (genapic->inquire_remote_apic)
+
+#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h
index c577bda5b1c5..6f499df8eddb 100644
--- a/arch/x86/include/asm/numaq/wakecpu.h
+++ b/arch/x86/include/asm/numaq/wakecpu.h
@@ -3,12 +3,8 @@
 
 /* This file copes with machines that wakeup secondary CPUs by NMIs */
 
-#define WAKE_SECONDARY_VIA_NMI
-
-#define TRAMPOLINE_LOW phys_to_virt(0x8)
-#define TRAMPOLINE_HIGH phys_to_virt(0xa)
-
-#define boot_cpu_apicid boot_cpu_logical_apicid
+#define TRAMPOLINE_PHYS_LOW (0x8)
+#define TRAMPOLINE_PHYS_HIGH (0xa)
 
 /* We don't do anything here because we use NMI's to boot instead */
 static inline void wait_for_init_deassert(atomic_t *deassert)
@@ -27,17 +23,23 @@ static inline void smp_callin_clear_local_apic(void)
 static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
 {
 	printk("Storing NMI vector\n");
-	*high = *((volatile unsigned short *) TRAMPOLINE_HIGH);
-	*low = *((volatile unsigned short *) TRAMPOLINE_LOW);
+	*high =
+	  *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH));
+	*low =
+	  *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW));
 }
 
 static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
 {
 	printk("Restoring NMI vector\n");
-	*((volatile unsigned short *) TRAMPOLINE_HIGH) = *high;
-	*((volatile unsigned short *) TRAMPOLINE_LOW) = *low;
+	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
+								 *high;
+	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
+								 *low;
 }
 
-#define inquire_remote_apic(apicid) {}
+static inline void inquire_remote_apic(int apicid)
+{
+}
 
 #endif /* __ASM_NUMAQ_WAKECPU_H */
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index f12d37237465..294daeb3a006 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -16,6 +16,8 @@ static inline void visws_early_detect(void) { }
 static inline int is_visws_box(void) { return 0; }
 #endif
 
+extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
+extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);
 /*
  * Any setup quirks to be performed?
  */
@@ -39,6 +41,7 @@ struct x86_quirks {
 	void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable,
 				 unsigned short oemsize);
 	int (*setup_ioapic_ids)(void);
+	int (*update_genapic)(void);
 };
 
 extern struct x86_quirks *x86_quirks;
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 2ed3f0f44ff7..07c3e4048991 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -314,6 +314,8 @@ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 void default_idle(void);
 
+void stop_this_cpu(void *dummy);
+
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 4c51a2f8fd31..65d0b72777ea 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1360,6 +1360,17 @@ static void __init acpi_process_madt(void)
 			disable_acpi();
 		}
 	}
+
+	/*
+	 * ACPI supports both logical (e.g. Hyper-Threading) and physical
+	 * processors, where MPS only supports physical.
+	 */
+	if (acpi_lapic && acpi_ioapic)
+		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
+		       "information\n");
+	else if (acpi_lapic)
+		printk(KERN_INFO "Using ACPI for processor (LAPIC) "
+		       "configuration information\n");
 #endif
 	return;
 }
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 5145a6e72bbb..3a26525a3f31 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -391,11 +391,7 @@ static int power_off;
 #else
 static int power_off = 1;
 #endif
-#ifdef CONFIG_APM_REAL_MODE_POWER_OFF
-static int realmode_power_off = 1;
-#else
 static int realmode_power_off;
-#endif
 #ifdef CONFIG_APM_ALLOW_INTS
 static int allow_ints = 1;
 #else
diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c
index 0aa2c443d600..53699c931ad4 100644
--- a/arch/x86/kernel/es7000_32.c
+++ b/arch/x86/kernel/es7000_32.c
@@ -38,8 +38,11 @@
 #include <asm/io.h>
 #include <asm/nmi.h>
 #include <asm/smp.h>
+#include <asm/atomic.h>
 #include <asm/apicdef.h>
 #include <mach_mpparse.h>
+#include <asm/genapic.h>
+#include <asm/setup.h>
 
 /*
  * ES7000 chipsets
@@ -161,6 +164,43 @@ es7000_rename_gsi(int ioapic, int gsi)
 	return gsi;
 }
 
+static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
+{
+	unsigned long vect = 0, psaival = 0;
+
+	if (psai == NULL)
+		return -1;
+
+	vect = ((unsigned long)__pa(eip)/0x1000) << 16;
+	psaival = (0x1000000 | vect | cpu);
+
+	while (*psai & 0x1000000)
+		;
+
+	*psai = psaival;
+
+	return 0;
+}
+
+static void noop_wait_for_deassert(atomic_t *deassert_not_used)
+{
+}
+
+static int __init es7000_update_genapic(void)
+{
+	genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
+
+	/* MPENTIUMIII */
+	if (boot_cpu_data.x86 == 6 &&
+	    (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) {
+		es7000_update_genapic_to_cluster();
+		genapic->wait_for_init_deassert = noop_wait_for_deassert;
+		genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
+	}
+
+	return 0;
+}
+
 void __init
 setup_unisys(void)
 {
@@ -176,6 +216,8 @@ setup_unisys(void)
 	else
 		es7000_plat = ES7000_CLASSIC;
 	ioapic_renumber_irq = es7000_rename_gsi;
+
+	x86_quirks->update_genapic = es7000_update_genapic;
 }
 
 /*
@@ -317,26 +359,6 @@ es7000_mip_write(struct mip_reg *mip_reg)
 	return status;
 }
 
-int
-es7000_start_cpu(int cpu, unsigned long eip)
-{
-	unsigned long vect = 0, psaival = 0;
-
-	if (psai == NULL)
-		return -1;
-
-	vect = ((unsigned long)__pa(eip)/0x1000) << 16;
-	psaival = (0x1000000 | vect | cpu);
-
-	while (*psai & 0x1000000)
-		;
-
-	*psai = psaival;
-
-	return 0;
-
-}
-
 void __init
 es7000_sw_apic(void)
 {
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 6c9bfc9e1e95..2bced78b0b8e 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -21,6 +21,7 @@
 #include <asm/smp.h>
 #include <asm/ipi.h>
 #include <asm/genapic.h>
+#include <asm/setup.h>
 
 extern struct genapic apic_flat;
 extern struct genapic apic_physflat;
@@ -53,6 +54,9 @@ void __init setup_apic_routing(void)
 		genapic = &apic_physflat;
 		printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
 	}
+
+	if (x86_quirks->update_genapic)
+		x86_quirks->update_genapic();
 }
 
 /* Same for both flat and physical. */
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 9043251210fb..a1a2e070f31a 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -108,8 +108,33 @@ static int __init parse_noapic(char *str)
 early_param("noapic", parse_noapic);
 
 struct irq_pin_list;
+
+/*
+ * This is performance-critical, we want to do it O(1)
+ *
+ * the indexing order of this array favors 1:1 mappings
+ * between pins and IRQs.
+ */
+
+struct irq_pin_list {
+	int apic, pin;
+	struct irq_pin_list *next;
+};
+
+static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
+{
+	struct irq_pin_list *pin;
+	int node;
+
+	node = cpu_to_node(cpu);
+
+	pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
+	printk(KERN_DEBUG "  alloc irq_2_pin on cpu %d node %d\n", cpu, node);
+
+	return pin;
+}
+
 struct irq_cfg {
-	unsigned int irq;
 	struct irq_pin_list *irq_2_pin;
 	cpumask_t domain;
 	cpumask_t old_domain;
@@ -119,81 +144,95 @@ struct irq_cfg {
 };
 
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_cfg irq_cfgx[] = {
+#else
 static struct irq_cfg irq_cfgx[NR_IRQS] = {
-	[0]  = { .irq =  0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
-	[1]  = { .irq =  1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
-	[2]  = { .irq =  2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
-	[3]  = { .irq =  3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
-	[4]  = { .irq =  4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
-	[5]  = { .irq =  5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
-	[6]  = { .irq =  6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
-	[7]  = { .irq =  7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
-	[8]  = { .irq =  8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
-	[9]  = { .irq =  9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
-	[10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
-	[11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
-	[12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
-	[13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
-	[14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
-	[15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
+#endif
+	[0]  = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
+	[1]  = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
+	[2]  = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
+	[3]  = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
+	[4]  = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
+	[5]  = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
+	[6]  = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
+	[7]  = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
+	[8]  = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
+	[9]  = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
+	[10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
+	[11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
+	[12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
+	[13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
+	[14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
+	[15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
 };
 
-#define for_each_irq_cfg(irq, cfg)		\
-	for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++)
-
-static struct irq_cfg *irq_cfg(unsigned int irq)
+void __init arch_early_irq_init(void)
 {
-	return irq < nr_irqs ? irq_cfgx + irq : NULL;
+	struct irq_cfg *cfg;
+	struct irq_desc *desc;
+	int count;
+	int i;
+
+	cfg = irq_cfgx;
+	count = ARRAY_SIZE(irq_cfgx);
+
+	for (i = 0; i < count; i++) {
+		desc = irq_to_desc(i);
+		desc->chip_data = &cfg[i];
+	}
 }
 
-static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_cfg *irq_cfg(unsigned int irq)
 {
-	return irq_cfg(irq);
+	struct irq_cfg *cfg = NULL;
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	if (desc)
+		cfg = desc->chip_data;
+
+	return cfg;
 }
 
-/*
- * Rough estimation of how many shared IRQs there are, can be changed
- * anytime.
- */
-#define MAX_PLUS_SHARED_IRQS NR_IRQS
-#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
+static struct irq_cfg *get_one_free_irq_cfg(int cpu)
+{
+	struct irq_cfg *cfg;
+	int node;
 
-/*
- * This is performance-critical, we want to do it O(1)
- *
- * the indexing order of this array favors 1:1 mappings
- * between pins and IRQs.
- */
+	node = cpu_to_node(cpu);
 
-struct irq_pin_list {
-	int apic, pin;
-	struct irq_pin_list *next;
-};
+	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
+	printk(KERN_DEBUG "  alloc irq_cfg on cpu %d node %d\n", cpu, node);
 
-static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE];
-static struct irq_pin_list *irq_2_pin_ptr;
+	return cfg;
+}
 
-static void __init irq_2_pin_init(void)
+void arch_init_chip_data(struct irq_desc *desc, int cpu)
 {
-	struct irq_pin_list *pin = irq_2_pin_head;
-	int i;
-
-	for (i = 1; i < PIN_MAP_SIZE; i++)
-		pin[i-1].next = &pin[i];
+	struct irq_cfg *cfg;
 
-	irq_2_pin_ptr = &pin[0];
+	cfg = desc->chip_data;
+	if (!cfg) {
+		desc->chip_data = get_one_free_irq_cfg(cpu);
+		if (!desc->chip_data) {
+			printk(KERN_ERR "can not alloc irq_cfg\n");
+			BUG_ON(1);
+		}
+	}
 }
 
-static struct irq_pin_list *get_one_free_irq_2_pin(void)
+#else
+static struct irq_cfg *irq_cfg(unsigned int irq)
 {
-	struct irq_pin_list *pin = irq_2_pin_ptr;
+	return irq < nr_irqs ? irq_cfgx + irq : NULL;
+}
 
-	if (!pin)
-		panic("can not get more irq_2_pin\n");
+#endif
 
-	irq_2_pin_ptr = pin->next;
-	pin->next = NULL;
-	return pin;
+static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+{
 }
 
 struct io_apic {
@@ -237,11 +276,10 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
 	writel(value, &io_apic->data);
 }
 
-static bool io_apic_level_ack_pending(unsigned int irq)
+static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
 {
 	struct irq_pin_list *entry;
 	unsigned long flags;
-	struct irq_cfg *cfg = irq_cfg(irq);
 
 	spin_lock_irqsave(&ioapic_lock, flags);
 	entry = cfg->irq_2_pin;
@@ -323,13 +361,12 @@ static void ioapic_mask_entry(int apic, int pin)
 }
 
 #ifdef CONFIG_SMP
-static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
+static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 {
 	int apic, pin;
-	struct irq_cfg *cfg;
 	struct irq_pin_list *entry;
+	u8 vector = cfg->vector;
 
-	cfg = irq_cfg(irq);
 	entry = cfg->irq_2_pin;
 	for (;;) {
 		unsigned int reg;
@@ -359,24 +396,27 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
 	}
 }
 
-static int assign_irq_vector(int irq, cpumask_t mask);
+static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
 
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
 {
 	struct irq_cfg *cfg;
 	unsigned long flags;
 	unsigned int dest;
 	cpumask_t tmp;
-	struct irq_desc *desc;
+	unsigned int irq;
 
 	cpus_and(tmp, mask, cpu_online_map);
 	if (cpus_empty(tmp))
 		return;
 
-	cfg = irq_cfg(irq);
-	if (assign_irq_vector(irq, mask))
+	irq = desc->irq;
+	cfg = desc->chip_data;
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
+	set_extra_move_desc(desc, mask);
+
 	cpus_and(tmp, cfg->domain, mask);
 	dest = cpu_mask_to_apicid(tmp);
 	/*
@@ -384,12 +424,20 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
 	 */
 	dest = SET_APIC_LOGICAL_ID(dest);
 
-	desc = irq_to_desc(irq);
 	spin_lock_irqsave(&ioapic_lock, flags);
-	__target_IO_APIC_irq(irq, dest, cfg->vector);
+	__target_IO_APIC_irq(irq, dest, cfg);
 	desc->affinity = mask;
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
+
+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+
+	set_ioapic_affinity_irq_desc(desc, mask);
+}
 #endif /* CONFIG_SMP */
 
 /*
@@ -397,16 +445,18 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
  * shared ISA-space IRQs, so we have to support them. We are super
  * fast in the common case, and fast for shared ISA-space IRQs.
  */
-static void add_pin_to_irq(unsigned int irq, int apic, int pin)
+static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin)
 {
-	struct irq_cfg *cfg;
 	struct irq_pin_list *entry;
 
-	/* first time to refer irq_cfg, so with new */
-	cfg = irq_cfg_alloc(irq);
 	entry = cfg->irq_2_pin;
 	if (!entry) {
-		entry = get_one_free_irq_2_pin();
+		entry = get_one_free_irq_2_pin(cpu);
+		if (!entry) {
+			printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n",
+					apic, pin);
+			return;
+		}
 		cfg->irq_2_pin = entry;
 		entry->apic = apic;
 		entry->pin = pin;
@@ -421,7 +471,7 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
 		entry = entry->next;
 	}
 
-	entry->next = get_one_free_irq_2_pin();
+	entry->next = get_one_free_irq_2_pin(cpu);
 	entry = entry->next;
 	entry->apic = apic;
 	entry->pin = pin;
@@ -430,11 +480,10 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
 /*
  * Reroute an IRQ to a different pin.
  */
-static void __init replace_pin_at_irq(unsigned int irq,
+static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu,
 				      int oldapic, int oldpin,
 				      int newapic, int newpin)
 {
-	struct irq_cfg *cfg = irq_cfg(irq);
 	struct irq_pin_list *entry = cfg->irq_2_pin;
 	int replaced = 0;
 
@@ -451,18 +500,16 @@ static void __init replace_pin_at_irq(unsigned int irq,
 
 	/* why? call replace before add? */
 	if (!replaced)
-		add_pin_to_irq(irq, newapic, newpin);
+		add_pin_to_irq_cpu(cfg, cpu, newapic, newpin);
 }
 
-static inline void io_apic_modify_irq(unsigned int irq,
+static inline void io_apic_modify_irq(struct irq_cfg *cfg,
 				      int mask_and, int mask_or,
 				      void (*final)(struct irq_pin_list *entry))
 {
 	int pin;
-	struct irq_cfg *cfg;
 	struct irq_pin_list *entry;
 
-	cfg = irq_cfg(irq);
 	for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
 		unsigned int reg;
 		pin = entry->pin;
@@ -475,9 +522,9 @@ static inline void io_apic_modify_irq(unsigned int irq,
 	}
 }
 
-static void __unmask_IO_APIC_irq(unsigned int irq)
+static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED, 0, NULL);
+	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
 }
 
 #ifdef CONFIG_X86_64
@@ -492,47 +539,64 @@ void io_apic_sync(struct irq_pin_list *entry)
 	readl(&io_apic->data);
 }
 
-static void __mask_IO_APIC_irq(unsigned int irq)
+static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
+	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
 }
 #else /* CONFIG_X86_32 */
-static void __mask_IO_APIC_irq(unsigned int irq)
+static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, NULL);
+	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL);
 }
 
-static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
+static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~IO_APIC_REDIR_LEVEL_TRIGGER,
+	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER,
 			   IO_APIC_REDIR_MASKED, NULL);
 }
 
-static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
+static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
 {
-	io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED,
+	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
 			   IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
 }
 #endif /* CONFIG_X86_32 */
 
-static void mask_IO_APIC_irq (unsigned int irq)
+static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
 {
+	struct irq_cfg *cfg = desc->chip_data;
 	unsigned long flags;
 
+	BUG_ON(!cfg);
+
 	spin_lock_irqsave(&ioapic_lock, flags);
-	__mask_IO_APIC_irq(irq);
+	__mask_IO_APIC_irq(cfg);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void unmask_IO_APIC_irq (unsigned int irq)
+static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
 {
+	struct irq_cfg *cfg = desc->chip_data;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ioapic_lock, flags);
-	__unmask_IO_APIC_irq(irq);
+	__unmask_IO_APIC_irq(cfg);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
+static void mask_IO_APIC_irq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	mask_IO_APIC_irq_desc(desc);
+}
+static void unmask_IO_APIC_irq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	unmask_IO_APIC_irq_desc(desc);
+}
+
 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
 {
 	struct IO_APIC_route_entry entry;
@@ -809,7 +873,7 @@ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
  */
 static int EISA_ELCR(unsigned int irq)
 {
-	if (irq < 16) {
+	if (irq < NR_IRQS_LEGACY) {
 		unsigned int port = 0x4d0 + (irq >> 3);
 		return (inb(port) >> (irq & 7)) & 1;
 	}
@@ -1034,7 +1098,7 @@ void unlock_vector_lock(void)
 	spin_unlock(&vector_lock);
 }
 
-static int __assign_irq_vector(int irq, cpumask_t mask)
+static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
 {
 	/*
 	 * NOTE! The local APIC isn't very good at handling
@@ -1050,16 +1114,13 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
 	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
 	unsigned int old_vector;
 	int cpu;
-	struct irq_cfg *cfg;
 
-	cfg = irq_cfg(irq);
+	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
+		return -EBUSY;
 
 	/* Only try and allocate irqs on cpus that are present */
 	cpus_and(mask, mask, cpu_online_map);
 
-	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
-		return -EBUSY;
-
 	old_vector = cfg->vector;
 	if (old_vector) {
 		cpumask_t tmp;
@@ -1113,24 +1174,22 @@ next:
 	return -ENOSPC;
 }
 
-static int assign_irq_vector(int irq, cpumask_t mask)
+static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
 {
 	int err;
 	unsigned long flags;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	err = __assign_irq_vector(irq, mask);
+	err = __assign_irq_vector(irq, cfg, mask);
 	spin_unlock_irqrestore(&vector_lock, flags);
 	return err;
 }
 
-static void __clear_irq_vector(int irq)
+static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 {
-	struct irq_cfg *cfg;
 	cpumask_t mask;
 	int cpu, vector;
 
-	cfg = irq_cfg(irq);
 	BUG_ON(!cfg->vector);
 
 	vector = cfg->vector;
@@ -1162,9 +1221,13 @@ void __setup_vector_irq(int cpu)
 	/* This function must be called with vector_lock held */
 	int irq, vector;
 	struct irq_cfg *cfg;
+	struct irq_desc *desc;
 
 	/* Mark the inuse vectors */
-	for_each_irq_cfg(irq, cfg) {
+	for_each_irq_desc(irq, desc) {
+		if (!desc)
+			continue;
+		cfg = desc->chip_data;
 		if (!cpu_isset(cpu, cfg->domain))
 			continue;
 		vector = cfg->vector;
@@ -1215,11 +1278,8 @@ static inline int IO_APIC_irq_trigger(int irq)
 }
 #endif
 
-static void ioapic_register_intr(int irq, unsigned long trigger)
+static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
 {
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
 
 	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
 	    trigger == IOAPIC_LEVEL)
@@ -1311,7 +1371,7 @@ static int setup_ioapic_entry(int apic, int irq,
 	return 0;
 }
 
-static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
+static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc,
 			      int trigger, int polarity)
 {
 	struct irq_cfg *cfg;
@@ -1321,10 +1381,10 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
 	if (!IO_APIC_IRQ(irq))
 		return;
 
-	cfg = irq_cfg(irq);
+	cfg = desc->chip_data;
 
 	mask = TARGET_CPUS;
-	if (assign_irq_vector(irq, mask))
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
 	cpus_and(mask, cfg->domain, mask);
@@ -1341,12 +1401,12 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
 			       cfg->vector)) {
 		printk("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
 		       mp_ioapics[apic].mp_apicid, pin);
-		__clear_irq_vector(irq);
+		__clear_irq_vector(irq, cfg);
 		return;
 	}
 
-	ioapic_register_intr(irq, trigger);
-	if (irq < 16)
+	ioapic_register_intr(irq, desc, trigger);
+	if (irq < NR_IRQS_LEGACY)
 		disable_8259A_irq(irq);
 
 	ioapic_write_entry(apic, pin, entry);
@@ -1356,6 +1416,9 @@ static void __init setup_IO_APIC_irqs(void)
 {
 	int apic, pin, idx, irq;
 	int notcon = 0;
+	struct irq_desc *desc;
+	struct irq_cfg *cfg;
+	int cpu = boot_cpu_id;
 
 	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 
@@ -1387,9 +1450,15 @@ static void __init setup_IO_APIC_irqs(void)
 			if (multi_timer_check(apic, irq))
 				continue;
 #endif
-			add_pin_to_irq(irq, apic, pin);
+			desc = irq_to_desc_alloc_cpu(irq, cpu);
+			if (!desc) {
+				printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+				continue;
+			}
+			cfg = desc->chip_data;
+			add_pin_to_irq_cpu(cfg, cpu, apic, pin);
 
-			setup_IO_APIC_irq(apic, pin, irq,
+			setup_IO_APIC_irq(apic, pin, irq, desc,
 					  irq_trigger(idx), irq_polarity(idx));
 		}
 	}
@@ -1448,6 +1517,7 @@ __apicdebuginit(void) print_IO_APIC(void)
 	union IO_APIC_reg_03 reg_03;
 	unsigned long flags;
 	struct irq_cfg *cfg;
+	struct irq_desc *desc;
 	unsigned int irq;
 
 	if (apic_verbosity == APIC_QUIET)
@@ -1537,8 +1607,13 @@ __apicdebuginit(void) print_IO_APIC(void)
 	}
 	}
 	printk(KERN_DEBUG "IRQ to pin mappings:\n");
-	for_each_irq_cfg(irq, cfg) {
-		struct irq_pin_list *entry = cfg->irq_2_pin;
+	for_each_irq_desc(irq, desc) {
+		struct irq_pin_list *entry;
+
+		if (!desc)
+			continue;
+		cfg = desc->chip_data;
+		entry = cfg->irq_2_pin;
 		if (!entry)
 			continue;
 		printk(KERN_DEBUG "IRQ%d ", irq);
@@ -2022,14 +2097,16 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
2022{ 2097{
2023 int was_pending = 0; 2098 int was_pending = 0;
2024 unsigned long flags; 2099 unsigned long flags;
2100 struct irq_cfg *cfg;
2025 2101
2026 spin_lock_irqsave(&ioapic_lock, flags); 2102 spin_lock_irqsave(&ioapic_lock, flags);
2027 if (irq < 16) { 2103 if (irq < NR_IRQS_LEGACY) {
2028 disable_8259A_irq(irq); 2104 disable_8259A_irq(irq);
2029 if (i8259A_irq_pending(irq)) 2105 if (i8259A_irq_pending(irq))
2030 was_pending = 1; 2106 was_pending = 1;
2031 } 2107 }
2032 __unmask_IO_APIC_irq(irq); 2108 cfg = irq_cfg(irq);
2109 __unmask_IO_APIC_irq(cfg);
2033 spin_unlock_irqrestore(&ioapic_lock, flags); 2110 spin_unlock_irqrestore(&ioapic_lock, flags);
2034 2111
2035 return was_pending; 2112 return was_pending;
@@ -2092,35 +2169,37 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
2092 * as simple as edge triggered migration and we can do the irq migration 2169 * as simple as edge triggered migration and we can do the irq migration
2093 * with a simple atomic update to IO-APIC RTE. 2170 * with a simple atomic update to IO-APIC RTE.
2094 */ 2171 */
2095static void migrate_ioapic_irq(int irq, cpumask_t mask) 2172static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
2096{ 2173{
2097 struct irq_cfg *cfg; 2174 struct irq_cfg *cfg;
2098 struct irq_desc *desc;
2099 cpumask_t tmp, cleanup_mask; 2175 cpumask_t tmp, cleanup_mask;
2100 struct irte irte; 2176 struct irte irte;
2101 int modify_ioapic_rte; 2177 int modify_ioapic_rte;
2102 unsigned int dest; 2178 unsigned int dest;
2103 unsigned long flags; 2179 unsigned long flags;
2180 unsigned int irq;
2104 2181
2105 cpus_and(tmp, mask, cpu_online_map); 2182 cpus_and(tmp, mask, cpu_online_map);
2106 if (cpus_empty(tmp)) 2183 if (cpus_empty(tmp))
2107 return; 2184 return;
2108 2185
2186 irq = desc->irq;
2109 if (get_irte(irq, &irte)) 2187 if (get_irte(irq, &irte))
2110 return; 2188 return;
2111 2189
2112 if (assign_irq_vector(irq, mask)) 2190 cfg = desc->chip_data;
2191 if (assign_irq_vector(irq, cfg, mask))
2113 return; 2192 return;
2114 2193
2115 cfg = irq_cfg(irq); 2194 set_extra_move_desc(desc, mask);
2195
2116 cpus_and(tmp, cfg->domain, mask); 2196 cpus_and(tmp, cfg->domain, mask);
2117 dest = cpu_mask_to_apicid(tmp); 2197 dest = cpu_mask_to_apicid(tmp);
2118 2198
2119 desc = irq_to_desc(irq);
2120 modify_ioapic_rte = desc->status & IRQ_LEVEL; 2199 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2121 if (modify_ioapic_rte) { 2200 if (modify_ioapic_rte) {
2122 spin_lock_irqsave(&ioapic_lock, flags); 2201 spin_lock_irqsave(&ioapic_lock, flags);
2123 __target_IO_APIC_irq(irq, dest, cfg->vector); 2202 __target_IO_APIC_irq(irq, dest, cfg);
2124 spin_unlock_irqrestore(&ioapic_lock, flags); 2203 spin_unlock_irqrestore(&ioapic_lock, flags);
2125 } 2204 }
2126 2205
@@ -2142,14 +2221,14 @@ static void migrate_ioapic_irq(int irq, cpumask_t mask)
2142 desc->affinity = mask; 2221 desc->affinity = mask;
2143} 2222}
2144 2223
2145static int migrate_irq_remapped_level(int irq) 2224static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
2146{ 2225{
2147 int ret = -1; 2226 int ret = -1;
2148 struct irq_desc *desc = irq_to_desc(irq); 2227 struct irq_cfg *cfg = desc->chip_data;
2149 2228
2150 mask_IO_APIC_irq(irq); 2229 mask_IO_APIC_irq_desc(desc);
2151 2230
2152 if (io_apic_level_ack_pending(irq)) { 2231 if (io_apic_level_ack_pending(cfg)) {
2153 /* 2232 /*
2154 * Interrupt in progress. Migrating irq now will change the 2233 * Interrupt in progress. Migrating irq now will change the
2155 * vector information in the IO-APIC RTE and that will confuse 2234 * vector information in the IO-APIC RTE and that will confuse
@@ -2161,14 +2240,15 @@ static int migrate_irq_remapped_level(int irq)
2161 } 2240 }
2162 2241
2163 /* everything is clear. we have right of way */ 2242 /* everything is clear. we have right of way */
2164 migrate_ioapic_irq(irq, desc->pending_mask); 2243 migrate_ioapic_irq_desc(desc, desc->pending_mask);
2165 2244
2166 ret = 0; 2245 ret = 0;
2167 desc->status &= ~IRQ_MOVE_PENDING; 2246 desc->status &= ~IRQ_MOVE_PENDING;
2168 cpus_clear(desc->pending_mask); 2247 cpus_clear(desc->pending_mask);
2169 2248
2170unmask: 2249unmask:
2171 unmask_IO_APIC_irq(irq); 2250 unmask_IO_APIC_irq_desc(desc);
2251
2172 return ret; 2252 return ret;
2173} 2253}
2174 2254
@@ -2178,6 +2258,9 @@ static void ir_irq_migration(struct work_struct *work)
2178 struct irq_desc *desc; 2258 struct irq_desc *desc;
2179 2259
2180 for_each_irq_desc(irq, desc) { 2260 for_each_irq_desc(irq, desc) {
2261 if (!desc)
2262 continue;
2263
2181 if (desc->status & IRQ_MOVE_PENDING) { 2264 if (desc->status & IRQ_MOVE_PENDING) {
2182 unsigned long flags; 2265 unsigned long flags;
2183 2266
@@ -2198,18 +2281,22 @@ static void ir_irq_migration(struct work_struct *work)
2198/* 2281/*
2199 * Migrates the IRQ destination in the process context. 2282 * Migrates the IRQ destination in the process context.
2200 */ 2283 */
2201static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) 2284static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
2202{ 2285{
2203 struct irq_desc *desc = irq_to_desc(irq);
2204
2205 if (desc->status & IRQ_LEVEL) { 2286 if (desc->status & IRQ_LEVEL) {
2206 desc->status |= IRQ_MOVE_PENDING; 2287 desc->status |= IRQ_MOVE_PENDING;
2207 desc->pending_mask = mask; 2288 desc->pending_mask = mask;
2208 migrate_irq_remapped_level(irq); 2289 migrate_irq_remapped_level_desc(desc);
2209 return; 2290 return;
2210 } 2291 }
2211 2292
2212 migrate_ioapic_irq(irq, mask); 2293 migrate_ioapic_irq_desc(desc, mask);
2294}
2295static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
2296{
2297 struct irq_desc *desc = irq_to_desc(irq);
2298
2299 set_ir_ioapic_affinity_irq_desc(desc, mask);
2213} 2300}
2214#endif 2301#endif
2215 2302
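
Editor's note: a pattern repeated throughout this patch is visible in the hunk above: the real work moves into a *_desc() function that takes an already-looked-up struct irq_desc, and the old irq-number entry point shrinks to a thin wrapper, so hot paths that already hold the descriptor skip the extra lookup. A self-contained sketch of that split, with invented names:

/* Sketch of the desc-based core plus irq-number wrapper split used above. */
#include <stdio.h>

struct demo_desc {
	unsigned int irq;
	unsigned long status;
};

static struct demo_desc demo_table[16];		/* toy descriptor storage */

static struct demo_desc *demo_irq_to_desc(unsigned int irq)
{
	return irq < 16 ? &demo_table[irq] : NULL;
}

/* The core does the work on a descriptor it is handed... */
static void demo_set_affinity_desc(struct demo_desc *desc, unsigned long mask)
{
	desc->status = mask;	/* stand-in for the real affinity update */
	printf("irq %u affinity -> %#lx\n", desc->irq, desc->status);
}

/* ...and the irq-number API is reduced to one lookup plus a call. */
static void demo_set_affinity(unsigned int irq, unsigned long mask)
{
	struct demo_desc *desc = demo_irq_to_desc(irq);

	if (desc)
		demo_set_affinity_desc(desc, mask);
}

int main(void)
{
	demo_table[3].irq = 3;
	demo_set_affinity(3, 0x5);
	return 0;
}
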
@@ -2229,6 +2316,9 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
2229 struct irq_cfg *cfg; 2316 struct irq_cfg *cfg;
2230 irq = __get_cpu_var(vector_irq)[vector]; 2317 irq = __get_cpu_var(vector_irq)[vector];
2231 2318
2319 if (irq == -1)
2320 continue;
2321
2232 desc = irq_to_desc(irq); 2322 desc = irq_to_desc(irq);
2233 if (!desc) 2323 if (!desc)
2234 continue; 2324 continue;
@@ -2250,9 +2340,10 @@ unlock:
2250 irq_exit(); 2340 irq_exit();
2251} 2341}
2252 2342
2253static void irq_complete_move(unsigned int irq) 2343static void irq_complete_move(struct irq_desc **descp)
2254{ 2344{
2255 struct irq_cfg *cfg = irq_cfg(irq); 2345 struct irq_desc *desc = *descp;
2346 struct irq_cfg *cfg = desc->chip_data;
2256 unsigned vector, me; 2347 unsigned vector, me;
2257 2348
2258 if (likely(!cfg->move_in_progress)) 2349 if (likely(!cfg->move_in_progress))
@@ -2270,8 +2361,9 @@ static void irq_complete_move(unsigned int irq)
2270 } 2361 }
2271} 2362}
2272#else 2363#else
2273static inline void irq_complete_move(unsigned int irq) {} 2364static inline void irq_complete_move(struct irq_desc **descp) {}
2274#endif 2365#endif
2366
2275#ifdef CONFIG_INTR_REMAP 2367#ifdef CONFIG_INTR_REMAP
2276static void ack_x2apic_level(unsigned int irq) 2368static void ack_x2apic_level(unsigned int irq)
2277{ 2369{
@@ -2282,11 +2374,14 @@ static void ack_x2apic_edge(unsigned int irq)
2282{ 2374{
2283 ack_x2APIC_irq(); 2375 ack_x2APIC_irq();
2284} 2376}
2377
2285#endif 2378#endif
2286 2379
2287static void ack_apic_edge(unsigned int irq) 2380static void ack_apic_edge(unsigned int irq)
2288{ 2381{
2289 irq_complete_move(irq); 2382 struct irq_desc *desc = irq_to_desc(irq);
2383
2384 irq_complete_move(&desc);
2290 move_native_irq(irq); 2385 move_native_irq(irq);
2291 ack_APIC_irq(); 2386 ack_APIC_irq();
2292} 2387}
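
Editor's note: irq_complete_move() now takes a struct irq_desc ** rather than the irq number. The caller looks the descriptor up once, and the double pointer leaves room for a later step to substitute a replacement descriptor (for example one reallocated closer to the target CPU) that the caller then keeps using. A small stand-alone illustration of that calling convention, with made-up names:

/* Why pass a T**: the callee may swap in a different object and the
 * caller transparently continues with it. */
#include <stdio.h>

struct demo_desc {
	int node;		/* which "node" this descriptor lives on */
	char name[16];
};

static struct demo_desc near_desc = { .node = 1, .name = "near copy" };

/* Hypothetical "complete the move" step: if the work should finish on
 * another node, hand the caller a descriptor allocated there. */
static void demo_complete_move(struct demo_desc **descp, int target_node)
{
	if ((*descp)->node != target_node)
		*descp = &near_desc;	/* caller's pointer is updated */
}

int main(void)
{
	struct demo_desc orig = { .node = 0, .name = "original" };
	struct demo_desc *desc = &orig;

	demo_complete_move(&desc, 1);
	printf("continuing with descriptor '%s' on node %d\n",
	       desc->name, desc->node);
	return 0;
}
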
@@ -2295,18 +2390,21 @@ atomic_t irq_mis_count;
2295 2390
2296static void ack_apic_level(unsigned int irq) 2391static void ack_apic_level(unsigned int irq)
2297{ 2392{
2393 struct irq_desc *desc = irq_to_desc(irq);
2394
2298#ifdef CONFIG_X86_32 2395#ifdef CONFIG_X86_32
2299 unsigned long v; 2396 unsigned long v;
2300 int i; 2397 int i;
2301#endif 2398#endif
2399 struct irq_cfg *cfg;
2302 int do_unmask_irq = 0; 2400 int do_unmask_irq = 0;
2303 2401
2304 irq_complete_move(irq); 2402 irq_complete_move(&desc);
2305#ifdef CONFIG_GENERIC_PENDING_IRQ 2403#ifdef CONFIG_GENERIC_PENDING_IRQ
2306 /* If we are moving the irq we need to mask it */ 2404 /* If we are moving the irq we need to mask it */
2307 if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) { 2405 if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
2308 do_unmask_irq = 1; 2406 do_unmask_irq = 1;
2309 mask_IO_APIC_irq(irq); 2407 mask_IO_APIC_irq_desc(desc);
2310 } 2408 }
2311#endif 2409#endif
2312 2410
@@ -2330,7 +2428,8 @@ static void ack_apic_level(unsigned int irq)
2330 * operation to prevent an edge-triggered interrupt escaping meanwhile. 2428 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2331 * The idea is from Manfred Spraul. --macro 2429 * The idea is from Manfred Spraul. --macro
2332 */ 2430 */
2333 i = irq_cfg(irq)->vector; 2431 cfg = desc->chip_data;
2432 i = cfg->vector;
2334 2433
2335 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); 2434 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2336#endif 2435#endif
@@ -2369,17 +2468,18 @@ static void ack_apic_level(unsigned int irq)
2369 * accurate and is causing problems then it is a hardware bug 2468 * accurate and is causing problems then it is a hardware bug
2370 * and you can go talk to the chipset vendor about it. 2469 * and you can go talk to the chipset vendor about it.
2371 */ 2470 */
2372 if (!io_apic_level_ack_pending(irq)) 2471 cfg = desc->chip_data;
2472 if (!io_apic_level_ack_pending(cfg))
2373 move_masked_irq(irq); 2473 move_masked_irq(irq);
2374 unmask_IO_APIC_irq(irq); 2474 unmask_IO_APIC_irq_desc(desc);
2375 } 2475 }
2376 2476
2377#ifdef CONFIG_X86_32 2477#ifdef CONFIG_X86_32
2378 if (!(v & (1 << (i & 0x1f)))) { 2478 if (!(v & (1 << (i & 0x1f)))) {
2379 atomic_inc(&irq_mis_count); 2479 atomic_inc(&irq_mis_count);
2380 spin_lock(&ioapic_lock); 2480 spin_lock(&ioapic_lock);
2381 __mask_and_edge_IO_APIC_irq(irq); 2481 __mask_and_edge_IO_APIC_irq(cfg);
2382 __unmask_and_level_IO_APIC_irq(irq); 2482 __unmask_and_level_IO_APIC_irq(cfg);
2383 spin_unlock(&ioapic_lock); 2483 spin_unlock(&ioapic_lock);
2384 } 2484 }
2385#endif 2485#endif
@@ -2430,20 +2530,22 @@ static inline void init_IO_APIC_traps(void)
2430 * Also, we've got to be careful not to trash gate 2530 * Also, we've got to be careful not to trash gate
2431 * 0x80, because int 0x80 is hm, kind of importantish. ;) 2531 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2432 */ 2532 */
2433 for_each_irq_cfg(irq, cfg) { 2533 for_each_irq_desc(irq, desc) {
2434 if (IO_APIC_IRQ(irq) && !cfg->vector) { 2534 if (!desc)
2535 continue;
2536
2537 cfg = desc->chip_data;
2538 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
2435 /* 2539 /*
2436 * Hmm.. We don't have an entry for this, 2540 * Hmm.. We don't have an entry for this,
2437 * so default to an old-fashioned 8259 2541 * so default to an old-fashioned 8259
2438 * interrupt if we can.. 2542 * interrupt if we can..
2439 */ 2543 */
2440 if (irq < 16) 2544 if (irq < NR_IRQS_LEGACY)
2441 make_8259A_irq(irq); 2545 make_8259A_irq(irq);
2442 else { 2546 else
2443 desc = irq_to_desc(irq);
2444 /* Strange. Oh, well.. */ 2547 /* Strange. Oh, well.. */
2445 desc->chip = &no_irq_chip; 2548 desc->chip = &no_irq_chip;
2446 }
2447 } 2549 }
2448 } 2550 }
2449} 2551}
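
Editor's note: with sparse IRQs the descriptor space can have holes, so every for_each_irq_desc() walk touched by this patch gains an explicit NULL check before dereferencing desc (the same guard appears in irq.c, irq_32.c and irq_64.c further down). The toy loop below shows why the guard matters when iterating a partially populated pointer table; it is an illustration, not the kernel iterator.

/* Iterating a sparse pointer table: unpopulated slots are NULL and must
 * be skipped, exactly like the "if (!desc) continue;" added above. */
#include <stdio.h>
#include <stddef.h>

struct toy_desc {
	unsigned int irq;
	unsigned int vector;
};

int main(void)
{
	struct toy_desc d5 = { .irq = 5, .vector = 0x31 };
	struct toy_desc d9 = { .irq = 9, .vector = 0x41 };
	struct toy_desc *table[12] = { NULL };	/* mostly empty */
	unsigned int irq;

	table[5] = &d5;
	table[9] = &d9;

	for (irq = 0; irq < 12; irq++) {
		struct toy_desc *desc = table[irq];

		if (!desc)		/* hole in the sparse table */
			continue;
		printf("irq %u uses vector %#x\n", irq, desc->vector);
	}
	return 0;
}
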
@@ -2468,7 +2570,7 @@ static void unmask_lapic_irq(unsigned int irq)
2468 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); 2570 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2469} 2571}
2470 2572
2471static void ack_lapic_irq (unsigned int irq) 2573static void ack_lapic_irq(unsigned int irq)
2472{ 2574{
2473 ack_APIC_irq(); 2575 ack_APIC_irq();
2474} 2576}
@@ -2480,11 +2582,8 @@ static struct irq_chip lapic_chip __read_mostly = {
2480 .ack = ack_lapic_irq, 2582 .ack = ack_lapic_irq,
2481}; 2583};
2482 2584
2483static void lapic_register_intr(int irq) 2585static void lapic_register_intr(int irq, struct irq_desc *desc)
2484{ 2586{
2485 struct irq_desc *desc;
2486
2487 desc = irq_to_desc(irq);
2488 desc->status &= ~IRQ_LEVEL; 2587 desc->status &= ~IRQ_LEVEL;
2489 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, 2588 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2490 "edge"); 2589 "edge");
@@ -2588,7 +2687,9 @@ int timer_through_8259 __initdata;
2588 */ 2687 */
2589static inline void __init check_timer(void) 2688static inline void __init check_timer(void)
2590{ 2689{
2591 struct irq_cfg *cfg = irq_cfg(0); 2690 struct irq_desc *desc = irq_to_desc(0);
2691 struct irq_cfg *cfg = desc->chip_data;
2692 int cpu = boot_cpu_id;
2592 int apic1, pin1, apic2, pin2; 2693 int apic1, pin1, apic2, pin2;
2593 unsigned long flags; 2694 unsigned long flags;
2594 unsigned int ver; 2695 unsigned int ver;
@@ -2603,7 +2704,7 @@ static inline void __init check_timer(void)
2603 * get/set the timer IRQ vector: 2704 * get/set the timer IRQ vector:
2604 */ 2705 */
2605 disable_8259A_irq(0); 2706 disable_8259A_irq(0);
2606 assign_irq_vector(0, TARGET_CPUS); 2707 assign_irq_vector(0, cfg, TARGET_CPUS);
2607 2708
2608 /* 2709 /*
2609 * As IRQ0 is to be enabled in the 8259A, the virtual 2710 * As IRQ0 is to be enabled in the 8259A, the virtual
@@ -2654,10 +2755,10 @@ static inline void __init check_timer(void)
2654 * Ok, does IRQ0 through the IOAPIC work? 2755 * Ok, does IRQ0 through the IOAPIC work?
2655 */ 2756 */
2656 if (no_pin1) { 2757 if (no_pin1) {
2657 add_pin_to_irq(0, apic1, pin1); 2758 add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
2658 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2759 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2659 } 2760 }
2660 unmask_IO_APIC_irq(0); 2761 unmask_IO_APIC_irq_desc(desc);
2661 if (timer_irq_works()) { 2762 if (timer_irq_works()) {
2662 if (nmi_watchdog == NMI_IO_APIC) { 2763 if (nmi_watchdog == NMI_IO_APIC) {
2663 setup_nmi(); 2764 setup_nmi();
@@ -2683,9 +2784,9 @@ static inline void __init check_timer(void)
2683 /* 2784 /*
2684 * legacy devices should be connected to IO APIC #0 2785 * legacy devices should be connected to IO APIC #0
2685 */ 2786 */
2686 replace_pin_at_irq(0, apic1, pin1, apic2, pin2); 2787 replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
2687 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 2788 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2688 unmask_IO_APIC_irq(0); 2789 unmask_IO_APIC_irq_desc(desc);
2689 enable_8259A_irq(0); 2790 enable_8259A_irq(0);
2690 if (timer_irq_works()) { 2791 if (timer_irq_works()) {
2691 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2792 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@ -2717,7 +2818,7 @@ static inline void __init check_timer(void)
2717 apic_printk(APIC_QUIET, KERN_INFO 2818 apic_printk(APIC_QUIET, KERN_INFO
2718 "...trying to set up timer as Virtual Wire IRQ...\n"); 2819 "...trying to set up timer as Virtual Wire IRQ...\n");
2719 2820
2720 lapic_register_intr(0); 2821 lapic_register_intr(0, desc);
2721 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 2822 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
2722 enable_8259A_irq(0); 2823 enable_8259A_irq(0);
2723 2824
@@ -2902,22 +3003,26 @@ unsigned int create_irq_nr(unsigned int irq_want)
2902 unsigned int irq; 3003 unsigned int irq;
2903 unsigned int new; 3004 unsigned int new;
2904 unsigned long flags; 3005 unsigned long flags;
2905 struct irq_cfg *cfg_new; 3006 struct irq_cfg *cfg_new = NULL;
2906 3007 int cpu = boot_cpu_id;
2907 irq_want = nr_irqs - 1; 3008 struct irq_desc *desc_new = NULL;
2908 3009
2909 irq = 0; 3010 irq = 0;
2910 spin_lock_irqsave(&vector_lock, flags); 3011 spin_lock_irqsave(&vector_lock, flags);
2911 for (new = irq_want; new > 0; new--) { 3012 for (new = irq_want; new < NR_IRQS; new++) {
2912 if (platform_legacy_irq(new)) 3013 if (platform_legacy_irq(new))
2913 continue; 3014 continue;
2914 cfg_new = irq_cfg(new); 3015
2915 if (cfg_new && cfg_new->vector != 0) 3016 desc_new = irq_to_desc_alloc_cpu(new, cpu);
3017 if (!desc_new) {
3018 printk(KERN_INFO "can not get irq_desc for %d\n", new);
3019 continue;
3020 }
3021 cfg_new = desc_new->chip_data;
3022
3023 if (cfg_new->vector != 0)
2916 continue; 3024 continue;
2917 /* check if need to create one */ 3025 if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0)
2918 if (!cfg_new)
2919 cfg_new = irq_cfg_alloc(new);
2920 if (__assign_irq_vector(new, TARGET_CPUS) == 0)
2921 irq = new; 3026 irq = new;
2922 break; 3027 break;
2923 } 3028 }
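
Editor's note: create_irq_nr() now scans upward from the caller's hint instead of downward from nr_irqs - 1, allocating the descriptor for each candidate as it goes and claiming the first one whose cfg->vector is still 0. A compact model of that search, using plain arrays instead of descriptors and invented names:

/* Model of the "scan up from a hint, take the first free slot" search. */
#include <stdio.h>

#define TOY_NR_IRQS	64
#define TOY_NR_LEGACY	16

static unsigned int toy_vector[TOY_NR_IRQS];	/* 0 == unused */

/* Hypothetical analogue of create_irq_nr(): returns 0 on failure. */
static unsigned int toy_create_irq_nr(unsigned int irq_want)
{
	unsigned int new;

	for (new = irq_want; new < TOY_NR_IRQS; new++) {
		if (new < TOY_NR_LEGACY)	/* skip the legacy/ISA range */
			continue;
		if (toy_vector[new] != 0)	/* already in use */
			continue;
		toy_vector[new] = 0x30 + new;	/* pretend vector assignment */
		return new;
	}
	return 0;
}

int main(void)
{
	toy_vector[24] = 0x99;			/* pre-occupy one slot */
	printf("first alloc: irq %u\n", toy_create_irq_nr(24));
	printf("second alloc: irq %u\n", toy_create_irq_nr(24));
	return 0;
}
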
@@ -2925,15 +3030,21 @@ unsigned int create_irq_nr(unsigned int irq_want)
2925 3030
2926 if (irq > 0) { 3031 if (irq > 0) {
2927 dynamic_irq_init(irq); 3032 dynamic_irq_init(irq);
3033 /* restore it, in case dynamic_irq_init clear it */
3034 if (desc_new)
3035 desc_new->chip_data = cfg_new;
2928 } 3036 }
2929 return irq; 3037 return irq;
2930} 3038}
2931 3039
3040static int nr_irqs_gsi = NR_IRQS_LEGACY;
2932int create_irq(void) 3041int create_irq(void)
2933{ 3042{
3043 unsigned int irq_want;
2934 int irq; 3044 int irq;
2935 3045
2936 irq = create_irq_nr(nr_irqs - 1); 3046 irq_want = nr_irqs_gsi;
3047 irq = create_irq_nr(irq_want);
2937 3048
2938 if (irq == 0) 3049 if (irq == 0)
2939 irq = -1; 3050 irq = -1;
@@ -2944,14 +3055,22 @@ int create_irq(void)
2944void destroy_irq(unsigned int irq) 3055void destroy_irq(unsigned int irq)
2945{ 3056{
2946 unsigned long flags; 3057 unsigned long flags;
3058 struct irq_cfg *cfg;
3059 struct irq_desc *desc;
2947 3060
3061 /* store it, in case dynamic_irq_cleanup clear it */
3062 desc = irq_to_desc(irq);
3063 cfg = desc->chip_data;
2948 dynamic_irq_cleanup(irq); 3064 dynamic_irq_cleanup(irq);
3065 /* connect back irq_cfg */
3066 if (desc)
3067 desc->chip_data = cfg;
2949 3068
2950#ifdef CONFIG_INTR_REMAP 3069#ifdef CONFIG_INTR_REMAP
2951 free_irte(irq); 3070 free_irte(irq);
2952#endif 3071#endif
2953 spin_lock_irqsave(&vector_lock, flags); 3072 spin_lock_irqsave(&vector_lock, flags);
2954 __clear_irq_vector(irq); 3073 __clear_irq_vector(irq, cfg);
2955 spin_unlock_irqrestore(&vector_lock, flags); 3074 spin_unlock_irqrestore(&vector_lock, flags);
2956} 3075}
2957 3076
@@ -2966,12 +3085,12 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
2966 unsigned dest; 3085 unsigned dest;
2967 cpumask_t tmp; 3086 cpumask_t tmp;
2968 3087
3088 cfg = irq_cfg(irq);
2969 tmp = TARGET_CPUS; 3089 tmp = TARGET_CPUS;
2970 err = assign_irq_vector(irq, tmp); 3090 err = assign_irq_vector(irq, cfg, tmp);
2971 if (err) 3091 if (err)
2972 return err; 3092 return err;
2973 3093
2974 cfg = irq_cfg(irq);
2975 cpus_and(tmp, cfg->domain, tmp); 3094 cpus_and(tmp, cfg->domain, tmp);
2976 dest = cpu_mask_to_apicid(tmp); 3095 dest = cpu_mask_to_apicid(tmp);
2977 3096
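
Editor's note: msi_compose_msg(), like the set_msi_irq_affinity() path below it, boils down to packing the assigned vector into the low byte of the MSI data word and the target APIC ID into bits 19:12 of the message address. A stand-alone sketch of that packing follows; the TOY_* helpers are local illustrations, not the kernel's MSI_* macros.

/* Pack an x86 MSI message: vector in data[7:0], APIC dest ID in
 * address[19:12], fixed 0xFEE magic in address[31:20]. */
#include <stdio.h>
#include <stdint.h>

#define TOY_MSI_ADDR_BASE	0xfee00000u
#define TOY_MSI_DEST_SHIFT	12
#define TOY_MSI_DEST_MASK	(0xffu << TOY_MSI_DEST_SHIFT)
#define TOY_MSI_VECTOR_MASK	0xffu

struct toy_msi_msg {
	uint32_t address_lo;
	uint32_t data;
};

static void toy_compose(struct toy_msi_msg *msg, uint8_t vector, uint8_t dest)
{
	msg->address_lo = TOY_MSI_ADDR_BASE |
			  ((uint32_t)dest << TOY_MSI_DEST_SHIFT);
	msg->data = vector;
}

/* Retargeting reuses the same fields, like set_msi_irq_affinity() below:
 * clear the old vector/dest bits, then OR in the new ones. */
static void toy_retarget(struct toy_msi_msg *msg, uint8_t vector, uint8_t dest)
{
	msg->data &= ~TOY_MSI_VECTOR_MASK;
	msg->data |= vector;
	msg->address_lo &= ~TOY_MSI_DEST_MASK;
	msg->address_lo |= (uint32_t)dest << TOY_MSI_DEST_SHIFT;
}

int main(void)
{
	struct toy_msi_msg msg;

	toy_compose(&msg, 0x31, 0x02);
	toy_retarget(&msg, 0x41, 0x05);
	printf("address_lo=%#x data=%#x\n", msg.address_lo, msg.data);
	return 0;
}
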
@@ -3029,35 +3148,35 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3029#ifdef CONFIG_SMP 3148#ifdef CONFIG_SMP
3030static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) 3149static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3031{ 3150{
3151 struct irq_desc *desc = irq_to_desc(irq);
3032 struct irq_cfg *cfg; 3152 struct irq_cfg *cfg;
3033 struct msi_msg msg; 3153 struct msi_msg msg;
3034 unsigned int dest; 3154 unsigned int dest;
3035 cpumask_t tmp; 3155 cpumask_t tmp;
3036 struct irq_desc *desc;
3037 3156
3038 cpus_and(tmp, mask, cpu_online_map); 3157 cpus_and(tmp, mask, cpu_online_map);
3039 if (cpus_empty(tmp)) 3158 if (cpus_empty(tmp))
3040 return; 3159 return;
3041 3160
3042 if (assign_irq_vector(irq, mask)) 3161 cfg = desc->chip_data;
3162 if (assign_irq_vector(irq, cfg, mask))
3043 return; 3163 return;
3044 3164
3045 cfg = irq_cfg(irq); 3165 set_extra_move_desc(desc, mask);
3166
3046 cpus_and(tmp, cfg->domain, mask); 3167 cpus_and(tmp, cfg->domain, mask);
3047 dest = cpu_mask_to_apicid(tmp); 3168 dest = cpu_mask_to_apicid(tmp);
3048 3169
3049 read_msi_msg(irq, &msg); 3170 read_msi_msg_desc(desc, &msg);
3050 3171
3051 msg.data &= ~MSI_DATA_VECTOR_MASK; 3172 msg.data &= ~MSI_DATA_VECTOR_MASK;
3052 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3173 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3053 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3174 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3054 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3175 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3055 3176
3056 write_msi_msg(irq, &msg); 3177 write_msi_msg_desc(desc, &msg);
3057 desc = irq_to_desc(irq);
3058 desc->affinity = mask; 3178 desc->affinity = mask;
3059} 3179}
3060
3061#ifdef CONFIG_INTR_REMAP 3180#ifdef CONFIG_INTR_REMAP
3062/* 3181/*
3063 * Migrate the MSI irq to another cpumask. This migration is 3182 * Migrate the MSI irq to another cpumask. This migration is
@@ -3065,11 +3184,11 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3065 */ 3184 */
3066static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) 3185static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3067{ 3186{
3187 struct irq_desc *desc = irq_to_desc(irq);
3068 struct irq_cfg *cfg; 3188 struct irq_cfg *cfg;
3069 unsigned int dest; 3189 unsigned int dest;
3070 cpumask_t tmp, cleanup_mask; 3190 cpumask_t tmp, cleanup_mask;
3071 struct irte irte; 3191 struct irte irte;
3072 struct irq_desc *desc;
3073 3192
3074 cpus_and(tmp, mask, cpu_online_map); 3193 cpus_and(tmp, mask, cpu_online_map);
3075 if (cpus_empty(tmp)) 3194 if (cpus_empty(tmp))
@@ -3078,10 +3197,12 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3078 if (get_irte(irq, &irte)) 3197 if (get_irte(irq, &irte))
3079 return; 3198 return;
3080 3199
3081 if (assign_irq_vector(irq, mask)) 3200 cfg = desc->chip_data;
3201 if (assign_irq_vector(irq, cfg, mask))
3082 return; 3202 return;
3083 3203
3084 cfg = irq_cfg(irq); 3204 set_extra_move_desc(desc, mask);
3205
3085 cpus_and(tmp, cfg->domain, mask); 3206 cpus_and(tmp, cfg->domain, mask);
3086 dest = cpu_mask_to_apicid(tmp); 3207 dest = cpu_mask_to_apicid(tmp);
3087 3208
@@ -3105,9 +3226,9 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3105 cfg->move_in_progress = 0; 3226 cfg->move_in_progress = 0;
3106 } 3227 }
3107 3228
3108 desc = irq_to_desc(irq);
3109 desc->affinity = mask; 3229 desc->affinity = mask;
3110} 3230}
3231
3111#endif 3232#endif
3112#endif /* CONFIG_SMP */ 3233#endif /* CONFIG_SMP */
3113 3234
@@ -3166,7 +3287,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3166} 3287}
3167#endif 3288#endif
3168 3289
3169static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq) 3290static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3170{ 3291{
3171 int ret; 3292 int ret;
3172 struct msi_msg msg; 3293 struct msi_msg msg;
@@ -3175,7 +3296,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
3175 if (ret < 0) 3296 if (ret < 0)
3176 return ret; 3297 return ret;
3177 3298
3178 set_irq_msi(irq, desc); 3299 set_irq_msi(irq, msidesc);
3179 write_msi_msg(irq, &msg); 3300 write_msi_msg(irq, &msg);
3180 3301
3181#ifdef CONFIG_INTR_REMAP 3302#ifdef CONFIG_INTR_REMAP
@@ -3195,26 +3316,13 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
3195 return 0; 3316 return 0;
3196} 3317}
3197 3318
3198static unsigned int build_irq_for_pci_dev(struct pci_dev *dev) 3319int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
3199{
3200 unsigned int irq;
3201
3202 irq = dev->bus->number;
3203 irq <<= 8;
3204 irq |= dev->devfn;
3205 irq <<= 12;
3206
3207 return irq;
3208}
3209
3210int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
3211{ 3320{
3212 unsigned int irq; 3321 unsigned int irq;
3213 int ret; 3322 int ret;
3214 unsigned int irq_want; 3323 unsigned int irq_want;
3215 3324
3216 irq_want = build_irq_for_pci_dev(dev) + 0x100; 3325 irq_want = nr_irqs_gsi;
3217
3218 irq = create_irq_nr(irq_want); 3326 irq = create_irq_nr(irq_want);
3219 if (irq == 0) 3327 if (irq == 0)
3220 return -1; 3328 return -1;
@@ -3228,7 +3336,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
3228 goto error; 3336 goto error;
3229no_ir: 3337no_ir:
3230#endif 3338#endif
3231 ret = setup_msi_irq(dev, desc, irq); 3339 ret = setup_msi_irq(dev, msidesc, irq);
3232 if (ret < 0) { 3340 if (ret < 0) {
3233 destroy_irq(irq); 3341 destroy_irq(irq);
3234 return ret; 3342 return ret;
@@ -3246,7 +3354,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3246{ 3354{
3247 unsigned int irq; 3355 unsigned int irq;
3248 int ret, sub_handle; 3356 int ret, sub_handle;
3249 struct msi_desc *desc; 3357 struct msi_desc *msidesc;
3250 unsigned int irq_want; 3358 unsigned int irq_want;
3251 3359
3252#ifdef CONFIG_INTR_REMAP 3360#ifdef CONFIG_INTR_REMAP
@@ -3254,10 +3362,11 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3254 int index = 0; 3362 int index = 0;
3255#endif 3363#endif
3256 3364
3257 irq_want = build_irq_for_pci_dev(dev) + 0x100; 3365 irq_want = nr_irqs_gsi;
3258 sub_handle = 0; 3366 sub_handle = 0;
3259 list_for_each_entry(desc, &dev->msi_list, list) { 3367 list_for_each_entry(msidesc, &dev->msi_list, list) {
3260 irq = create_irq_nr(irq_want--); 3368 irq = create_irq_nr(irq_want);
3369 irq_want++;
3261 if (irq == 0) 3370 if (irq == 0)
3262 return -1; 3371 return -1;
3263#ifdef CONFIG_INTR_REMAP 3372#ifdef CONFIG_INTR_REMAP
@@ -3289,7 +3398,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3289 } 3398 }
3290no_ir: 3399no_ir:
3291#endif 3400#endif
3292 ret = setup_msi_irq(dev, desc, irq); 3401 ret = setup_msi_irq(dev, msidesc, irq);
3293 if (ret < 0) 3402 if (ret < 0)
3294 goto error; 3403 goto error;
3295 sub_handle++; 3404 sub_handle++;
@@ -3310,20 +3419,22 @@ void arch_teardown_msi_irq(unsigned int irq)
3310#ifdef CONFIG_SMP 3419#ifdef CONFIG_SMP
3311static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) 3420static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
3312{ 3421{
3422 struct irq_desc *desc = irq_to_desc(irq);
3313 struct irq_cfg *cfg; 3423 struct irq_cfg *cfg;
3314 struct msi_msg msg; 3424 struct msi_msg msg;
3315 unsigned int dest; 3425 unsigned int dest;
3316 cpumask_t tmp; 3426 cpumask_t tmp;
3317 struct irq_desc *desc;
3318 3427
3319 cpus_and(tmp, mask, cpu_online_map); 3428 cpus_and(tmp, mask, cpu_online_map);
3320 if (cpus_empty(tmp)) 3429 if (cpus_empty(tmp))
3321 return; 3430 return;
3322 3431
3323 if (assign_irq_vector(irq, mask)) 3432 cfg = desc->chip_data;
3433 if (assign_irq_vector(irq, cfg, mask))
3324 return; 3434 return;
3325 3435
3326 cfg = irq_cfg(irq); 3436 set_extra_move_desc(desc, mask);
3437
3327 cpus_and(tmp, cfg->domain, mask); 3438 cpus_and(tmp, cfg->domain, mask);
3328 dest = cpu_mask_to_apicid(tmp); 3439 dest = cpu_mask_to_apicid(tmp);
3329 3440
@@ -3335,9 +3446,9 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
3335 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3446 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3336 3447
3337 dmar_msi_write(irq, &msg); 3448 dmar_msi_write(irq, &msg);
3338 desc = irq_to_desc(irq);
3339 desc->affinity = mask; 3449 desc->affinity = mask;
3340} 3450}
3451
3341#endif /* CONFIG_SMP */ 3452#endif /* CONFIG_SMP */
3342 3453
3343struct irq_chip dmar_msi_type = { 3454struct irq_chip dmar_msi_type = {
@@ -3371,8 +3482,8 @@ int arch_setup_dmar_msi(unsigned int irq)
3371#ifdef CONFIG_SMP 3482#ifdef CONFIG_SMP
3372static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask) 3483static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
3373{ 3484{
3485 struct irq_desc *desc = irq_to_desc(irq);
3374 struct irq_cfg *cfg; 3486 struct irq_cfg *cfg;
3375 struct irq_desc *desc;
3376 struct msi_msg msg; 3487 struct msi_msg msg;
3377 unsigned int dest; 3488 unsigned int dest;
3378 cpumask_t tmp; 3489 cpumask_t tmp;
@@ -3381,10 +3492,12 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
3381 if (cpus_empty(tmp)) 3492 if (cpus_empty(tmp))
3382 return; 3493 return;
3383 3494
3384 if (assign_irq_vector(irq, mask)) 3495 cfg = desc->chip_data;
3496 if (assign_irq_vector(irq, cfg, mask))
3385 return; 3497 return;
3386 3498
3387 cfg = irq_cfg(irq); 3499 set_extra_move_desc(desc, mask);
3500
3388 cpus_and(tmp, cfg->domain, mask); 3501 cpus_and(tmp, cfg->domain, mask);
3389 dest = cpu_mask_to_apicid(tmp); 3502 dest = cpu_mask_to_apicid(tmp);
3390 3503
@@ -3396,9 +3509,9 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
3396 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3509 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3397 3510
3398 hpet_msi_write(irq, &msg); 3511 hpet_msi_write(irq, &msg);
3399 desc = irq_to_desc(irq);
3400 desc->affinity = mask; 3512 desc->affinity = mask;
3401} 3513}
3514
3402#endif /* CONFIG_SMP */ 3515#endif /* CONFIG_SMP */
3403 3516
3404struct irq_chip hpet_msi_type = { 3517struct irq_chip hpet_msi_type = {
@@ -3453,26 +3566,28 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3453 3566
3454static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) 3567static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
3455{ 3568{
3569 struct irq_desc *desc = irq_to_desc(irq);
3456 struct irq_cfg *cfg; 3570 struct irq_cfg *cfg;
3457 unsigned int dest; 3571 unsigned int dest;
3458 cpumask_t tmp; 3572 cpumask_t tmp;
3459 struct irq_desc *desc;
3460 3573
3461 cpus_and(tmp, mask, cpu_online_map); 3574 cpus_and(tmp, mask, cpu_online_map);
3462 if (cpus_empty(tmp)) 3575 if (cpus_empty(tmp))
3463 return; 3576 return;
3464 3577
3465 if (assign_irq_vector(irq, mask)) 3578 cfg = desc->chip_data;
3579 if (assign_irq_vector(irq, cfg, mask))
3466 return; 3580 return;
3467 3581
3468 cfg = irq_cfg(irq); 3582 set_extra_move_desc(desc, mask);
3583
3469 cpus_and(tmp, cfg->domain, mask); 3584 cpus_and(tmp, cfg->domain, mask);
3470 dest = cpu_mask_to_apicid(tmp); 3585 dest = cpu_mask_to_apicid(tmp);
3471 3586
3472 target_ht_irq(irq, dest, cfg->vector); 3587 target_ht_irq(irq, dest, cfg->vector);
3473 desc = irq_to_desc(irq);
3474 desc->affinity = mask; 3588 desc->affinity = mask;
3475} 3589}
3590
3476#endif 3591#endif
3477 3592
3478static struct irq_chip ht_irq_chip = { 3593static struct irq_chip ht_irq_chip = {
@@ -3492,13 +3607,13 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3492 int err; 3607 int err;
3493 cpumask_t tmp; 3608 cpumask_t tmp;
3494 3609
3610 cfg = irq_cfg(irq);
3495 tmp = TARGET_CPUS; 3611 tmp = TARGET_CPUS;
3496 err = assign_irq_vector(irq, tmp); 3612 err = assign_irq_vector(irq, cfg, tmp);
3497 if (!err) { 3613 if (!err) {
3498 struct ht_irq_msg msg; 3614 struct ht_irq_msg msg;
3499 unsigned dest; 3615 unsigned dest;
3500 3616
3501 cfg = irq_cfg(irq);
3502 cpus_and(tmp, cfg->domain, tmp); 3617 cpus_and(tmp, cfg->domain, tmp);
3503 dest = cpu_mask_to_apicid(tmp); 3618 dest = cpu_mask_to_apicid(tmp);
3504 3619
@@ -3544,7 +3659,9 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3544 unsigned long flags; 3659 unsigned long flags;
3545 int err; 3660 int err;
3546 3661
3547 err = assign_irq_vector(irq, *eligible_cpu); 3662 cfg = irq_cfg(irq);
3663
3664 err = assign_irq_vector(irq, cfg, *eligible_cpu);
3548 if (err != 0) 3665 if (err != 0)
3549 return err; 3666 return err;
3550 3667
@@ -3553,8 +3670,6 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3553 irq_name); 3670 irq_name);
3554 spin_unlock_irqrestore(&vector_lock, flags); 3671 spin_unlock_irqrestore(&vector_lock, flags);
3555 3672
3556 cfg = irq_cfg(irq);
3557
3558 mmr_value = 0; 3673 mmr_value = 0;
3559 entry = (struct uv_IO_APIC_route_entry *)&mmr_value; 3674 entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
3560 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); 3675 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
@@ -3606,9 +3721,16 @@ int __init io_apic_get_redir_entries (int ioapic)
3606 return reg_01.bits.entries; 3721 return reg_01.bits.entries;
3607} 3722}
3608 3723
3609int __init probe_nr_irqs(void) 3724void __init probe_nr_irqs_gsi(void)
3610{ 3725{
3611 return NR_IRQS; 3726 int idx;
3727 int nr = 0;
3728
3729 for (idx = 0; idx < nr_ioapics; idx++)
3730 nr += io_apic_get_redir_entries(idx) + 1;
3731
3732 if (nr > nr_irqs_gsi)
3733 nr_irqs_gsi = nr;
3612} 3734}
3613 3735
3614/* -------------------------------------------------------------------------- 3736/* --------------------------------------------------------------------------
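
Editor's note: probe_nr_irqs_gsi() replaces the old blanket NR_IRQS estimate with an actual count of Global System Interrupts: io_apic_get_redir_entries() returns the index of the last redirection entry per IO-APIC, so each chip contributes that value plus one. The arithmetic is simple enough to model directly; the array below stands in for the per-IO-APIC register read.

/* Count GSIs: each IO-APIC reports the index of its last redirection
 * entry, so its number of pins is that value plus one. */
#include <stdio.h>

int main(void)
{
	/* Stand-in for io_apic_get_redir_entries(idx) on a 2-chip system:
	 * a 24-pin and a 16-pin IO-APIC report 23 and 15 respectively. */
	int redir_entries[] = { 23, 15 };
	int nr_ioapics = sizeof(redir_entries) / sizeof(redir_entries[0]);
	int nr_irqs_gsi = 16;		/* start at the legacy ISA IRQs */
	int idx, nr = 0;

	for (idx = 0; idx < nr_ioapics; idx++)
		nr += redir_entries[idx] + 1;

	if (nr > nr_irqs_gsi)
		nr_irqs_gsi = nr;

	printf("nr_irqs_gsi = %d\n", nr_irqs_gsi);	/* 40 on this box */
	return 0;
}
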
@@ -3707,19 +3829,31 @@ int __init io_apic_get_version(int ioapic)
3707 3829
3708int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity) 3830int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
3709{ 3831{
3832 struct irq_desc *desc;
3833 struct irq_cfg *cfg;
3834 int cpu = boot_cpu_id;
3835
3710 if (!IO_APIC_IRQ(irq)) { 3836 if (!IO_APIC_IRQ(irq)) {
3711 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", 3837 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3712 ioapic); 3838 ioapic);
3713 return -EINVAL; 3839 return -EINVAL;
3714 } 3840 }
3715 3841
3842 desc = irq_to_desc_alloc_cpu(irq, cpu);
3843 if (!desc) {
3844 printk(KERN_INFO "can not get irq_desc %d\n", irq);
3845 return 0;
3846 }
3847
3716 /* 3848 /*
3717 * IRQs < 16 are already in the irq_2_pin[] map 3849 * IRQs < 16 are already in the irq_2_pin[] map
3718 */ 3850 */
3719 if (irq >= 16) 3851 if (irq >= NR_IRQS_LEGACY) {
3720 add_pin_to_irq(irq, ioapic, pin); 3852 cfg = desc->chip_data;
3853 add_pin_to_irq_cpu(cfg, cpu, ioapic, pin);
3854 }
3721 3855
3722 setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity); 3856 setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity);
3723 3857
3724 return 0; 3858 return 0;
3725} 3859}
@@ -3773,9 +3907,10 @@ void __init setup_ioapic_dest(void)
3773 * when you have too many devices, because at that time only boot 3907 * when you have too many devices, because at that time only boot
3774 * cpu is online. 3908 * cpu is online.
3775 */ 3909 */
3776 cfg = irq_cfg(irq); 3910 desc = irq_to_desc(irq);
3911 cfg = desc->chip_data;
3777 if (!cfg->vector) { 3912 if (!cfg->vector) {
3778 setup_IO_APIC_irq(ioapic, pin, irq, 3913 setup_IO_APIC_irq(ioapic, pin, irq, desc,
3779 irq_trigger(irq_entry), 3914 irq_trigger(irq_entry),
3780 irq_polarity(irq_entry)); 3915 irq_polarity(irq_entry));
3781 continue; 3916 continue;
@@ -3785,7 +3920,6 @@ void __init setup_ioapic_dest(void)
3785 /* 3920 /*
3786 * Honour affinities which have been set in early boot 3921 * Honour affinities which have been set in early boot
3787 */ 3922 */
3788 desc = irq_to_desc(irq);
3789 if (desc->status & 3923 if (desc->status &
3790 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) 3924 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
3791 mask = desc->affinity; 3925 mask = desc->affinity;
@@ -3794,10 +3928,10 @@ void __init setup_ioapic_dest(void)
3794 3928
3795#ifdef CONFIG_INTR_REMAP 3929#ifdef CONFIG_INTR_REMAP
3796 if (intr_remapping_enabled) 3930 if (intr_remapping_enabled)
3797 set_ir_ioapic_affinity_irq(irq, mask); 3931 set_ir_ioapic_affinity_irq_desc(desc, mask);
3798 else 3932 else
3799#endif 3933#endif
3800 set_ioapic_affinity_irq(irq, mask); 3934 set_ioapic_affinity_irq_desc(desc, mask);
3801 } 3935 }
3802 3936
3803 } 3937 }
@@ -3846,7 +3980,6 @@ void __init ioapic_init_mappings(void)
3846 struct resource *ioapic_res; 3980 struct resource *ioapic_res;
3847 int i; 3981 int i;
3848 3982
3849 irq_2_pin_init();
3850 ioapic_res = ioapic_setup_resources(); 3983 ioapic_res = ioapic_setup_resources();
3851 for (i = 0; i < nr_ioapics; i++) { 3984 for (i = 0; i < nr_ioapics; i++) {
3852 if (smp_found_config) { 3985 if (smp_found_config) {
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index d1d4dc52f649..3f1d9d18df67 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -118,6 +118,9 @@ int show_interrupts(struct seq_file *p, void *v)
118 } 118 }
119 119
120 desc = irq_to_desc(i); 120 desc = irq_to_desc(i);
121 if (!desc)
122 return 0;
123
121 spin_lock_irqsave(&desc->lock, flags); 124 spin_lock_irqsave(&desc->lock, flags);
122#ifndef CONFIG_SMP 125#ifndef CONFIG_SMP
123 any_count = kstat_irqs(i); 126 any_count = kstat_irqs(i);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index a51382672de0..119fc9c8ff7f 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -242,6 +242,8 @@ void fixup_irqs(cpumask_t map)
242 for_each_irq_desc(irq, desc) { 242 for_each_irq_desc(irq, desc) {
243 cpumask_t mask; 243 cpumask_t mask;
244 244
245 if (!desc)
246 continue;
245 if (irq == 2) 247 if (irq == 2)
246 continue; 248 continue;
247 249
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 60eb84eb77a0..900009c70591 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -94,6 +94,8 @@ void fixup_irqs(cpumask_t map)
94 int break_affinity = 0; 94 int break_affinity = 0;
95 int set_affinity = 1; 95 int set_affinity = 1;
96 96
97 if (!desc)
98 continue;
97 if (irq == 2) 99 if (irq == 2)
98 continue; 100 continue;
99 101
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 845aa9803e80..6a92f47c52e7 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -68,8 +68,7 @@ void __init init_ISA_irqs (void)
68 /* 68 /*
69 * 16 old-style INTA-cycle interrupts: 69 * 16 old-style INTA-cycle interrupts:
70 */ 70 */
71 for (i = 0; i < 16; i++) { 71 for (i = 0; i < NR_IRQS_LEGACY; i++) {
72 /* first time call this irq_desc */
73 struct irq_desc *desc = irq_to_desc(i); 72 struct irq_desc *desc = irq_to_desc(i);
74 73
75 desc->status = IRQ_DISABLED; 74 desc->status = IRQ_DISABLED;
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index ff0235391285..40c1e62ec785 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -142,8 +142,7 @@ void __init init_ISA_irqs(void)
142 init_bsp_APIC(); 142 init_bsp_APIC();
143 init_8259A(0); 143 init_8259A(0);
144 144
145 for (i = 0; i < 16; i++) { 145 for (i = 0; i < NR_IRQS_LEGACY; i++) {
146 /* first time call this irq_desc */
147 struct irq_desc *desc = irq_to_desc(i); 146 struct irq_desc *desc = irq_to_desc(i);
148 147
149 desc->status = IRQ_DISABLED; 148 desc->status = IRQ_DISABLED;
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 0f4c1fd5a1f4..45e3b69808ba 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -586,26 +586,23 @@ static void __init __get_smp_config(unsigned int early)
586{ 586{
587 struct intel_mp_floating *mpf = mpf_found; 587 struct intel_mp_floating *mpf = mpf_found;
588 588
589 if (x86_quirks->mach_get_smp_config) { 589 if (!mpf)
590 if (x86_quirks->mach_get_smp_config(early)) 590 return;
591 return; 591
592 }
593 if (acpi_lapic && early) 592 if (acpi_lapic && early)
594 return; 593 return;
594
595 /* 595 /*
596 * ACPI supports both logical (e.g. Hyper-Threading) and physical 596 * MPS doesn't support hyperthreading, aka only have
597 * processors, where MPS only supports physical. 597 * thread 0 apic id in MPS table
598 */ 598 */
599 if (acpi_lapic && acpi_ioapic) { 599 if (acpi_lapic && acpi_ioapic)
600 printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
601 "information\n");
602 return; 600 return;
603 } else if (acpi_lapic)
604 printk(KERN_INFO "Using ACPI for processor (LAPIC) "
605 "configuration information\n");
606 601
607 if (!mpf) 602 if (x86_quirks->mach_get_smp_config) {
608 return; 603 if (x86_quirks->mach_get_smp_config(early))
604 return;
605 }
609 606
610 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", 607 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
611 mpf->mpf_specification); 608 mpf->mpf_specification);
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
index 4caff39078e0..0deea37a53cf 100644
--- a/arch/x86/kernel/numaq_32.c
+++ b/arch/x86/kernel/numaq_32.c
@@ -31,7 +31,7 @@
31#include <asm/numaq.h> 31#include <asm/numaq.h>
32#include <asm/topology.h> 32#include <asm/topology.h>
33#include <asm/processor.h> 33#include <asm/processor.h>
34#include <asm/mpspec.h> 34#include <asm/genapic.h>
35#include <asm/e820.h> 35#include <asm/e820.h>
36#include <asm/setup.h> 36#include <asm/setup.h>
37 37
@@ -235,6 +235,13 @@ static int __init numaq_setup_ioapic_ids(void)
235 return 1; 235 return 1;
236} 236}
237 237
238static int __init numaq_update_genapic(void)
239{
240 genapic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
241
242 return 0;
243}
244
238static struct x86_quirks numaq_x86_quirks __initdata = { 245static struct x86_quirks numaq_x86_quirks __initdata = {
239 .arch_pre_time_init = numaq_pre_time_init, 246 .arch_pre_time_init = numaq_pre_time_init,
240 .arch_time_init = NULL, 247 .arch_time_init = NULL,
@@ -250,6 +257,7 @@ static struct x86_quirks numaq_x86_quirks __initdata = {
250 .mpc_oem_pci_bus = mpc_oem_pci_bus, 257 .mpc_oem_pci_bus = mpc_oem_pci_bus,
251 .smp_read_mpc_oem = smp_read_mpc_oem, 258 .smp_read_mpc_oem = smp_read_mpc_oem,
252 .setup_ioapic_ids = numaq_setup_ioapic_ids, 259 .setup_ioapic_ids = numaq_setup_ioapic_ids,
260 .update_genapic = numaq_update_genapic,
253}; 261};
254 262
255void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, 263void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c27af49a4ede..95d811a9594f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -9,6 +9,7 @@
9#include <linux/clockchips.h> 9#include <linux/clockchips.h>
10#include <linux/ftrace.h> 10#include <linux/ftrace.h>
11#include <asm/system.h> 11#include <asm/system.h>
12#include <asm/apic.h>
12 13
13unsigned long idle_halt; 14unsigned long idle_halt;
14EXPORT_SYMBOL(idle_halt); 15EXPORT_SYMBOL(idle_halt);
@@ -127,6 +128,21 @@ void default_idle(void)
127EXPORT_SYMBOL(default_idle); 128EXPORT_SYMBOL(default_idle);
128#endif 129#endif
129 130
131void stop_this_cpu(void *dummy)
132{
133 local_irq_disable();
134 /*
135 * Remove this CPU:
136 */
137 cpu_clear(smp_processor_id(), cpu_online_map);
138 disable_local_APIC();
139
140 for (;;) {
141 if (hlt_works(smp_processor_id()))
142 halt();
143 }
144}
145
130static void do_nothing(void *unused) 146static void do_nothing(void *unused)
131{ 147{
132} 148}
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index cc5a2545dd41..0e3dbc7b2bdb 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -36,7 +36,10 @@ int reboot_force;
36static int reboot_cpu = -1; 36static int reboot_cpu = -1;
37#endif 37#endif
38 38
39/* reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old] 39/* This is set by the PCI code if either type 1 or type 2 PCI is detected */
40bool port_cf9_safe = false;
41
42/* reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old] | p[ci]
40 warm Don't set the cold reboot flag 43 warm Don't set the cold reboot flag
41 cold Set the cold reboot flag 44 cold Set the cold reboot flag
42 bios Reboot by jumping through the BIOS (only for X86_32) 45 bios Reboot by jumping through the BIOS (only for X86_32)
@@ -45,6 +48,7 @@ static int reboot_cpu = -1;
45 kbd Use the keyboard controller. cold reset (default) 48 kbd Use the keyboard controller. cold reset (default)
46 acpi Use the RESET_REG in the FADT 49 acpi Use the RESET_REG in the FADT
47 efi Use efi reset_system runtime service 50 efi Use efi reset_system runtime service
51 pci Use the so-called "PCI reset register", CF9
48 force Avoid anything that could hang. 52 force Avoid anything that could hang.
49 */ 53 */
50static int __init reboot_setup(char *str) 54static int __init reboot_setup(char *str)
@@ -79,6 +83,7 @@ static int __init reboot_setup(char *str)
79 case 'k': 83 case 'k':
80 case 't': 84 case 't':
81 case 'e': 85 case 'e':
86 case 'p':
82 reboot_type = *str; 87 reboot_type = *str;
83 break; 88 break;
84 89
@@ -404,12 +409,27 @@ static void native_machine_emergency_restart(void)
404 reboot_type = BOOT_KBD; 409 reboot_type = BOOT_KBD;
405 break; 410 break;
406 411
407
408 case BOOT_EFI: 412 case BOOT_EFI:
409 if (efi_enabled) 413 if (efi_enabled)
410 efi.reset_system(reboot_mode ? EFI_RESET_WARM : EFI_RESET_COLD, 414 efi.reset_system(reboot_mode ?
415 EFI_RESET_WARM :
416 EFI_RESET_COLD,
411 EFI_SUCCESS, 0, NULL); 417 EFI_SUCCESS, 0, NULL);
418 reboot_type = BOOT_KBD;
419 break;
420
421 case BOOT_CF9:
422 port_cf9_safe = true;
423 /* fall through */
412 424
425 case BOOT_CF9_COND:
426 if (port_cf9_safe) {
427 u8 cf9 = inb(0xcf9) & ~6;
428 outb(cf9|2, 0xcf9); /* Request hard reset */
429 udelay(50);
430 outb(cf9|6, 0xcf9); /* Actually do the reset */
431 udelay(50);
432 }
413 reboot_type = BOOT_KBD; 433 reboot_type = BOOT_KBD;
414 break; 434 break;
415 } 435 }
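
Editor's note: the new BOOT_CF9 / BOOT_CF9_COND cases implement the "PCI reset" method: in the chipset's reset control register at port 0xCF9, bit 1 selects a hard (full) reset and bit 2 triggers the reset when it goes high, hence the two writes with a short delay in between. The sketch below repeats that sequence against injected port-I/O callbacks so the logic can be compiled and exercised without touching real hardware; the bit names are descriptive, not official register definitions.

/* CF9 reset sequence against injected I/O callbacks. */
#include <stdio.h>
#include <stdint.h>

#define CF9_PORT	0xcf9
#define CF9_HARD_RESET	0x02	/* bit 1: select full/hard reset */
#define CF9_DO_RESET	0x04	/* bit 2: perform the reset */

struct port_io {
	uint8_t (*inb)(uint16_t port);
	void (*outb)(uint8_t val, uint16_t port);
	void (*delay_us)(unsigned int us);
};

static void cf9_hard_reset(const struct port_io *io)
{
	uint8_t cf9 = io->inb(CF9_PORT) & (uint8_t)~(CF9_HARD_RESET | CF9_DO_RESET);

	io->outb(cf9 | CF9_HARD_RESET, CF9_PORT);		/* request hard reset */
	io->delay_us(50);
	io->outb(cf9 | CF9_HARD_RESET | CF9_DO_RESET, CF9_PORT);	/* do the reset */
	io->delay_us(50);
}

/* Fake port I/O for demonstration: just log what would be written. */
static uint8_t fake_inb(uint16_t port) { (void)port; return 0; }
static void fake_outb(uint8_t val, uint16_t port)
{
	printf("outb(%#x, %#x)\n", val, port);
}
static void fake_delay(unsigned int us) { (void)us; }

int main(void)
{
	struct port_io io = { fake_inb, fake_outb, fake_delay };

	cf9_hard_reset(&io);
	return 0;
}
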
@@ -470,6 +490,11 @@ static void native_machine_restart(char *__unused)
470 490
471static void native_machine_halt(void) 491static void native_machine_halt(void)
472{ 492{
493 /* stop other cpus and apics */
494 machine_shutdown();
495
496 /* stop this cpu */
497 stop_this_cpu(NULL);
473} 498}
474 499
475static void native_machine_power_off(void) 500static void native_machine_power_off(void)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9d5674f7b6cc..b9018955a04f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -583,7 +583,20 @@ static int __init setup_elfcorehdr(char *arg)
583early_param("elfcorehdr", setup_elfcorehdr); 583early_param("elfcorehdr", setup_elfcorehdr);
584#endif 584#endif
585 585
586static struct x86_quirks default_x86_quirks __initdata; 586static int __init default_update_genapic(void)
587{
588#ifdef CONFIG_X86_SMP
589# if defined(CONFIG_X86_GENERICARCH) || defined(CONFIG_X86_64)
590 genapic->wakeup_cpu = wakeup_secondary_cpu_via_init;
591# endif
592#endif
593
594 return 0;
595}
596
597static struct x86_quirks default_x86_quirks __initdata = {
598 .update_genapic = default_update_genapic,
599};
587 600
588struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; 601struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
589 602
@@ -1082,7 +1095,7 @@ void __init setup_arch(char **cmdline_p)
1082 ioapic_init_mappings(); 1095 ioapic_init_mappings();
1083 1096
1084 /* need to wait for io_apic is mapped */ 1097 /* need to wait for io_apic is mapped */
1085 nr_irqs = probe_nr_irqs(); 1098 probe_nr_irqs_gsi();
1086 1099
1087 kvm_guest_init(); 1100 kvm_guest_init();
1088 1101
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 18f9b19f5f8f..3f92b134ab90 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -140,19 +140,6 @@ void native_send_call_func_ipi(cpumask_t mask)
140 send_IPI_mask(mask, CALL_FUNCTION_VECTOR); 140 send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
141} 141}
142 142
143static void stop_this_cpu(void *dummy)
144{
145 local_irq_disable();
146 /*
147 * Remove this CPU:
148 */
149 cpu_clear(smp_processor_id(), cpu_online_map);
150 disable_local_APIC();
151 if (hlt_works(smp_processor_id()))
152 for (;;) halt();
153 for (;;);
154}
155
156/* 143/*
157 * this function calls the 'stop' function on all other CPUs in the system. 144 * this function calls the 'stop' function on all other CPUs in the system.
158 */ 145 */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7b1093397319..0e9f446269f4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -62,6 +62,7 @@
62#include <asm/mtrr.h> 62#include <asm/mtrr.h>
63#include <asm/vmi.h> 63#include <asm/vmi.h>
64#include <asm/genapic.h> 64#include <asm/genapic.h>
65#include <asm/setup.h>
65#include <linux/mc146818rtc.h> 66#include <linux/mc146818rtc.h>
66 67
67#include <mach_apic.h> 68#include <mach_apic.h>
@@ -536,7 +537,7 @@ static void impress_friends(void)
536 pr_debug("Before bogocount - setting activated=1.\n"); 537 pr_debug("Before bogocount - setting activated=1.\n");
537} 538}
538 539
539static inline void __inquire_remote_apic(int apicid) 540void __inquire_remote_apic(int apicid)
540{ 541{
541 unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; 542 unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
542 char *names[] = { "ID", "VERSION", "SPIV" }; 543 char *names[] = { "ID", "VERSION", "SPIV" };
@@ -575,14 +576,13 @@ static inline void __inquire_remote_apic(int apicid)
575 } 576 }
576} 577}
577 578
578#ifdef WAKE_SECONDARY_VIA_NMI
579/* 579/*
580 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal 580 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
581 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this 581 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
582 * won't ... remember to clear down the APIC, etc later. 582 * won't ... remember to clear down the APIC, etc later.
583 */ 583 */
584static int __devinit 584int __devinit
585wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip) 585wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
586{ 586{
587 unsigned long send_status, accept_status = 0; 587 unsigned long send_status, accept_status = 0;
588 int maxlvt; 588 int maxlvt;
@@ -599,7 +599,7 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
599 * Give the other CPU some time to accept the IPI. 599 * Give the other CPU some time to accept the IPI.
600 */ 600 */
601 udelay(200); 601 udelay(200);
602 if (APIC_INTEGRATED(apic_version[phys_apicid])) { 602 if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
603 maxlvt = lapic_get_maxlvt(); 603 maxlvt = lapic_get_maxlvt();
604 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 604 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
605 apic_write(APIC_ESR, 0); 605 apic_write(APIC_ESR, 0);
@@ -614,11 +614,9 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
614 614
615 return (send_status | accept_status); 615 return (send_status | accept_status);
616} 616}
617#endif /* WAKE_SECONDARY_VIA_NMI */
618 617
619#ifdef WAKE_SECONDARY_VIA_INIT 618int __devinit
620static int __devinit 619wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
621wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
622{ 620{
623 unsigned long send_status, accept_status = 0; 621 unsigned long send_status, accept_status = 0;
624 int maxlvt, num_starts, j; 622 int maxlvt, num_starts, j;
@@ -737,7 +735,6 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
737 735
738 return (send_status | accept_status); 736 return (send_status | accept_status);
739} 737}
740#endif /* WAKE_SECONDARY_VIA_INIT */
741 738
742struct create_idle { 739struct create_idle {
743 struct work_struct work; 740 struct work_struct work;
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
index 3c3b471ea496..3624a364b7f3 100644
--- a/arch/x86/mach-generic/bigsmp.c
+++ b/arch/x86/mach-generic/bigsmp.c
@@ -17,6 +17,7 @@
17#include <asm/bigsmp/apic.h> 17#include <asm/bigsmp/apic.h>
18#include <asm/bigsmp/ipi.h> 18#include <asm/bigsmp/ipi.h>
19#include <asm/mach-default/mach_mpparse.h> 19#include <asm/mach-default/mach_mpparse.h>
20#include <asm/mach-default/mach_wakecpu.h>
20 21
21static int dmi_bigsmp; /* can be set by dmi scanners */ 22static int dmi_bigsmp; /* can be set by dmi scanners */
22 23
diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c
index 9e835a11a13a..e63a4a76d8cd 100644
--- a/arch/x86/mach-generic/default.c
+++ b/arch/x86/mach-generic/default.c
@@ -16,6 +16,7 @@
16#include <asm/mach-default/mach_apic.h> 16#include <asm/mach-default/mach_apic.h>
17#include <asm/mach-default/mach_ipi.h> 17#include <asm/mach-default/mach_ipi.h>
18#include <asm/mach-default/mach_mpparse.h> 18#include <asm/mach-default/mach_mpparse.h>
19#include <asm/mach-default/mach_wakecpu.h>
19 20
20/* should be called last. */ 21/* should be called last. */
21static int probe_default(void) 22static int probe_default(void)
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c
index 28459cab3ddb..7b4e6d0d1690 100644
--- a/arch/x86/mach-generic/es7000.c
+++ b/arch/x86/mach-generic/es7000.c
@@ -16,7 +16,19 @@
16#include <asm/es7000/apic.h> 16#include <asm/es7000/apic.h>
17#include <asm/es7000/ipi.h> 17#include <asm/es7000/ipi.h>
18#include <asm/es7000/mpparse.h> 18#include <asm/es7000/mpparse.h>
19#include <asm/es7000/wakecpu.h> 19#include <asm/mach-default/mach_wakecpu.h>
20
21void __init es7000_update_genapic_to_cluster(void)
22{
23 genapic->target_cpus = target_cpus_cluster;
24 genapic->int_delivery_mode = INT_DELIVERY_MODE_CLUSTER;
25 genapic->int_dest_mode = INT_DEST_MODE_CLUSTER;
26 genapic->no_balance_irq = NO_BALANCE_IRQ_CLUSTER;
27
28 genapic->init_apic_ldr = init_apic_ldr_cluster;
29
30 genapic->cpu_mask_to_apicid = cpu_mask_to_apicid_cluster;
31}
20 32
21static int probe_es7000(void) 33static int probe_es7000(void)
22{ 34{
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c
index 5a7e4619e1c4..c346d9d0226f 100644
--- a/arch/x86/mach-generic/probe.c
+++ b/arch/x86/mach-generic/probe.c
@@ -15,6 +15,7 @@
15#include <asm/mpspec.h> 15#include <asm/mpspec.h>
16#include <asm/apicdef.h> 16#include <asm/apicdef.h>
17#include <asm/genapic.h> 17#include <asm/genapic.h>
18#include <asm/setup.h>
18 19
19extern struct genapic apic_numaq; 20extern struct genapic apic_numaq;
20extern struct genapic apic_summit; 21extern struct genapic apic_summit;
@@ -57,6 +58,9 @@ static int __init parse_apic(char *arg)
57 } 58 }
58 } 59 }
59 60
61 if (x86_quirks->update_genapic)
62 x86_quirks->update_genapic();
63
60 /* Parsed again by __setup for debug/verbose */ 64 /* Parsed again by __setup for debug/verbose */
61 return 0; 65 return 0;
62} 66}
@@ -72,12 +76,15 @@ void __init generic_bigsmp_probe(void)
72 * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support 76 * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
73 */ 77 */
74 78
75 if (!cmdline_apic && genapic == &apic_default) 79 if (!cmdline_apic && genapic == &apic_default) {
76 if (apic_bigsmp.probe()) { 80 if (apic_bigsmp.probe()) {
77 genapic = &apic_bigsmp; 81 genapic = &apic_bigsmp;
82 if (x86_quirks->update_genapic)
83 x86_quirks->update_genapic();
78 printk(KERN_INFO "Overriding APIC driver with %s\n", 84 printk(KERN_INFO "Overriding APIC driver with %s\n",
79 genapic->name); 85 genapic->name);
80 } 86 }
87 }
81#endif 88#endif
82} 89}
83 90
@@ -94,6 +101,9 @@ void __init generic_apic_probe(void)
94 /* Not visible without early console */ 101 /* Not visible without early console */
95 if (!apic_probe[i]) 102 if (!apic_probe[i])
96 panic("Didn't find an APIC driver"); 103 panic("Didn't find an APIC driver");
104
105 if (x86_quirks->update_genapic)
106 x86_quirks->update_genapic();
97 } 107 }
98 printk(KERN_INFO "Using APIC driver %s\n", genapic->name); 108 printk(KERN_INFO "Using APIC driver %s\n", genapic->name);
99} 109}
@@ -108,6 +118,8 @@ int __init mps_oem_check(struct mp_config_table *mpc, char *oem,
108 if (apic_probe[i]->mps_oem_check(mpc, oem, productid)) { 118 if (apic_probe[i]->mps_oem_check(mpc, oem, productid)) {
109 if (!cmdline_apic) { 119 if (!cmdline_apic) {
110 genapic = apic_probe[i]; 120 genapic = apic_probe[i];
121 if (x86_quirks->update_genapic)
122 x86_quirks->update_genapic();
111 printk(KERN_INFO "Switched to APIC driver `%s'.\n", 123 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
112 genapic->name); 124 genapic->name);
113 } 125 }
@@ -124,6 +136,8 @@ int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
124 if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { 136 if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
125 if (!cmdline_apic) { 137 if (!cmdline_apic) {
126 genapic = apic_probe[i]; 138 genapic = apic_probe[i];
139 if (x86_quirks->update_genapic)
140 x86_quirks->update_genapic();
127 printk(KERN_INFO "Switched to APIC driver `%s'.\n", 141 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
128 genapic->name); 142 genapic->name);
129 } 143 }
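
Editor's note: every place in probe.c that selects or switches the genapic driver now calls back through x86_quirks->update_genapic, which lets a platform quirk (NUMAQ, ES7000, or the default added in setup.c) patch fields such as ->wakeup_cpu after the driver is chosen. The optional-callback shape is easy to show standalone; the struct and function names below are illustrative only.

/* Optional-hook pattern: after a driver structure is selected, call the
 * quirk's callback, if any, so the quirk can override fields. */
#include <stdio.h>

struct toy_apic_driver {
	const char *name;
	int (*wakeup_cpu)(int apicid);
};

static int wake_via_init(int apicid) { return printf("INIT to %d\n", apicid); }
static int wake_via_nmi(int apicid)  { return printf("NMI to %d\n", apicid); }

struct toy_quirks {
	int (*update_driver)(struct toy_apic_driver *drv);	/* may be NULL */
};

/* A NUMAQ-like quirk that swaps in the NMI wakeup method. */
static int numaq_like_update(struct toy_apic_driver *drv)
{
	drv->wakeup_cpu = wake_via_nmi;
	return 0;
}

int main(void)
{
	struct toy_apic_driver drv = { "default", wake_via_init };
	struct toy_quirks quirks = { numaq_like_update };

	/* ...driver just got selected; give the quirk a chance to adjust it. */
	if (quirks.update_driver)
		quirks.update_driver(&drv);

	drv.wakeup_cpu(3);
	return 0;
}
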
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c
index 6272b5e69da6..2c6d234e0009 100644
--- a/arch/x86/mach-generic/summit.c
+++ b/arch/x86/mach-generic/summit.c
@@ -16,6 +16,7 @@
16#include <asm/summit/apic.h> 16#include <asm/summit/apic.h>
17#include <asm/summit/ipi.h> 17#include <asm/summit/ipi.h>
18#include <asm/summit/mpparse.h> 18#include <asm/summit/mpparse.h>
19#include <asm/mach-default/mach_wakecpu.h>
19 20
20static int probe_summit(void) 21static int probe_summit(void)
21{ 22{
diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
index 9915293500fb..9a5af6c8fbe9 100644
--- a/arch/x86/pci/direct.c
+++ b/arch/x86/pci/direct.c
@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
173 173
174#undef PCI_CONF2_ADDRESS 174#undef PCI_CONF2_ADDRESS
175 175
176static struct pci_raw_ops pci_direct_conf2 = { 176struct pci_raw_ops pci_direct_conf2 = {
177 .read = pci_conf2_read, 177 .read = pci_conf2_read,
178 .write = pci_conf2_write, 178 .write = pci_conf2_write,
179}; 179};
@@ -289,6 +289,7 @@ int __init pci_direct_probe(void)
289 289
290 if (pci_check_type1()) { 290 if (pci_check_type1()) {
291 raw_pci_ops = &pci_direct_conf1; 291 raw_pci_ops = &pci_direct_conf1;
292 port_cf9_safe = true;
292 return 1; 293 return 1;
293 } 294 }
294 release_resource(region); 295 release_resource(region);
@@ -305,6 +306,7 @@ int __init pci_direct_probe(void)
305 306
306 if (pci_check_type2()) { 307 if (pci_check_type2()) {
307 raw_pci_ops = &pci_direct_conf2; 308 raw_pci_ops = &pci_direct_conf2;
309 port_cf9_safe = true;
308 return 2; 310 return 2;
309 } 311 }
310 312
diff --git a/arch/x86/pci/pci.h b/arch/x86/pci/pci.h
index 15b9cf6be729..1959018aac02 100644
--- a/arch/x86/pci/pci.h
+++ b/arch/x86/pci/pci.h
@@ -96,6 +96,7 @@ extern struct pci_raw_ops *raw_pci_ops;
96extern struct pci_raw_ops *raw_pci_ext_ops; 96extern struct pci_raw_ops *raw_pci_ext_ops;
97 97
98extern struct pci_raw_ops pci_direct_conf1; 98extern struct pci_raw_ops pci_direct_conf1;
99extern bool port_cf9_safe;
99 100
100/* arch_initcall level */ 101/* arch_initcall level */
101extern int pci_direct_probe(void); 102extern int pci_direct_probe(void);