author     Linus Torvalds <torvalds@linux-foundation.org>  2009-06-10 18:25:41 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-06-10 18:25:41 -0400
commit     f0d5e12bd42b7173ebbbf59279c867605f859814 (patch)
tree       f4018a726ecd2cf520afdf27210bfec1f3181718 /arch
parent     0fea615e526b4b7eff0363ee02d5753e5f924089 (diff)
parent     103428e57be323c3c5545db8ad12667099bc6005 (diff)
Merge branch 'irq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'irq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (76 commits)
x86, apic: Fix dummy apic read operation together with broken MP handling
x86, apic: Restore irqs on fail paths
x86: Print real IOAPIC version for x86-64
x86: enable_update_mptable should be a macro
sparseirq: Allow early irq_desc allocation
x86, io-apic: Don't mark pin_programmed early
x86, irq: don't call mp_config_acpi_gsi() if update_mptable is not enabled
x86, irq: update_mptable needs pci_routeirq
x86: don't call read_apic_id if !cpu_has_apic
x86, apic: introduce io_apic_irq_attr
x86/pci: add 4 more return parameters to IO_APIC_get_PCI_irq_vector(), fix
x86: read apic ID in the !acpi_lapic case
x86: apic: Fixmap apic address even if apic disabled
x86: display extended apic registers with print_local_APIC and cpu_debug code
x86: read apic ID in the !acpi_lapic case
x86: clean up and fix setup_clear/force_cpu_cap handling
x86: apic: Check rev 3 fadt correctly for physical_apic bit
x86/pci: update pirq_enable_irq() to setup io apic routing
x86/acpi: move setup io apic routing out of CONFIG_ACPI scope
x86/pci: add 4 more return parameters to IO_APIC_get_PCI_irq_vector()
...
Diffstat (limited to 'arch')
58 files changed, 1072 insertions, 1143 deletions
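The common thread in the arch-level hunks below is the irq chip set_affinity conversion: handlers that used to return void (and silently bail out on error) now return int, 0 on success and -1 when the requested mask cannot be applied. The following stand-alone sketch models that calling convention outside the kernel; "fake_cpumask" and "fake_chip" are invented names for illustration only and are not kernel APIs.

/*
 * Stand-alone illustration of the new calling convention; not kernel code.
 */
#include <stdio.h>

struct fake_cpumask { unsigned long bits; };

/* Old style: a void handler could not report failure to its caller.       */
/* New style: return 0 on success, -1 when the mask cannot be applied.     */
static int chip_set_affinity(unsigned int irq, const struct fake_cpumask *mask)
{
        if (mask->bits == 0)
                return -1;      /* no usable CPU in the mask */

        printf("irq %u routed to CPUs %#lx\n", irq, mask->bits);
        return 0;
}

struct fake_chip {
        int (*set_affinity)(unsigned int irq, const struct fake_cpumask *mask);
};

int main(void)
{
        struct fake_chip chip = { .set_affinity = chip_set_affinity };
        struct fake_cpumask online = { .bits = 0x3 };

        /* Callers can now propagate the result instead of guessing. */
        return chip.set_affinity(16, &online) ? 1 : 0;
}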
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 9c9d1fd4155f..5bd5259324b7 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -176,22 +176,26 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
         }
 }
 
-static void
+static int
 dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
 {
         spin_lock(&dp264_irq_lock);
         cpu_set_irq_affinity(irq, *affinity);
         tsunami_update_irq_hw(cached_irq_mask);
         spin_unlock(&dp264_irq_lock);
+
+        return 0;
 }
 
-static void
+static int
 clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 {
         spin_lock(&dp264_irq_lock);
         cpu_set_irq_affinity(irq - 16, *affinity);
         tsunami_update_irq_hw(cached_irq_mask);
         spin_unlock(&dp264_irq_lock);
+
+        return 0;
 }
 
 static struct hw_interrupt_type dp264_irq_type = {
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 27f840a4ad3d..8dd239ebdb9e 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -157,13 +157,15 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 
 }
 
-static void
+static int
 titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
         spin_lock(&titan_irq_lock);
         titan_cpu_set_irq_affinity(irq - 16, *affinity);
         titan_update_irq_hw(titan_cached_irq_mask);
         spin_unlock(&titan_irq_lock);
+
+        return 0;
 }
 
 static void
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 3e1714c6523f..664c7b8b1ba8 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -109,7 +109,7 @@ static void gic_unmask_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
+static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
 {
         void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
         unsigned int shift = (irq % 4) * 8;
@@ -122,6 +122,8 @@ static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
         val |= 1 << (cpu + shift);
         writel(val, reg);
         spin_unlock(&irq_controller_lock);
+
+        return 0;
 }
 #endif
 
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index df3925cb1c7f..d70b445f4a8f 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -325,12 +325,14 @@ static void end_crisv32_irq(unsigned int irq)
 {
 }
 
-void set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
+int set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
 {
         unsigned long flags;
         spin_lock_irqsave(&irq_lock, flags);
         irq_allocations[irq - FIRST_IRQ].mask = *dest;
         spin_unlock_irqrestore(&irq_lock, flags);
+
+        return 0;
 }
 
 static struct irq_chip crisv32_irq_type = {
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index cc0a3182db3c..acb5047ab573 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -21,9 +21,10 @@ hpsim_irq_noop (unsigned int irq)
 {
 }
 
-static void
+static int
 hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
 {
+        return 0;
 }
 
 static struct hw_interrupt_type irq_type_hp_sim = {
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 5510317db37b..baec6f00f7f3 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -636,7 +636,7 @@ void __init acpi_numa_arch_fixup(void)
  * success: return IRQ number (>=0)
  * failure: return < 0
  */
-int acpi_register_gsi(u32 gsi, int triggering, int polarity)
+int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity)
 {
         if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
                 return gsi;
@@ -678,7 +678,8 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
 
         fadt = (struct acpi_table_fadt *)fadt_header;
 
-        acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
+        acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE,
+                          ACPI_ACTIVE_LOW);
         return 0;
 }
 
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 166e0d839fa0..f92cef47bf86 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -329,7 +329,7 @@ unmask_irq (unsigned int irq)
 }
 
 
-static void
+static int
 iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
@@ -343,15 +343,15 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
 
         cpu = cpumask_first_and(cpu_online_mask, mask);
         if (cpu >= nr_cpu_ids)
-                return;
+                return -1;
 
         if (irq_prepare_move(irq, cpu))
-                return;
+                return -1;
 
         dest = cpu_physical_id(cpu);
 
         if (!iosapic_intr_info[irq].count)
-                return;         /* not an IOSAPIC interrupt */
+                return -1;      /* not an IOSAPIC interrupt */
 
         set_irq_affinity_info(irq, dest, redir);
 
@@ -376,7 +376,9 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
                 iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
                 iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
         }
+
 #endif
+        return 0;
 }
 
 /*
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 2b15e233f7fe..0f8ade9331ba 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -12,7 +12,7 @@
 static struct irq_chip ia64_msi_chip;
 
 #ifdef CONFIG_SMP
-static void ia64_set_msi_irq_affinity(unsigned int irq,
+static int ia64_set_msi_irq_affinity(unsigned int irq,
                                       const cpumask_t *cpu_mask)
 {
         struct msi_msg msg;
@@ -20,10 +20,10 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
         int cpu = first_cpu(*cpu_mask);
 
         if (!cpu_online(cpu))
-                return;
+                return -1;
 
         if (irq_prepare_move(irq, cpu))
-                return;
+                return -1;
 
         read_msi_msg(irq, &msg);
 
@@ -39,6 +39,8 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
 
         write_msi_msg(irq, &msg);
         cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
+
+        return 0;
 }
 #endif /* CONFIG_SMP */
 
@@ -130,17 +132,17 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_cfg *cfg = irq_cfg + irq;
         struct msi_msg msg;
         int cpu = cpumask_first(mask);
 
         if (!cpu_online(cpu))
-                return;
+                return -1;
 
         if (irq_prepare_move(irq, cpu))
-                return;
+                return -1;
 
         dmar_msi_read(irq, &msg);
 
@@ -151,6 +153,8 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 
         dmar_msi_write(irq, &msg);
         cpumask_copy(irq_desc[irq].affinity, mask);
+
+        return 0;
 }
 #endif /* CONFIG_SMP */
 
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 66fd705e82c0..764f26abac05 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -227,7 +227,7 @@ finish_up:
         return new_irq_info;
 }
 
-static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
+static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
 {
         struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
         nasid_t nasid;
@@ -239,6 +239,8 @@ static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
         list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
                                  sn_irq_lh[irq], list)
                 (void)sn_retarget_vector(sn_irq_info, nasid, slice);
+
+        return 0;
 }
 
 #ifdef CONFIG_SMP
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index 81e428943d73..fbbfb9701201 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -151,7 +151,7 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
 }
 
 #ifdef CONFIG_SMP
-static void sn_set_msi_irq_affinity(unsigned int irq,
+static int sn_set_msi_irq_affinity(unsigned int irq,
                                     const struct cpumask *cpu_mask)
 {
         struct msi_msg msg;
@@ -168,7 +168,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
         cpu = cpumask_first(cpu_mask);
         sn_irq_info = sn_msi_info[irq].sn_irq_info;
         if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
-                return;
+                return -1;
 
         /*
          * Release XIO resources for the old MSI PCI address
@@ -189,7 +189,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
         new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
         sn_msi_info[irq].sn_irq_info = new_irq_info;
         if (new_irq_info == NULL)
-                return;
+                return -1;
 
         /*
          * Map the xio address into bus space
@@ -206,6 +206,8 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
 
         write_msi_msg(irq, &msg);
         cpumask_copy(irq_desc[irq].affinity, cpu_mask);
+
+        return 0;
 }
 #endif /* CONFIG_SMP */
 
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 1c19af8daa62..d3a0c8154bec 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -177,7 +177,7 @@ static void octeon_irq_ciu0_disable(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-static void octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
+static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
 {
         int cpu;
         int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */
@@ -199,6 +199,8 @@ static void octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask
          */
         cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
         write_unlock(&octeon_irq_ciu0_rwlock);
+
+        return 0;
 }
 #endif
 
@@ -292,7 +294,7 @@ static void octeon_irq_ciu1_disable(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-static void octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
+static int octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
 {
         int cpu;
         int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */
@@ -315,6 +317,8 @@ static void octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask
          */
         cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
         write_unlock(&octeon_irq_ciu1_rwlock);
+
+        return 0;
 }
 #endif
 
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 3214ade02d10..4f1eed107b08 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -49,7 +49,7 @@ static inline void smtc_im_ack_irq(unsigned int irq)
 #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
 #include <linux/cpumask.h>
 
-extern void plat_set_irq_affinity(unsigned int irq,
+extern int plat_set_irq_affinity(unsigned int irq,
                                   const struct cpumask *affinity);
 extern void smtc_forward_irq(unsigned int irq);
 
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 87deb8f6c458..3f43c2e3aa5a 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -155,7 +155,7 @@ static void gic_unmask_irq(unsigned int irq)
 
 static DEFINE_SPINLOCK(gic_lock);
 
-static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 {
         cpumask_t tmp = CPU_MASK_NONE;
         unsigned long flags;
@@ -166,7 +166,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 
         cpumask_and(&tmp, cpumask, cpu_online_mask);
         if (cpus_empty(tmp))
-                return;
+                return -1;
 
         /* Assumption : cpumask refers to a single CPU */
         spin_lock_irqsave(&gic_lock, flags);
@@ -190,6 +190,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
         cpumask_copy(irq_desc[irq].affinity, cpumask);
         spin_unlock_irqrestore(&gic_lock, flags);
 
+        return 0;
 }
 #endif
 
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 5ba31888fefb..499ffe5475df 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -114,7 +114,7 @@ struct plat_smp_ops msmtc_smp_ops = {
  */
 
 
-void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
+int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
         cpumask_t tmask;
         int cpu = 0;
@@ -156,5 +156,7 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 
         /* Do any generic SMTC IRQ affinity setup */
         smtc_set_irq_affinity(irq, tmask);
+
+        return 0;
 }
 #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index c147c4b35d3f..690de06bde90 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -50,7 +50,7 @@ static void enable_bcm1480_irq(unsigned int irq);
 static void disable_bcm1480_irq(unsigned int irq);
 static void ack_bcm1480_irq(unsigned int irq);
 #ifdef CONFIG_SMP
-static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask);
+static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask);
 #endif
 
 #ifdef CONFIG_PCI
@@ -109,7 +109,7 @@ void bcm1480_unmask_irq(int cpu, int irq)
 }
 
 #ifdef CONFIG_SMP
-static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
         int i = 0, old_cpu, cpu, int_on, k;
         u64 cur_ints;
@@ -118,7 +118,7 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
 
         if (cpumask_weight(mask) != 1) {
                 printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
-                return;
+                return -1;
         }
         i = cpumask_first(mask);
 
@@ -152,6 +152,8 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
                 }
         }
         spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
+
+        return 0;
 }
 #endif
 
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index 38cb998ade22..409dec798863 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -50,7 +50,7 @@ static void enable_sb1250_irq(unsigned int irq);
 static void disable_sb1250_irq(unsigned int irq);
 static void ack_sb1250_irq(unsigned int irq);
 #ifdef CONFIG_SMP
-static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
+static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
 #endif
 
 #ifdef CONFIG_SIBYTE_HAS_LDT
@@ -103,7 +103,7 @@ void sb1250_unmask_irq(int cpu, int irq)
 }
 
 #ifdef CONFIG_SMP
-static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
         int i = 0, old_cpu, cpu, int_on;
         u64 cur_ints;
@@ -113,7 +113,7 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
 
         if (cpumask_weight(mask) > 1) {
                 printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
-                return;
+                return -1;
         }
 
         /* Convert logical CPU to physical CPU */
@@ -143,6 +143,8 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
                                         R_IMR_INTERRUPT_MASK));
         }
         spin_unlock_irqrestore(&sb1250_imr_lock, flags);
+
+        return 0;
 }
 #endif
 
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 4ea4229d765c..8007f1e65729 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -130,15 +130,17 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
         return cpu_dest;
 }
 
-static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
+static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 {
         int cpu_dest;
 
         cpu_dest = cpu_check_affinity(irq, dest);
         if (cpu_dest < 0)
-                return;
+                return -1;
 
         cpumask_copy(&irq_desc[irq].affinity, dest);
+
+        return 0;
 }
 #endif
 
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 80b513449f4c..be3581a8c294 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -333,7 +333,7 @@ static void xics_eoi_lpar(unsigned int virq)
         lpar_xirr_info_set((0xff << 24) | irq);
 }
 
-static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
+static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
 {
         unsigned int irq;
         int status;
@@ -342,14 +342,14 @@ static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
 
         irq = (unsigned int)irq_map[virq].hwirq;
         if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
-                return;
+                return -1;
 
         status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
 
         if (status) {
                 printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
                         __func__, irq, status);
-                return;
+                return -1;
         }
 
         /*
@@ -363,7 +363,7 @@ static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
                 printk(KERN_WARNING
                         "%s: No online cpus in the mask %s for irq %d\n",
                         __func__, cpulist, virq);
-                return;
+                return -1;
         }
 
         status = rtas_call(ibm_set_xive, 3, 1, NULL,
@@ -372,8 +372,10 @@ static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
         if (status) {
                 printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
                         __func__, irq, status);
-                return;
+                return -1;
         }
+
+        return 0;
 }
 
 static struct irq_chip xics_pic_direct = {
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0efc12d1a3d7..352d8c3ef526 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -807,7 +807,7 @@ static void mpic_end_ipi(unsigned int irq)
 
 #endif /* CONFIG_SMP */
 
-void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 {
         struct mpic *mpic = mpic_from_irq(irq);
         unsigned int src = mpic_irq_to_hw(irq);
@@ -824,6 +824,8 @@ void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
                 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
                                mpic_physmask(cpus_addr(tmp)[0]));
         }
+
+        return 0;
 }
 
 static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h
index 3cef2af10f42..eff433c322a0 100644
--- a/arch/powerpc/sysdev/mpic.h
+++ b/arch/powerpc/sysdev/mpic.h
@@ -36,6 +36,6 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic)
 
 extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type);
 extern void mpic_set_vector(unsigned int virq, unsigned int vector);
-extern void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+extern int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask);
 
 #endif /* _POWERPC_SYSDEV_MPIC_H */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 5deabe921a47..e5e78f9cfc95 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -318,10 +318,12 @@ static void sun4u_irq_enable(unsigned int virt_irq)
         }
 }
 
-static void sun4u_set_affinity(unsigned int virt_irq,
+static int sun4u_set_affinity(unsigned int virt_irq,
                                const struct cpumask *mask)
 {
         sun4u_irq_enable(virt_irq);
+
+        return 0;
 }
 
 /* Don't do anything. The desc->status check for IRQ_DISABLED in
@@ -377,7 +379,7 @@ static void sun4v_irq_enable(unsigned int virt_irq)
                                ino, err);
 }
 
-static void sun4v_set_affinity(unsigned int virt_irq,
+static int sun4v_set_affinity(unsigned int virt_irq,
                                const struct cpumask *mask)
 {
         unsigned int ino = virt_irq_table[virt_irq].dev_ino;
@@ -388,6 +390,8 @@ static void sun4v_set_affinity(unsigned int virt_irq,
         if (err != HV_EOK)
                 printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                        "err(%d)\n", ino, cpuid, err);
+
+        return 0;
 }
 
 static void sun4v_irq_disable(unsigned int virt_irq)
@@ -445,7 +449,7 @@ static void sun4v_virq_enable(unsigned int virt_irq)
                                dev_handle, dev_ino, err);
 }
 
-static void sun4v_virt_set_affinity(unsigned int virt_irq,
+static int sun4v_virt_set_affinity(unsigned int virt_irq,
                                     const struct cpumask *mask)
 {
         unsigned long cpuid, dev_handle, dev_ino;
@@ -461,6 +465,8 @@ static void sun4v_virt_set_affinity(unsigned int virt_irq,
                 printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                        "err(%d)\n",
                        dev_handle, dev_ino, cpuid, err);
+
+        return 0;
 }
 
 static void sun4v_virq_disable(unsigned int virt_irq)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a6efe0a2e9ae..b1d3f60525c0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -274,15 +274,9 @@ config SPARSE_IRQ
 
           If you don't know what to do here, say N.
 
-config NUMA_MIGRATE_IRQ_DESC
-        bool "Move irq desc when changing irq smp_affinity"
+config NUMA_IRQ_DESC
+        def_bool y
         depends on SPARSE_IRQ && NUMA
-        depends on BROKEN
-        default n
-        ---help---
-          This enables moving irq_desc to cpu/node that irq will use handled.
-
-          If you don't know what to do here, say N.
 
 config X86_MPPARSE
         bool "Enable MPS table" if ACPI
@@ -355,7 +349,7 @@ config X86_UV
         depends on X86_64
         depends on X86_EXTENDED_PLATFORM
         depends on NUMA
-        select X86_X2APIC
+        depends on X86_X2APIC
         ---help---
           This option is needed in order to support SGI Ultraviolet systems.
           If you don't have one of these, you should say N here.
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 9fe5d212ab4c..27b8ce0f5908 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -195,7 +195,6 @@ CONFIG_HIGH_RES_TIMERS=y
 CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
 CONFIG_SMP=y
 CONFIG_SPARSE_IRQ=y
-# CONFIG_NUMA_MIGRATE_IRQ_DESC is not set
 CONFIG_X86_FIND_SMP_CONFIG=y
 CONFIG_X86_MPPARSE=y
 # CONFIG_X86_ELAN is not set
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 42f2f8377422..3738438a91f5 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -107,8 +107,7 @@ extern u32 native_safe_apic_wait_icr_idle(void);
 extern void native_apic_icr_write(u32 low, u32 id);
 extern u64 native_apic_icr_read(void);
 
-#define EIM_8BIT_APIC_ID 0
-#define EIM_32BIT_APIC_ID 1
+extern int x2apic_mode;
 
 #ifdef CONFIG_X86_X2APIC
 /*
@@ -166,10 +165,9 @@ static inline u64 native_x2apic_icr_read(void)
         return val;
 }
 
-extern int x2apic, x2apic_phys;
+extern int x2apic_phys;
 extern void check_x2apic(void);
 extern void enable_x2apic(void);
-extern void enable_IR_x2apic(void);
 extern void x2apic_icr_write(u32 low, u32 id);
 static inline int x2apic_enabled(void)
 {
@@ -183,6 +181,8 @@ static inline int x2apic_enabled(void)
                 return 1;
         return 0;
 }
+
+#define x2apic_supported() (cpu_has_x2apic)
 #else
 static inline void check_x2apic(void)
 {
@@ -190,28 +190,20 @@ static inline void check_x2apic(void)
 static inline void enable_x2apic(void)
 {
 }
-static inline void enable_IR_x2apic(void)
-{
-}
 static inline int x2apic_enabled(void)
 {
         return 0;
 }
 
-#define x2apic 0
-
+#define x2apic_preenabled 0
+#define x2apic_supported() 0
 #endif
 
-extern int get_physical_broadcast(void);
+extern void enable_IR_x2apic(void);
 
-#ifdef CONFIG_X86_X2APIC
-static inline void ack_x2APIC_irq(void)
-{
-        /* Docs say use 0 for future compatibility */
-        native_apic_msr_write(APIC_EOI, 0);
-}
-#endif
+extern int get_physical_broadcast(void);
 
+extern void apic_disable(void);
 extern int lapic_get_maxlvt(void);
 extern void clear_local_APIC(void);
 extern void connect_bsp_APIC(void);
@@ -252,7 +244,7 @@ static inline void lapic_shutdown(void) { }
 #define local_apic_timer_c2_ok 1
 static inline void init_apic_mappings(void) { }
 static inline void disable_local_APIC(void) { }
-
+static inline void apic_disable(void) { }
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index bc9514fb3b13..7ddb36ab933b 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -22,6 +22,7 @@
 # define APIC_INTEGRATED(x) (1)
 #endif
 #define APIC_XAPIC(x) ((x) >= 0x14)
+#define APIC_EXT_SPACE(x) ((x) & 0x80000000)
 #define APIC_TASKPRI 0x80
 #define APIC_TPRI_MASK 0xFFu
 #define APIC_ARBPRI 0x90
@@ -116,7 +117,9 @@
 #define APIC_TDR_DIV_32 0x8
 #define APIC_TDR_DIV_64 0x9
 #define APIC_TDR_DIV_128 0xA
-#define APIC_EILVT0 0x500
+#define APIC_EFEAT 0x400
+#define APIC_ECTRL 0x410
+#define APIC_EILVTn(n) (0x500 + 0x10 * n)
 #define APIC_EILVT_NR_AMD_K8 1  /* # of extended interrupts */
 #define APIC_EILVT_NR_AMD_10H 4
 #define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF)
@@ -125,9 +128,6 @@
 #define APIC_EILVT_MSG_NMI 0x4
 #define APIC_EILVT_MSG_EXT 0x7
 #define APIC_EILVT_MASKED (1 << 16)
-#define APIC_EILVT1 0x510
-#define APIC_EILVT2 0x520
-#define APIC_EILVT3 0x530
 
 #define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
 #define APIC_BASE_MSR 0x800
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index bb83b1c397aa..13cc6a503a02 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -22,7 +22,7 @@
 #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
 #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */
 #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
-#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
+#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Exception */
 #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
 #define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
 #define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
@@ -192,11 +192,11 @@ extern const char * const x86_power_flags[32];
 #define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
 #define setup_clear_cpu_cap(bit) do { \
         clear_cpu_cap(&boot_cpu_data, bit); \
-        set_bit(bit, (unsigned long *)cleared_cpu_caps); \
+        set_bit(bit, (unsigned long *)cpu_caps_cleared); \
 } while (0)
 #define setup_force_cpu_cap(bit) do { \
         set_cpu_cap(&boot_cpu_data, bit); \
-        clear_bit(bit, (unsigned long *)cleared_cpu_caps); \
+        set_bit(bit, (unsigned long *)cpu_caps_set); \
 } while (0)
 
 #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index b762ea49bd70..a7d14bbae110 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -63,7 +63,26 @@ extern unsigned long io_apic_irqs;
 extern void init_VISWS_APIC_irqs(void);
 extern void setup_IO_APIC(void);
 extern void disable_IO_APIC(void);
-extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
+
+struct io_apic_irq_attr {
+        int ioapic;
+        int ioapic_pin;
+        int trigger;
+        int polarity;
+};
+
+static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
+                                        int ioapic, int ioapic_pin,
+                                        int trigger, int polarity)
+{
+        irq_attr->ioapic = ioapic;
+        irq_attr->ioapic_pin = ioapic_pin;
+        irq_attr->trigger = trigger;
+        irq_attr->polarity = polarity;
+}
+
+extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin,
+                                      struct io_apic_irq_attr *irq_attr);
 extern void setup_ioapic_dest(void);
 
 extern void enable_IO_APIC(void);
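The io_apic_irq_attr bundle introduced above replaces the loose out-parameters that IO_APIC_get_PCI_irq_vector() would otherwise keep accumulating. As a stand-alone sketch of how the helper packs the attributes: the struct and inline are copied from the hunk, while the main() caller and the pin/trigger/polarity values below are made up purely for illustration.

#include <stdio.h>

struct io_apic_irq_attr {
        int ioapic;
        int ioapic_pin;
        int trigger;
        int polarity;
};

static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
                                        int ioapic, int ioapic_pin,
                                        int trigger, int polarity)
{
        irq_attr->ioapic = ioapic;
        irq_attr->ioapic_pin = ioapic_pin;
        irq_attr->trigger = trigger;
        irq_attr->polarity = polarity;
}

int main(void)
{
        struct io_apic_irq_attr attr;

        /* Example values only: IO-APIC 0, pin 19, level-triggered, active low. */
        set_io_apic_irq_attr(&attr, 0, 19, 1, 1);

        printf("ioapic=%d pin=%d trigger=%d polarity=%d\n",
               attr.ioapic, attr.ioapic_pin, attr.trigger, attr.polarity);
        return 0;
}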
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 1a99e6c092af..58d7091eeb1f 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -60,8 +60,4 @@ extern struct irq_chip i8259A_chip;
 extern void mask_8259A(void);
 extern void unmask_8259A(void);
 
-#ifdef CONFIG_X86_32
-extern void init_ISA_irqs(void);
-#endif
-
 #endif /* _ASM_X86_I8259_H */
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 9d826e436010..daf866ed0612 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -154,22 +154,19 @@ extern int timer_through_8259;
 extern int io_apic_get_unique_id(int ioapic, int apic_id);
 extern int io_apic_get_version(int ioapic);
 extern int io_apic_get_redir_entries(int ioapic);
-extern int io_apic_set_pci_routing(int ioapic, int pin, int irq,
-                                   int edge_level, int active_high_low);
 #endif /* CONFIG_ACPI */
 
+struct io_apic_irq_attr;
+extern int io_apic_set_pci_routing(struct device *dev, int irq,
+                                   struct io_apic_irq_attr *irq_attr);
 extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 extern void ioapic_init_mappings(void);
 
-#ifdef CONFIG_X86_64
 extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
 extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
 extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
 extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
 extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
-extern void reinit_intr_remapped_IO_APIC(int intr_remapping,
-                                         struct IO_APIC_route_entry **ioapic_entries);
-#endif
 
 extern void probe_nr_irqs_gsi(void);
 
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 0396760fccb8..f275e2244505 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -1,6 +1,6 @@
 #ifndef _ASM_X86_IRQ_REMAPPING_H
 #define _ASM_X86_IRQ_REMAPPING_H
 
-#define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8)
+#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
 
 #endif /* _ASM_X86_IRQ_REMAPPING_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 3cbd79bbb47c..910b5a3d6751 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -34,6 +34,7 @@
 
 #ifdef CONFIG_X86_32
 # define SYSCALL_VECTOR 0x80
+# define IA32_SYSCALL_VECTOR 0x80
 #else
 # define IA32_SYSCALL_VECTOR 0x80
 #endif
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 642fc7fc8cdc..e2a1bb6d71ea 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -61,9 +61,11 @@ extern void get_smp_config(void);
 #ifdef CONFIG_X86_MPPARSE
 extern void find_smp_config(void);
 extern void early_reserve_e820_mpc_new(void);
+extern int enable_update_mptable;
 #else
 static inline void find_smp_config(void) { }
 static inline void early_reserve_e820_mpc_new(void) { }
+#define enable_update_mptable 0
 #endif
 
 void __cpuinit generic_processor_info(int apicid, int version);
@@ -72,20 +74,13 @@ extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
 extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
                                    u32 gsi);
 extern void mp_config_acpi_legacy_irqs(void);
-extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low);
+struct device;
+extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level,
+                           int active_high_low);
 extern int acpi_probe_gsi(void);
 #ifdef CONFIG_X86_IO_APIC
-extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
-                              u32 gsi, int triggering, int polarity);
 extern int mp_find_ioapic(int gsi);
 extern int mp_find_ioapic_pin(int ioapic, int gsi);
-#else
-static inline int
-mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
-                   u32 gsi, int triggering, int polarity)
-{
-        return 0;
-}
 #endif
 #else /* !CONFIG_ACPI: */
 static inline int acpi_probe_gsi(void)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c2cceae709c8..fed93fec9764 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -135,7 +135,8 @@ extern struct cpuinfo_x86 boot_cpu_data;
 extern struct cpuinfo_x86 new_cpu_data;
 
 extern struct tss_struct doublefault_tss;
-extern __u32 cleared_cpu_caps[NCAPINTS];
+extern __u32 cpu_caps_cleared[NCAPINTS];
+extern __u32 cpu_caps_set[NCAPINTS];
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index bdc2ada05ae0..4093d1ed6db2 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -33,7 +33,6 @@ struct x86_quirks {
         int (*setup_ioapic_ids)(void);
 };
 
-extern void x86_quirk_pre_intr_init(void);
 extern void x86_quirk_intr_init(void);
 
 extern void x86_quirk_trap_init(void);
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 19e0d88b966d..6a84ed166aec 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -180,7 +180,7 @@ extern int safe_smp_processor_id(void);
 static inline int logical_smp_processor_id(void)
 {
         /* we don't want to mark this access volatile - bad code generation */
-        return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
+        return GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
 }
 
 #endif
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 88d1bfc847d3..235f5927bb97 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -28,7 +28,7 @@ CFLAGS_paravirt.o := $(nostackp)
 obj-y := process_$(BITS).o signal.o entry_$(BITS).o
 obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o
-obj-y += setup.o i8259.o irqinit_$(BITS).o
+obj-y += setup.o i8259.o irqinit.o
 obj-$(CONFIG_X86_VISWS) += visws_quirks.o
 obj-$(CONFIG_X86_32) += probe_roms_32.o
 obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 723989d7f802..631086159c53 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -33,6 +33,7 @@
 #include <linux/irq.h>
 #include <linux/bootmem.h>
 #include <linux/ioport.h>
+#include <linux/pci.h>
 
 #include <asm/pgtable.h>
 #include <asm/io_apic.h>
@@ -522,7 +523,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
  * success: return IRQ number (>=0)
  * failure: return < 0
  */
-int acpi_register_gsi(u32 gsi, int triggering, int polarity)
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
 {
         unsigned int irq;
         unsigned int plat_gsi = gsi;
@@ -532,14 +533,14 @@ int acpi_register_gsi(u32 gsi, int triggering, int polarity)
          * Make sure all (legacy) PCI IRQs are set as level-triggered.
          */
         if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
-                if (triggering == ACPI_LEVEL_SENSITIVE)
+                if (trigger == ACPI_LEVEL_SENSITIVE)
                         eisa_set_level_irq(gsi);
         }
 #endif
 
 #ifdef CONFIG_X86_IO_APIC
         if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
-                plat_gsi = mp_register_gsi(gsi, triggering, polarity);
+                plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
         }
 #endif
         acpi_gsi_to_irq(plat_gsi, &irq);
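With the extra struct device argument, acpi_register_gsi() can hand the requesting device down to mp_register_gsi(), which later uses it to rebuild an mptable entry. A hypothetical caller sketch; only the acpi_register_gsi() signature comes from this diff, the wrapper name and the idea of calling it from a PCI IRQ-enable path are illustrative assumptions:

/* Hypothetical caller: route a PCI device's GSI, passing the device down
 * so the x86 code can derive bus/devfn/pin when update_mptable is used. */
static int example_register_pci_gsi(struct pci_dev *pdev, u32 gsi,
                                    int trigger, int polarity)
{
        /* dev may also be NULL for non-PCI GSIs; the mptable update
         * path in mp_config_acpi_gsi() simply skips those. */
        return acpi_register_gsi(&pdev->dev, gsi, trigger, polarity);
}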
@@ -903,10 +904,8 @@ extern int es7000_plat;
 #endif
 
 static struct {
-        int apic_id;
         int gsi_base;
         int gsi_end;
-        DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
 } mp_ioapic_routing[MAX_IO_APICS];
 
 int mp_find_ioapic(int gsi)
@@ -986,16 +985,12 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
 
         set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
         mp_ioapics[idx].apicid = uniq_ioapic_id(id);
-#ifdef CONFIG_X86_32
         mp_ioapics[idx].apicver = io_apic_get_version(idx);
-#else
-        mp_ioapics[idx].apicver = 0;
-#endif
+
         /*
          * Build basic GSI lookup table to facilitate gsi->io_apic lookups
          * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
          */
-        mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid;
         mp_ioapic_routing[idx].gsi_base = gsi_base;
         mp_ioapic_routing[idx].gsi_end = gsi_base +
             io_apic_get_redir_entries(idx);
@@ -1158,26 +1153,52 @@ void __init mp_config_acpi_legacy_irqs(void)
         }
 }
 
-int mp_register_gsi(u32 gsi, int triggering, int polarity)
+static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
+                        int polarity)
 {
+#ifdef CONFIG_X86_MPPARSE
+        struct mpc_intsrc mp_irq;
+        struct pci_dev *pdev;
+        unsigned char number;
+        unsigned int devfn;
         int ioapic;
-        int ioapic_pin;
-#ifdef CONFIG_X86_32
-#define MAX_GSI_NUM     4096
-#define IRQ_COMPRESSION_START   64
+        u8 pin;
 
-        static int pci_irq = IRQ_COMPRESSION_START;
-        /*
-         * Mapping between Global System Interrupts, which
-         * represent all possible interrupts, and IRQs
-         * assigned to actual devices.
-         */
-        static int gsi_to_irq[MAX_GSI_NUM];
-#else
+        if (!acpi_ioapic)
+                return 0;
+        if (!dev)
+                return 0;
+        if (dev->bus != &pci_bus_type)
+                return 0;
+
+        pdev = to_pci_dev(dev);
+        number = pdev->bus->number;
+        devfn = pdev->devfn;
+        pin = pdev->pin;
+        /* print the entry should happen on mptable identically */
+        mp_irq.type = MP_INTSRC;
+        mp_irq.irqtype = mp_INT;
+        mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
+                                (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
+        mp_irq.srcbus = number;
+        mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
+        ioapic = mp_find_ioapic(gsi);
+        mp_irq.dstapic = mp_ioapics[ioapic].apicid;
+        mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
+
+        save_mp_irq(&mp_irq);
+#endif
+        return 0;
+}
+
+int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
+{
+        int ioapic;
+        int ioapic_pin;
+        struct io_apic_irq_attr irq_attr;
 
         if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
                 return gsi;
-#endif
 
         /* Don't set up the ACPI SCI because it's already set up */
         if (acpi_gbl_FADT.sci_interrupt == gsi)
@@ -1196,93 +1217,22 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
                 gsi = ioapic_renumber_irq(ioapic, gsi);
 #endif
 
-        /*
-         * Avoid pin reprogramming.  PRTs typically include entries
-         * with redundant pin->gsi mappings (but unique PCI devices);
-         * we only program the IOAPIC on the first.
-         */
         if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
                 printk(KERN_ERR "Invalid reference to IOAPIC pin "
-                       "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
+                       "%d-%d\n", mp_ioapics[ioapic].apicid,
                        ioapic_pin);
                 return gsi;
         }
-        if (test_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed)) {
-                pr_debug("Pin %d-%d already programmed\n",
-                         mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-#ifdef CONFIG_X86_32
-                return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
-#else
-                return gsi;
-#endif
-        }
-
-        set_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed);
-#ifdef CONFIG_X86_32
-        /*
-         * For GSI >= 64, use IRQ compression
-         */
-        if ((gsi >= IRQ_COMPRESSION_START)
-            && (triggering == ACPI_LEVEL_SENSITIVE)) {
-                /*
-                 * For PCI devices assign IRQs in order, avoiding gaps
-                 * due to unused I/O APIC pins.
-                 */
-                int irq = gsi;
-                if (gsi < MAX_GSI_NUM) {
-                        /*
-                         * Retain the VIA chipset work-around (gsi > 15), but
-                         * avoid a problem where the 8254 timer (IRQ0) is setup
-                         * via an override (so it's not on pin 0 of the ioapic),
-                         * and at the same time, the pin 0 interrupt is a PCI
-                         * type.  The gsi > 15 test could cause these two pins
-                         * to be shared as IRQ0, and they are not shareable.
-                         * So test for this condition, and if necessary, avoid
-                         * the pin collision.
-                         */
-                        gsi = pci_irq++;
-                        /*
-                         * Don't assign IRQ used by ACPI SCI
-                         */
-                        if (gsi == acpi_gbl_FADT.sci_interrupt)
-                                gsi = pci_irq++;
-                        gsi_to_irq[irq] = gsi;
-                } else {
-                        printk(KERN_ERR "GSI %u is too high\n", gsi);
-                        return gsi;
-                }
-        }
-#endif
-        io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-                                triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
-                                polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-        return gsi;
-}
 
-int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
-                        u32 gsi, int triggering, int polarity)
-{
-#ifdef CONFIG_X86_MPPARSE
-        struct mpc_intsrc mp_irq;
-        int ioapic;
+        if (enable_update_mptable)
+                mp_config_acpi_gsi(dev, gsi, trigger, polarity);
 
-        if (!acpi_ioapic)
-                return 0;
+        set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin,
+                             trigger == ACPI_EDGE_SENSITIVE ? 0 : 1,
+                             polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+        io_apic_set_pci_routing(dev, gsi, &irq_attr);
 
-        /* print the entry should happen on mptable identically */
-        mp_irq.type = MP_INTSRC;
-        mp_irq.irqtype = mp_INT;
-        mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
-                                (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
-        mp_irq.srcbus = number;
-        mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
-        ioapic = mp_find_ioapic(gsi);
-        mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
-        mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
-
-        save_mp_irq(&mp_irq);
-#endif
-        return 0;
+        return gsi;
 }
 
 /*
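The new mp_config_acpi_gsi() above encodes trigger/polarity into mp_irq.irqflag and packs the PCI slot and INTx pin into srcbusirq. A standalone sketch of just that bit packing, runnable in user space; the flag constants (4 edge, 0x0c level, 1 active-high, 3 active-low) are taken from the diff, while the helper names and sample values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the irqflag packing in mp_config_acpi_gsi() above. */
static uint16_t pack_irqflag(int edge, int active_high)
{
        return (edge ? 4 : 0x0c) | (active_high ? 1 : 3);
}

/* srcbusirq: PCI device number in bits 6:2, INTx pin (0-3) in bits 1:0. */
static uint8_t pack_srcbusirq(unsigned int devfn, uint8_t pin)
{
        return (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
}

int main(void)
{
        /* device 00:1f.2, INTB, level/low trigger -- invented example */
        printf("irqflag=0x%x srcbusirq=0x%x\n",
               pack_irqflag(0, 0), pack_srcbusirq((0x1f << 3) | 2, 2));
        return 0;
}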
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index f2870920f246..a4c9cf0bf70b 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -98,6 +98,29 @@ early_param("lapic", parse_lapic); | |||
98 | /* Local APIC was disabled by the BIOS and enabled by the kernel */ | 98 | /* Local APIC was disabled by the BIOS and enabled by the kernel */ |
99 | static int enabled_via_apicbase; | 99 | static int enabled_via_apicbase; |
100 | 100 | ||
101 | /* | ||
102 | * Handle interrupt mode configuration register (IMCR). | ||
103 | * This register controls whether the interrupt signals | ||
104 | * that reach the BSP come from the master PIC or from the | ||
105 | * local APIC. Before entering Symmetric I/O Mode, either | ||
106 | * the BIOS or the operating system must switch out of | ||
107 | * PIC Mode by changing the IMCR. | ||
108 | */ | ||
109 | static inline void imcr_pic_to_apic(void) | ||
110 | { | ||
111 | /* select IMCR register */ | ||
112 | outb(0x70, 0x22); | ||
113 | /* NMI and 8259 INTR go through APIC */ | ||
114 | outb(0x01, 0x23); | ||
115 | } | ||
116 | |||
117 | static inline void imcr_apic_to_pic(void) | ||
118 | { | ||
119 | /* select IMCR register */ | ||
120 | outb(0x70, 0x22); | ||
121 | /* NMI and 8259 INTR go directly to BSP */ | ||
122 | outb(0x00, 0x23); | ||
123 | } | ||
101 | #endif | 124 | #endif |
102 | 125 | ||
103 | #ifdef CONFIG_X86_64 | 126 | #ifdef CONFIG_X86_64 |
@@ -111,13 +134,19 @@ static __init int setup_apicpmtimer(char *s) | |||
111 | __setup("apicpmtimer", setup_apicpmtimer); | 134 | __setup("apicpmtimer", setup_apicpmtimer); |
112 | #endif | 135 | #endif |
113 | 136 | ||
137 | int x2apic_mode; | ||
114 | #ifdef CONFIG_X86_X2APIC | 138 | #ifdef CONFIG_X86_X2APIC |
115 | int x2apic; | ||
116 | /* x2apic enabled before OS handover */ | 139 | /* x2apic enabled before OS handover */ |
117 | static int x2apic_preenabled; | 140 | static int x2apic_preenabled; |
118 | static int disable_x2apic; | 141 | static int disable_x2apic; |
119 | static __init int setup_nox2apic(char *str) | 142 | static __init int setup_nox2apic(char *str) |
120 | { | 143 | { |
144 | if (x2apic_enabled()) { | ||
145 | pr_warning("Bios already enabled x2apic, " | ||
146 | "can't enforce nox2apic"); | ||
147 | return 0; | ||
148 | } | ||
149 | |||
121 | disable_x2apic = 1; | 150 | disable_x2apic = 1; |
122 | setup_clear_cpu_cap(X86_FEATURE_X2APIC); | 151 | setup_clear_cpu_cap(X86_FEATURE_X2APIC); |
123 | return 0; | 152 | return 0; |
@@ -209,6 +238,31 @@ static int modern_apic(void) | |||
209 | return lapic_get_version() >= 0x14; | 238 | return lapic_get_version() >= 0x14; |
210 | } | 239 | } |
211 | 240 | ||
241 | /* | ||
242 | * bare function to substitute write operation | ||
243 | * and it's _that_ fast :) | ||
244 | */ | ||
245 | static void native_apic_write_dummy(u32 reg, u32 v) | ||
246 | { | ||
247 | WARN_ON_ONCE((cpu_has_apic || !disable_apic)); | ||
248 | } | ||
249 | |||
250 | static u32 native_apic_read_dummy(u32 reg) | ||
251 | { | ||
252 | WARN_ON_ONCE((cpu_has_apic && !disable_apic)); | ||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * right after this call apic->write/read doesn't do anything | ||
258 | * note that there is no restore operation it works one way | ||
259 | */ | ||
260 | void apic_disable(void) | ||
261 | { | ||
262 | apic->read = native_apic_read_dummy; | ||
263 | apic->write = native_apic_write_dummy; | ||
264 | } | ||
265 | |||
212 | void native_apic_wait_icr_idle(void) | 266 | void native_apic_wait_icr_idle(void) |
213 | { | 267 | { |
214 | while (apic_read(APIC_ICR) & APIC_ICR_BUSY) | 268 | while (apic_read(APIC_ICR) & APIC_ICR_BUSY) |
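apic_disable() above simply swaps the APIC read/write methods for warn-and-ignore stubs, so later code can call apic_read()/apic_write() without sprinkling "is the APIC present" checks everywhere. A minimal, self-contained illustration of that pattern in plain C; this is not the kernel's struct apic, just the same idea with invented names:

#include <stdio.h>
#include <stdint.h>

/* A tiny ops table standing in for the kernel's apic->read/apic->write. */
struct example_ops {
        uint32_t (*read)(uint32_t reg);
        void (*write)(uint32_t reg, uint32_t val);
};

static uint32_t dummy_read(uint32_t reg)
{
        fprintf(stderr, "warning: APIC read of %#x while disabled\n", reg);
        return 0;               /* reads return 0, like the patch's stub */
}

static void dummy_write(uint32_t reg, uint32_t val)
{
        fprintf(stderr, "warning: APIC write of %#x while disabled\n", reg);
        (void)val;
}

static struct example_ops ops;  /* normally points at the real accessors */

static void example_disable(void)
{
        /* one-way switch with no restore, exactly like apic_disable() */
        ops.read = dummy_read;
        ops.write = dummy_write;
}

int main(void)
{
        example_disable();
        ops.write(0x80, 0);     /* warns instead of faulting */
        return (int)ops.read(0x30);
}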
@@ -348,7 +402,7 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) | |||
348 | 402 | ||
349 | static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask) | 403 | static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask) |
350 | { | 404 | { |
351 | unsigned long reg = (lvt_off << 4) + APIC_EILVT0; | 405 | unsigned long reg = (lvt_off << 4) + APIC_EILVTn(0); |
352 | unsigned int v = (mask << 16) | (msg_type << 8) | vector; | 406 | unsigned int v = (mask << 16) | (msg_type << 8) | vector; |
353 | 407 | ||
354 | apic_write(reg, v); | 408 | apic_write(reg, v); |
@@ -815,7 +869,7 @@ void clear_local_APIC(void) | |||
815 | u32 v; | 869 | u32 v; |
816 | 870 | ||
817 | /* APIC hasn't been mapped yet */ | 871 | /* APIC hasn't been mapped yet */ |
818 | if (!x2apic && !apic_phys) | 872 | if (!x2apic_mode && !apic_phys) |
819 | return; | 873 | return; |
820 | 874 | ||
821 | maxlvt = lapic_get_maxlvt(); | 875 | maxlvt = lapic_get_maxlvt(); |
@@ -1287,7 +1341,7 @@ void check_x2apic(void) | |||
1287 | { | 1341 | { |
1288 | if (x2apic_enabled()) { | 1342 | if (x2apic_enabled()) { |
1289 | pr_info("x2apic enabled by BIOS, switching to x2apic ops\n"); | 1343 | pr_info("x2apic enabled by BIOS, switching to x2apic ops\n"); |
1290 | x2apic_preenabled = x2apic = 1; | 1344 | x2apic_preenabled = x2apic_mode = 1; |
1291 | } | 1345 | } |
1292 | } | 1346 | } |
1293 | 1347 | ||
@@ -1295,7 +1349,7 @@ void enable_x2apic(void) | |||
1295 | { | 1349 | { |
1296 | int msr, msr2; | 1350 | int msr, msr2; |
1297 | 1351 | ||
1298 | if (!x2apic) | 1352 | if (!x2apic_mode) |
1299 | return; | 1353 | return; |
1300 | 1354 | ||
1301 | rdmsr(MSR_IA32_APICBASE, msr, msr2); | 1355 | rdmsr(MSR_IA32_APICBASE, msr, msr2); |
@@ -1304,6 +1358,7 @@ void enable_x2apic(void) | |||
1304 | wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0); | 1358 | wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0); |
1305 | } | 1359 | } |
1306 | } | 1360 | } |
1361 | #endif /* CONFIG_X86_X2APIC */ | ||
1307 | 1362 | ||
1308 | void __init enable_IR_x2apic(void) | 1363 | void __init enable_IR_x2apic(void) |
1309 | { | 1364 | { |
@@ -1312,32 +1367,21 @@ void __init enable_IR_x2apic(void)
         unsigned long flags;
         struct IO_APIC_route_entry **ioapic_entries = NULL;
 
-        if (!cpu_has_x2apic)
-                return;
-
-        if (!x2apic_preenabled && disable_x2apic) {
-                pr_info("Skipped enabling x2apic and Interrupt-remapping "
-                        "because of nox2apic\n");
-                return;
+        ret = dmar_table_init();
+        if (ret) {
+                pr_debug("dmar_table_init() failed with %d:\n", ret);
+                goto ir_failed;
         }
 
-        if (x2apic_preenabled && disable_x2apic)
-                panic("Bios already enabled x2apic, can't enforce nox2apic");
-
-        if (!x2apic_preenabled && skip_ioapic_setup) {
-                pr_info("Skipped enabling x2apic and Interrupt-remapping "
-                        "because of skipping io-apic setup\n");
-                return;
+        if (!intr_remapping_supported()) {
+                pr_debug("intr-remapping not supported\n");
+                goto ir_failed;
         }
 
-        ret = dmar_table_init();
-        if (ret) {
-                pr_info("dmar_table_init() failed with %d:\n", ret);
 
-        if (x2apic_preenabled)
-                panic("x2apic enabled by bios. But IR enabling failed");
-        else
-                pr_info("Not enabling x2apic,Intr-remapping\n");
+        if (!x2apic_preenabled && skip_ioapic_setup) {
+                pr_info("Skipped enabling intr-remap because of skipping "
+                        "io-apic setup\n");
                 return;
         }
 
@@ -1357,19 +1401,16 @@ void __init enable_IR_x2apic(void)
         mask_IO_APIC_setup(ioapic_entries);
         mask_8259A();
 
-        ret = enable_intr_remapping(EIM_32BIT_APIC_ID);
-
-        if (ret && x2apic_preenabled) {
-                local_irq_restore(flags);
-                panic("x2apic enabled by bios. But IR enabling failed");
-        }
-
+        ret = enable_intr_remapping(x2apic_supported());
         if (ret)
                 goto end_restore;
 
-        if (!x2apic) {
-                x2apic = 1;
+        pr_info("Enabled Interrupt-remapping\n");
+
+        if (x2apic_supported() && !x2apic_mode) {
+                x2apic_mode = 1;
                 enable_x2apic();
+                pr_info("Enabled x2apic\n");
         }
 
 end_restore:
@@ -1378,37 +1419,34 @@ end_restore:
                  * IR enabling failed
                  */
                 restore_IO_APIC_setup(ioapic_entries);
-        else
-                reinit_intr_remapped_IO_APIC(x2apic_preenabled, ioapic_entries);
 
         unmask_8259A();
         local_irq_restore(flags);
 
 end:
-        if (!ret) {
-                if (!x2apic_preenabled)
-                        pr_info("Enabled x2apic and interrupt-remapping\n");
-                else
-                        pr_info("Enabled Interrupt-remapping\n");
-        } else
-                pr_err("Failed to enable Interrupt-remapping and x2apic\n");
         if (ioapic_entries)
                 free_ioapic_entries(ioapic_entries);
+
+        if (!ret)
+                return;
+
+ir_failed:
+        if (x2apic_preenabled)
+                panic("x2apic enabled by bios. But IR enabling failed");
+        else if (cpu_has_x2apic)
+                pr_info("Not enabling x2apic,Intr-remapping\n");
 #else
         if (!cpu_has_x2apic)
                 return;
 
         if (x2apic_preenabled)
                 panic("x2apic enabled prior OS handover,"
-                        " enable CONFIG_INTR_REMAP");
-
-        pr_info("Enable CONFIG_INTR_REMAP for enabling intr-remapping "
-                " and x2apic\n");
+                        " enable CONFIG_X86_X2APIC, CONFIG_INTR_REMAP");
 #endif
 
         return;
 }
-#endif /* CONFIG_X86_X2APIC */
+
 
 #ifdef CONFIG_X86_64
 /*
@@ -1425,7 +1463,6 @@ static int __init detect_init_APIC(void) | |||
1425 | } | 1463 | } |
1426 | 1464 | ||
1427 | mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; | 1465 | mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; |
1428 | boot_cpu_physical_apicid = 0; | ||
1429 | return 0; | 1466 | return 0; |
1430 | } | 1467 | } |
1431 | #else | 1468 | #else |
@@ -1539,32 +1576,49 @@ void __init early_init_lapic_mapping(void)
  */
 void __init init_apic_mappings(void)
 {
-        if (x2apic) {
+        unsigned int new_apicid;
+
+        if (x2apic_mode) {
                 boot_cpu_physical_apicid = read_apic_id();
                 return;
         }
 
-        /*
-         * If no local APIC can be found then set up a fake all
-         * zeroes page to simulate the local APIC and another
-         * one for the IO-APIC.
-         */
+        /* If no local APIC can be found return early */
         if (!smp_found_config && detect_init_APIC()) {
-                apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
-                apic_phys = __pa(apic_phys);
-        } else
+                /* lets NOP'ify apic operations */
+                pr_info("APIC: disable apic facility\n");
+                apic_disable();
+        } else {
                 apic_phys = mp_lapic_addr;
 
-        set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
-        apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
-                                APIC_BASE, apic_phys);
+                /*
+                 * acpi lapic path already maps that address in
+                 * acpi_register_lapic_address()
+                 */
+                if (!acpi_lapic)
+                        set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
+
+                apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
+                                        APIC_BASE, apic_phys);
+        }
 
         /*
          * Fetch the APIC ID of the BSP in case we have a
          * default configuration (or the MP table is broken).
          */
-        if (boot_cpu_physical_apicid == -1U)
-                boot_cpu_physical_apicid = read_apic_id();
+        new_apicid = read_apic_id();
+        if (boot_cpu_physical_apicid != new_apicid) {
+                boot_cpu_physical_apicid = new_apicid;
+                /*
+                 * yeah -- we lie about apic_version
+                 * in case if apic was disabled via boot option
+                 * but it's not a problem for SMP compiled kernel
+                 * since smp_sanity_check is prepared for such a case
+                 * and disable smp mode
+                 */
+                apic_version[new_apicid] =
+                         GET_APIC_VERSION(apic_read(APIC_LVR));
+        }
 }
 
 /*
@@ -1733,8 +1787,7 @@ void __init connect_bsp_APIC(void) | |||
1733 | */ | 1787 | */ |
1734 | apic_printk(APIC_VERBOSE, "leaving PIC mode, " | 1788 | apic_printk(APIC_VERBOSE, "leaving PIC mode, " |
1735 | "enabling APIC mode.\n"); | 1789 | "enabling APIC mode.\n"); |
1736 | outb(0x70, 0x22); | 1790 | imcr_pic_to_apic(); |
1737 | outb(0x01, 0x23); | ||
1738 | } | 1791 | } |
1739 | #endif | 1792 | #endif |
1740 | if (apic->enable_apic_mode) | 1793 | if (apic->enable_apic_mode) |
@@ -1762,8 +1815,7 @@ void disconnect_bsp_APIC(int virt_wire_setup) | |||
1762 | */ | 1815 | */ |
1763 | apic_printk(APIC_VERBOSE, "disabling APIC mode, " | 1816 | apic_printk(APIC_VERBOSE, "disabling APIC mode, " |
1764 | "entering PIC mode.\n"); | 1817 | "entering PIC mode.\n"); |
1765 | outb(0x70, 0x22); | 1818 | imcr_apic_to_pic(); |
1766 | outb(0x00, 0x23); | ||
1767 | return; | 1819 | return; |
1768 | } | 1820 | } |
1769 | #endif | 1821 | #endif |
@@ -1969,10 +2021,10 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state) | |||
1969 | 2021 | ||
1970 | local_irq_save(flags); | 2022 | local_irq_save(flags); |
1971 | disable_local_APIC(); | 2023 | disable_local_APIC(); |
1972 | #ifdef CONFIG_INTR_REMAP | 2024 | |
1973 | if (intr_remapping_enabled) | 2025 | if (intr_remapping_enabled) |
1974 | disable_intr_remapping(); | 2026 | disable_intr_remapping(); |
1975 | #endif | 2027 | |
1976 | local_irq_restore(flags); | 2028 | local_irq_restore(flags); |
1977 | return 0; | 2029 | return 0; |
1978 | } | 2030 | } |
@@ -1982,42 +2034,34 @@ static int lapic_resume(struct sys_device *dev)
         unsigned int l, h;
         unsigned long flags;
         int maxlvt;
-
-#ifdef CONFIG_INTR_REMAP
-        int ret;
+        int ret = 0;
         struct IO_APIC_route_entry **ioapic_entries = NULL;
 
         if (!apic_pm_state.active)
                 return 0;
 
         local_irq_save(flags);
-        if (x2apic) {
+        if (intr_remapping_enabled) {
                 ioapic_entries = alloc_ioapic_entries();
                 if (!ioapic_entries) {
                         WARN(1, "Alloc ioapic_entries in lapic resume failed.");
-                        return -ENOMEM;
+                        ret = -ENOMEM;
+                        goto restore;
                 }
 
                 ret = save_IO_APIC_setup(ioapic_entries);
                 if (ret) {
                         WARN(1, "Saving IO-APIC state failed: %d\n", ret);
                         free_ioapic_entries(ioapic_entries);
-                        return ret;
+                        goto restore;
                 }
 
                 mask_IO_APIC_setup(ioapic_entries);
                 mask_8259A();
-                enable_x2apic();
         }
-#else
-        if (!apic_pm_state.active)
-                return 0;
 
-        local_irq_save(flags);
-        if (x2apic)
+        if (x2apic_mode)
                 enable_x2apic();
-#endif
-
         else {
                 /*
                  * Make sure the APICBASE points to the right address
@@ -2055,21 +2099,16 @@ static int lapic_resume(struct sys_device *dev)
         apic_write(APIC_ESR, 0);
         apic_read(APIC_ESR);
 
-#ifdef CONFIG_INTR_REMAP
-        if (intr_remapping_enabled)
-                reenable_intr_remapping(EIM_32BIT_APIC_ID);
-
-        if (x2apic) {
+        if (intr_remapping_enabled) {
+                reenable_intr_remapping(x2apic_mode);
                 unmask_8259A();
                 restore_IO_APIC_setup(ioapic_entries);
                 free_ioapic_entries(ioapic_entries);
         }
-#endif
-
+restore:
         local_irq_restore(flags);
 
-
-        return 0;
+        return ret;
 }
 
 /*
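The resume path above now funnels every failure through a single restore: label, so local_irq_restore() always runs and the function returns the error code instead of bailing out early with interrupts still off. A standalone sketch of that single-exit shape; the names and stand-in calls are illustrative, not kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for local_irq_save()/restore() and the IO-APIC save step. */
static void irq_save(void)    { puts("irqs off"); }
static void irq_restore(void) { puts("irqs on"); }

static int example_resume(int fail_save)
{
        int ret = 0;
        char *saved = NULL;

        irq_save();

        saved = fail_save ? NULL : malloc(16);
        if (!saved) {
                ret = -1;
                goto restore;   /* still runs the unwind below */
        }

        /* ... reprogram hardware here ... */
        free(saved);

restore:
        irq_restore();
        return ret;
}

int main(void)
{
        return example_resume(0) || example_resume(1) != -1;
}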
@@ -2117,31 +2156,14 @@ static void apic_pm_activate(void) { }
 #endif  /* CONFIG_PM */
 
 #ifdef CONFIG_X86_64
-/*
- * apic_is_clustered_box() -- Check if we can expect good TSC
- *
- * Thus far, the major user of this is IBM's Summit2 series:
- *
- * Clustered boxes may have unsynced TSC problems if they are
- * multi-chassis. Use available data to take a good guess.
- * If in doubt, go HPET.
- */
-__cpuinit int apic_is_clustered_box(void)
+
+static int __cpuinit apic_cluster_num(void)
 {
         int i, clusters, zeros;
         unsigned id;
         u16 *bios_cpu_apicid;
         DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
 
-        /*
-         * there is not this kind of box with AMD CPU yet.
-         * Some AMD box with quadcore cpu and 8 sockets apicid
-         * will be [4, 0x23] or [8, 0x27] could be thought to
-         * vsmp box still need checking...
-         */
-        if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
-                return 0;
-
         bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
         bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
 
@@ -2177,18 +2199,67 @@ __cpuinit int apic_is_clustered_box(void)
                         ++zeros;
         }
 
-        /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
-         * not guaranteed to be synced between boards
-         */
-        if (is_vsmp_box() && clusters > 1)
+        return clusters;
+}
+
+static int __cpuinitdata multi_checked;
+static int __cpuinitdata multi;
+
+static int __cpuinit set_multi(const struct dmi_system_id *d)
+{
+        if (multi)
+                return 0;
+        pr_info("APIC: %s detected, Multi Chassis\n", d->ident);
+        multi = 1;
+        return 0;
+}
+
+static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
+        {
+                .callback = set_multi,
+                .ident = "IBM System Summit2",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
+                },
+        },
+        {}
+};
+
+static void __cpuinit dmi_check_multi(void)
+{
+        if (multi_checked)
+                return;
+
+        dmi_check_system(multi_dmi_table);
+        multi_checked = 1;
+}
+
+/*
+ * apic_is_clustered_box() -- Check if we can expect good TSC
+ *
+ * Thus far, the major user of this is IBM's Summit2 series:
+ * Clustered boxes may have unsynced TSC problems if they are
+ * multi-chassis.
+ * Use DMI to check them
+ */
+__cpuinit int apic_is_clustered_box(void)
+{
+        dmi_check_multi();
+        if (multi)
                 return 1;
 
+        if (!is_vsmp_box())
+                return 0;
+
         /*
-         * If clusters > 2, then should be multi-chassis.
-         * May have to revisit this when multi-core + hyperthreaded CPUs come
-         * out, but AFAIK this will work even for them.
+         * ScaleMP vSMPowered boxes have one cluster per board and TSCs are
+         * not guaranteed to be synced between boards
          */
-        return (clusters > 2);
+        if (apic_cluster_num() > 1)
+                return 1;
+
+        return 0;
 }
 #endif
 
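apic_cluster_num() above marks each known APIC ID's cluster in a bitmap and counts how many distinct clusters are populated. A standalone sketch of that counting logic over a made-up APIC ID list; the 4-bit cluster split and the sample IDs are illustrative assumptions, not values taken from the patch:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NUM_CLUSTERS 64

/* Count distinct clusters among the given APIC IDs. */
static int cluster_num(const uint16_t *apicid, int n)
{
        unsigned char seen[NUM_CLUSTERS];
        int i, clusters = 0;

        memset(seen, 0, sizeof(seen));
        for (i = 0; i < n; i++) {
                unsigned int cluster = apicid[i] >> 4;

                if (cluster < NUM_CLUSTERS && !seen[cluster]) {
                        seen[cluster] = 1;
                        clusters++;
                }
        }
        return clusters;
}

int main(void)
{
        uint16_t ids[] = { 0x00, 0x01, 0x10, 0x11, 0x20 };

        /* three clusters populated -> a multi-chassis style layout */
        printf("clusters = %d\n", cluster_num(ids, 5));
        return 0;
}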
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 306e5e88fb6f..d0c99abc26c3 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c | |||
@@ -161,7 +161,7 @@ static int flat_apic_id_registered(void) | |||
161 | 161 | ||
162 | static int flat_phys_pkg_id(int initial_apic_id, int index_msb) | 162 | static int flat_phys_pkg_id(int initial_apic_id, int index_msb) |
163 | { | 163 | { |
164 | return hard_smp_processor_id() >> index_msb; | 164 | return initial_apic_id >> index_msb; |
165 | } | 165 | } |
166 | 166 | ||
167 | struct apic apic_flat = { | 167 | struct apic apic_flat = { |
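The one-liner above derives the physical package ID from the CPU's initial APIC ID instead of re-reading the hardware APIC ID. A standalone sketch of that shift, where index_msb is the number of APIC ID bits consumed by cores and threads as reported by CPUID; the sample values are invented:

#include <stdio.h>

/* Same arithmetic as flat_phys_pkg_id(): drop the core/thread bits. */
static int phys_pkg_id(int initial_apic_id, int index_msb)
{
        return initial_apic_id >> index_msb;
}

int main(void)
{
        /* APIC ID 5 with 2 core/thread bits -> package 1 */
        printf("package = %d\n", phys_pkg_id(5, 2));
        return 0;
}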
@@ -235,7 +235,7 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
235 | * regardless of how many processors are present (x86_64 ES7000 | 235 | * regardless of how many processors are present (x86_64 ES7000 |
236 | * is an example). | 236 | * is an example). |
237 | */ | 237 | */ |
238 | if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID && | 238 | if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && |
239 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { | 239 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { |
240 | printk(KERN_DEBUG "system APIC only can use physical flat"); | 240 | printk(KERN_DEBUG "system APIC only can use physical flat"); |
241 | return 1; | 241 | return 1; |
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 302947775575..69328ac8de9c 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c | |||
@@ -145,7 +145,7 @@ es7000_rename_gsi(int ioapic, int gsi) | |||
145 | return gsi; | 145 | return gsi; |
146 | } | 146 | } |
147 | 147 | ||
148 | static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) | 148 | static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) |
149 | { | 149 | { |
150 | unsigned long vect = 0, psaival = 0; | 150 | unsigned long vect = 0, psaival = 0; |
151 | 151 | ||
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 30da617d18e4..f712f8ff403c 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -129,12 +129,9 @@ struct irq_pin_list { | |||
129 | struct irq_pin_list *next; | 129 | struct irq_pin_list *next; |
130 | }; | 130 | }; |
131 | 131 | ||
132 | static struct irq_pin_list *get_one_free_irq_2_pin(int cpu) | 132 | static struct irq_pin_list *get_one_free_irq_2_pin(int node) |
133 | { | 133 | { |
134 | struct irq_pin_list *pin; | 134 | struct irq_pin_list *pin; |
135 | int node; | ||
136 | |||
137 | node = cpu_to_node(cpu); | ||
138 | 135 | ||
139 | pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); | 136 | pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); |
140 | 137 | ||
@@ -148,9 +145,6 @@ struct irq_cfg { | |||
148 | unsigned move_cleanup_count; | 145 | unsigned move_cleanup_count; |
149 | u8 vector; | 146 | u8 vector; |
150 | u8 move_in_progress : 1; | 147 | u8 move_in_progress : 1; |
151 | #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC | ||
152 | u8 move_desc_pending : 1; | ||
153 | #endif | ||
154 | }; | 148 | }; |
155 | 149 | ||
156 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ | 150 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ |
@@ -212,12 +206,9 @@ static struct irq_cfg *irq_cfg(unsigned int irq) | |||
212 | return cfg; | 206 | return cfg; |
213 | } | 207 | } |
214 | 208 | ||
215 | static struct irq_cfg *get_one_free_irq_cfg(int cpu) | 209 | static struct irq_cfg *get_one_free_irq_cfg(int node) |
216 | { | 210 | { |
217 | struct irq_cfg *cfg; | 211 | struct irq_cfg *cfg; |
218 | int node; | ||
219 | |||
220 | node = cpu_to_node(cpu); | ||
221 | 212 | ||
222 | cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); | 213 | cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); |
223 | if (cfg) { | 214 | if (cfg) { |
@@ -238,13 +229,13 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu) | |||
238 | return cfg; | 229 | return cfg; |
239 | } | 230 | } |
240 | 231 | ||
241 | int arch_init_chip_data(struct irq_desc *desc, int cpu) | 232 | int arch_init_chip_data(struct irq_desc *desc, int node) |
242 | { | 233 | { |
243 | struct irq_cfg *cfg; | 234 | struct irq_cfg *cfg; |
244 | 235 | ||
245 | cfg = desc->chip_data; | 236 | cfg = desc->chip_data; |
246 | if (!cfg) { | 237 | if (!cfg) { |
247 | desc->chip_data = get_one_free_irq_cfg(cpu); | 238 | desc->chip_data = get_one_free_irq_cfg(node); |
248 | if (!desc->chip_data) { | 239 | if (!desc->chip_data) { |
249 | printk(KERN_ERR "can not alloc irq_cfg\n"); | 240 | printk(KERN_ERR "can not alloc irq_cfg\n"); |
250 | BUG_ON(1); | 241 | BUG_ON(1); |
@@ -254,10 +245,9 @@ int arch_init_chip_data(struct irq_desc *desc, int cpu) | |||
254 | return 0; | 245 | return 0; |
255 | } | 246 | } |
256 | 247 | ||
257 | #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC | 248 | /* for move_irq_desc */ |
258 | |||
259 | static void | 249 | static void |
260 | init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu) | 250 | init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node) |
261 | { | 251 | { |
262 | struct irq_pin_list *old_entry, *head, *tail, *entry; | 252 | struct irq_pin_list *old_entry, *head, *tail, *entry; |
263 | 253 | ||
@@ -266,7 +256,7 @@ init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu) | |||
266 | if (!old_entry) | 256 | if (!old_entry) |
267 | return; | 257 | return; |
268 | 258 | ||
269 | entry = get_one_free_irq_2_pin(cpu); | 259 | entry = get_one_free_irq_2_pin(node); |
270 | if (!entry) | 260 | if (!entry) |
271 | return; | 261 | return; |
272 | 262 | ||
@@ -276,7 +266,7 @@ init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu) | |||
276 | tail = entry; | 266 | tail = entry; |
277 | old_entry = old_entry->next; | 267 | old_entry = old_entry->next; |
278 | while (old_entry) { | 268 | while (old_entry) { |
279 | entry = get_one_free_irq_2_pin(cpu); | 269 | entry = get_one_free_irq_2_pin(node); |
280 | if (!entry) { | 270 | if (!entry) { |
281 | entry = head; | 271 | entry = head; |
282 | while (entry) { | 272 | while (entry) { |
@@ -316,12 +306,12 @@ static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg) | |||
316 | } | 306 | } |
317 | 307 | ||
318 | void arch_init_copy_chip_data(struct irq_desc *old_desc, | 308 | void arch_init_copy_chip_data(struct irq_desc *old_desc, |
319 | struct irq_desc *desc, int cpu) | 309 | struct irq_desc *desc, int node) |
320 | { | 310 | { |
321 | struct irq_cfg *cfg; | 311 | struct irq_cfg *cfg; |
322 | struct irq_cfg *old_cfg; | 312 | struct irq_cfg *old_cfg; |
323 | 313 | ||
324 | cfg = get_one_free_irq_cfg(cpu); | 314 | cfg = get_one_free_irq_cfg(node); |
325 | 315 | ||
326 | if (!cfg) | 316 | if (!cfg) |
327 | return; | 317 | return; |
@@ -332,7 +322,7 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc, | |||
332 | 322 | ||
333 | memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); | 323 | memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); |
334 | 324 | ||
335 | init_copy_irq_2_pin(old_cfg, cfg, cpu); | 325 | init_copy_irq_2_pin(old_cfg, cfg, node); |
336 | } | 326 | } |
337 | 327 | ||
338 | static void free_irq_cfg(struct irq_cfg *old_cfg) | 328 | static void free_irq_cfg(struct irq_cfg *old_cfg) |
@@ -356,19 +346,7 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) | |||
356 | old_desc->chip_data = NULL; | 346 | old_desc->chip_data = NULL; |
357 | } | 347 | } |
358 | } | 348 | } |
359 | 349 | /* end for move_irq_desc */ | |
360 | static void | ||
361 | set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) | ||
362 | { | ||
363 | struct irq_cfg *cfg = desc->chip_data; | ||
364 | |||
365 | if (!cfg->move_in_progress) { | ||
366 | /* it means that domain is not changed */ | ||
367 | if (!cpumask_intersects(desc->affinity, mask)) | ||
368 | cfg->move_desc_pending = 1; | ||
369 | } | ||
370 | } | ||
371 | #endif | ||
372 | 350 | ||
373 | #else | 351 | #else |
374 | static struct irq_cfg *irq_cfg(unsigned int irq) | 352 | static struct irq_cfg *irq_cfg(unsigned int irq) |
@@ -378,13 +356,6 @@ static struct irq_cfg *irq_cfg(unsigned int irq) | |||
378 | 356 | ||
379 | #endif | 357 | #endif |
380 | 358 | ||
381 | #ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC | ||
382 | static inline void | ||
383 | set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) | ||
384 | { | ||
385 | } | ||
386 | #endif | ||
387 | |||
388 | struct io_apic { | 359 | struct io_apic { |
389 | unsigned int index; | 360 | unsigned int index; |
390 | unsigned int unused[3]; | 361 | unsigned int unused[3]; |
@@ -518,132 +489,18 @@ static void ioapic_mask_entry(int apic, int pin) | |||
518 | spin_unlock_irqrestore(&ioapic_lock, flags); | 489 | spin_unlock_irqrestore(&ioapic_lock, flags); |
519 | } | 490 | } |
520 | 491 | ||
521 | #ifdef CONFIG_SMP | ||
522 | static void send_cleanup_vector(struct irq_cfg *cfg) | ||
523 | { | ||
524 | cpumask_var_t cleanup_mask; | ||
525 | |||
526 | if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { | ||
527 | unsigned int i; | ||
528 | cfg->move_cleanup_count = 0; | ||
529 | for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) | ||
530 | cfg->move_cleanup_count++; | ||
531 | for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) | ||
532 | apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); | ||
533 | } else { | ||
534 | cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); | ||
535 | cfg->move_cleanup_count = cpumask_weight(cleanup_mask); | ||
536 | apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
537 | free_cpumask_var(cleanup_mask); | ||
538 | } | ||
539 | cfg->move_in_progress = 0; | ||
540 | } | ||
541 | |||
542 | static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) | ||
543 | { | ||
544 | int apic, pin; | ||
545 | struct irq_pin_list *entry; | ||
546 | u8 vector = cfg->vector; | ||
547 | |||
548 | entry = cfg->irq_2_pin; | ||
549 | for (;;) { | ||
550 | unsigned int reg; | ||
551 | |||
552 | if (!entry) | ||
553 | break; | ||
554 | |||
555 | apic = entry->apic; | ||
556 | pin = entry->pin; | ||
557 | /* | ||
558 | * With interrupt-remapping, destination information comes | ||
559 | * from interrupt-remapping table entry. | ||
560 | */ | ||
561 | if (!irq_remapped(irq)) | ||
562 | io_apic_write(apic, 0x11 + pin*2, dest); | ||
563 | reg = io_apic_read(apic, 0x10 + pin*2); | ||
564 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; | ||
565 | reg |= vector; | ||
566 | io_apic_modify(apic, 0x10 + pin*2, reg); | ||
567 | if (!entry->next) | ||
568 | break; | ||
569 | entry = entry->next; | ||
570 | } | ||
571 | } | ||
572 | |||
573 | static int | ||
574 | assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); | ||
575 | |||
576 | /* | ||
577 | * Either sets desc->affinity to a valid value, and returns | ||
578 | * ->cpu_mask_to_apicid of that, or returns BAD_APICID and | ||
579 | * leaves desc->affinity untouched. | ||
580 | */ | ||
581 | static unsigned int | ||
582 | set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) | ||
583 | { | ||
584 | struct irq_cfg *cfg; | ||
585 | unsigned int irq; | ||
586 | |||
587 | if (!cpumask_intersects(mask, cpu_online_mask)) | ||
588 | return BAD_APICID; | ||
589 | |||
590 | irq = desc->irq; | ||
591 | cfg = desc->chip_data; | ||
592 | if (assign_irq_vector(irq, cfg, mask)) | ||
593 | return BAD_APICID; | ||
594 | |||
595 | /* check that before desc->addinity get updated */ | ||
596 | set_extra_move_desc(desc, mask); | ||
597 | |||
598 | cpumask_copy(desc->affinity, mask); | ||
599 | |||
600 | return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain); | ||
601 | } | ||
602 | |||
603 | static void | ||
604 | set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | ||
605 | { | ||
606 | struct irq_cfg *cfg; | ||
607 | unsigned long flags; | ||
608 | unsigned int dest; | ||
609 | unsigned int irq; | ||
610 | |||
611 | irq = desc->irq; | ||
612 | cfg = desc->chip_data; | ||
613 | |||
614 | spin_lock_irqsave(&ioapic_lock, flags); | ||
615 | dest = set_desc_affinity(desc, mask); | ||
616 | if (dest != BAD_APICID) { | ||
617 | /* Only the high 8 bits are valid. */ | ||
618 | dest = SET_APIC_LOGICAL_ID(dest); | ||
619 | __target_IO_APIC_irq(irq, dest, cfg); | ||
620 | } | ||
621 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
622 | } | ||
623 | |||
624 | static void | ||
625 | set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) | ||
626 | { | ||
627 | struct irq_desc *desc; | ||
628 | |||
629 | desc = irq_to_desc(irq); | ||
630 | |||
631 | set_ioapic_affinity_irq_desc(desc, mask); | ||
632 | } | ||
633 | #endif /* CONFIG_SMP */ | ||
634 | |||
635 | /* | 492 | /* |
636 | * The common case is 1:1 IRQ<->pin mappings. Sometimes there are | 493 | * The common case is 1:1 IRQ<->pin mappings. Sometimes there are |
637 | * shared ISA-space IRQs, so we have to support them. We are super | 494 | * shared ISA-space IRQs, so we have to support them. We are super |
638 | * fast in the common case, and fast for shared ISA-space IRQs. | 495 | * fast in the common case, and fast for shared ISA-space IRQs. |
639 | */ | 496 | */ |
640 | static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin) | 497 | static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) |
641 | { | 498 | { |
642 | struct irq_pin_list *entry; | 499 | struct irq_pin_list *entry; |
643 | 500 | ||
644 | entry = cfg->irq_2_pin; | 501 | entry = cfg->irq_2_pin; |
645 | if (!entry) { | 502 | if (!entry) { |
646 | entry = get_one_free_irq_2_pin(cpu); | 503 | entry = get_one_free_irq_2_pin(node); |
647 | if (!entry) { | 504 | if (!entry) { |
648 | printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n", | 505 | printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n", |
649 | apic, pin); | 506 | apic, pin); |
@@ -663,7 +520,7 @@ static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin) | |||
663 | entry = entry->next; | 520 | entry = entry->next; |
664 | } | 521 | } |
665 | 522 | ||
666 | entry->next = get_one_free_irq_2_pin(cpu); | 523 | entry->next = get_one_free_irq_2_pin(node); |
667 | entry = entry->next; | 524 | entry = entry->next; |
668 | entry->apic = apic; | 525 | entry->apic = apic; |
669 | entry->pin = pin; | 526 | entry->pin = pin; |
@@ -672,7 +529,7 @@ static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin) | |||
672 | /* | 529 | /* |
673 | * Reroute an IRQ to a different pin. | 530 | * Reroute an IRQ to a different pin. |
674 | */ | 531 | */ |
675 | static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu, | 532 | static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node, |
676 | int oldapic, int oldpin, | 533 | int oldapic, int oldpin, |
677 | int newapic, int newpin) | 534 | int newapic, int newpin) |
678 | { | 535 | { |
@@ -692,7 +549,7 @@ static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu, | |||
692 | 549 | ||
693 | /* why? call replace before add? */ | 550 | /* why? call replace before add? */ |
694 | if (!replaced) | 551 | if (!replaced) |
695 | add_pin_to_irq_cpu(cfg, cpu, newapic, newpin); | 552 | add_pin_to_irq_node(cfg, node, newapic, newpin); |
696 | } | 553 | } |
697 | 554 | ||
698 | static inline void io_apic_modify_irq(struct irq_cfg *cfg, | 555 | static inline void io_apic_modify_irq(struct irq_cfg *cfg, |
@@ -850,7 +707,6 @@ static int __init ioapic_pirq_setup(char *str) | |||
850 | __setup("pirq=", ioapic_pirq_setup); | 707 | __setup("pirq=", ioapic_pirq_setup); |
851 | #endif /* CONFIG_X86_32 */ | 708 | #endif /* CONFIG_X86_32 */ |
852 | 709 | ||
853 | #ifdef CONFIG_INTR_REMAP | ||
854 | struct IO_APIC_route_entry **alloc_ioapic_entries(void) | 710 | struct IO_APIC_route_entry **alloc_ioapic_entries(void) |
855 | { | 711 | { |
856 | int apic; | 712 | int apic; |
@@ -948,20 +804,6 @@ int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) | |||
948 | return 0; | 804 | return 0; |
949 | } | 805 | } |
950 | 806 | ||
951 | void reinit_intr_remapped_IO_APIC(int intr_remapping, | ||
952 | struct IO_APIC_route_entry **ioapic_entries) | ||
953 | |||
954 | { | ||
955 | /* | ||
956 | * for now plain restore of previous settings. | ||
957 | * TBD: In the case of OS enabling interrupt-remapping, | ||
958 | * IO-APIC RTE's need to be setup to point to interrupt-remapping | ||
959 | * table entries. for now, do a plain restore, and wait for | ||
960 | * the setup_IO_APIC_irqs() to do proper initialization. | ||
961 | */ | ||
962 | restore_IO_APIC_setup(ioapic_entries); | ||
963 | } | ||
964 | |||
965 | void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries) | 807 | void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries) |
966 | { | 808 | { |
967 | int apic; | 809 | int apic; |
@@ -971,7 +813,6 @@ void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries) | |||
971 | 813 | ||
972 | kfree(ioapic_entries); | 814 | kfree(ioapic_entries); |
973 | } | 815 | } |
974 | #endif | ||
975 | 816 | ||
976 | /* | 817 | /* |
977 | * Find the IRQ entry number of a certain pin. | 818 | * Find the IRQ entry number of a certain pin. |
@@ -1032,54 +873,6 @@ static int __init find_isa_irq_apic(int irq, int type) | |||
1032 | return -1; | 873 | return -1; |
1033 | } | 874 | } |
1034 | 875 | ||
1035 | /* | ||
1036 | * Find a specific PCI IRQ entry. | ||
1037 | * Not an __init, possibly needed by modules | ||
1038 | */ | ||
1039 | static int pin_2_irq(int idx, int apic, int pin); | ||
1040 | |||
1041 | int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) | ||
1042 | { | ||
1043 | int apic, i, best_guess = -1; | ||
1044 | |||
1045 | apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n", | ||
1046 | bus, slot, pin); | ||
1047 | if (test_bit(bus, mp_bus_not_pci)) { | ||
1048 | apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus); | ||
1049 | return -1; | ||
1050 | } | ||
1051 | for (i = 0; i < mp_irq_entries; i++) { | ||
1052 | int lbus = mp_irqs[i].srcbus; | ||
1053 | |||
1054 | for (apic = 0; apic < nr_ioapics; apic++) | ||
1055 | if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic || | ||
1056 | mp_irqs[i].dstapic == MP_APIC_ALL) | ||
1057 | break; | ||
1058 | |||
1059 | if (!test_bit(lbus, mp_bus_not_pci) && | ||
1060 | !mp_irqs[i].irqtype && | ||
1061 | (bus == lbus) && | ||
1062 | (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) { | ||
1063 | int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq); | ||
1064 | |||
1065 | if (!(apic || IO_APIC_IRQ(irq))) | ||
1066 | continue; | ||
1067 | |||
1068 | if (pin == (mp_irqs[i].srcbusirq & 3)) | ||
1069 | return irq; | ||
1070 | /* | ||
1071 | * Use the first all-but-pin matching entry as a | ||
1072 | * best-guess fuzzy result for broken mptables. | ||
1073 | */ | ||
1074 | if (best_guess < 0) | ||
1075 | best_guess = irq; | ||
1076 | } | ||
1077 | } | ||
1078 | return best_guess; | ||
1079 | } | ||
1080 | |||
1081 | EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); | ||
1082 | |||
1083 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) | 876 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) |
1084 | /* | 877 | /* |
1085 | * EISA Edge/Level control register, ELCR | 878 | * EISA Edge/Level control register, ELCR |
@@ -1298,6 +1091,64 @@ static int pin_2_irq(int idx, int apic, int pin) | |||
1298 | return irq; | 1091 | return irq; |
1299 | } | 1092 | } |
1300 | 1093 | ||
1094 | /* | ||
1095 | * Find a specific PCI IRQ entry. | ||
1096 | * Not an __init, possibly needed by modules | ||
1097 | */ | ||
1098 | int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin, | ||
1099 | struct io_apic_irq_attr *irq_attr) | ||
1100 | { | ||
1101 | int apic, i, best_guess = -1; | ||
1102 | |||
1103 | apic_printk(APIC_DEBUG, | ||
1104 | "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n", | ||
1105 | bus, slot, pin); | ||
1106 | if (test_bit(bus, mp_bus_not_pci)) { | ||
1107 | apic_printk(APIC_VERBOSE, | ||
1108 | "PCI BIOS passed nonexistent PCI bus %d!\n", bus); | ||
1109 | return -1; | ||
1110 | } | ||
1111 | for (i = 0; i < mp_irq_entries; i++) { | ||
1112 | int lbus = mp_irqs[i].srcbus; | ||
1113 | |||
1114 | for (apic = 0; apic < nr_ioapics; apic++) | ||
1115 | if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic || | ||
1116 | mp_irqs[i].dstapic == MP_APIC_ALL) | ||
1117 | break; | ||
1118 | |||
1119 | if (!test_bit(lbus, mp_bus_not_pci) && | ||
1120 | !mp_irqs[i].irqtype && | ||
1121 | (bus == lbus) && | ||
1122 | (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) { | ||
1123 | int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq); | ||
1124 | |||
1125 | if (!(apic || IO_APIC_IRQ(irq))) | ||
1126 | continue; | ||
1127 | |||
1128 | if (pin == (mp_irqs[i].srcbusirq & 3)) { | ||
1129 | set_io_apic_irq_attr(irq_attr, apic, | ||
1130 | mp_irqs[i].dstirq, | ||
1131 | irq_trigger(i), | ||
1132 | irq_polarity(i)); | ||
1133 | return irq; | ||
1134 | } | ||
1135 | /* | ||
1136 | * Use the first all-but-pin matching entry as a | ||
1137 | * best-guess fuzzy result for broken mptables. | ||
1138 | */ | ||
1139 | if (best_guess < 0) { | ||
1140 | set_io_apic_irq_attr(irq_attr, apic, | ||
1141 | mp_irqs[i].dstirq, | ||
1142 | irq_trigger(i), | ||
1143 | irq_polarity(i)); | ||
1144 | best_guess = irq; | ||
1145 | } | ||
1146 | } | ||
1147 | } | ||
1148 | return best_guess; | ||
1149 | } | ||
1150 | EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); | ||
1151 | |||
1301 | void lock_vector_lock(void) | 1152 | void lock_vector_lock(void) |
1302 | { | 1153 | { |
1303 | /* Used to the online set of cpus does not change | 1154 | /* Used to the online set of cpus does not change |
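IO_APIC_get_PCI_irq_vector() now also fills a struct io_apic_irq_attr with the IO-APIC, pin, trigger and polarity it found, so a caller can hand those attributes straight to io_apic_set_pci_routing(). A hypothetical caller sketch; the two function signatures are the ones in this diff, while the wrapper name and the 0-based pin convention are assumptions for illustration:

/* Hypothetical sketch: look up the routing for bus/slot/INTx and program
 * the IO-APIC pin with exactly the attributes the lookup reported. */
static int example_route_pci_pin(struct pci_dev *dev, u8 pin)
{
        struct io_apic_irq_attr irq_attr;
        int irq;

        /* mptable INTx pins are 0-based, PCI config-space pins are 1-based */
        irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
                                         PCI_SLOT(dev->devfn), pin - 1,
                                         &irq_attr);
        if (irq < 0)
                return irq;

        return io_apic_set_pci_routing(&dev->dev, irq, &irq_attr);
}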
@@ -1628,58 +1479,70 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq | |||
1628 | ioapic_write_entry(apic_id, pin, entry); | 1479 | ioapic_write_entry(apic_id, pin, entry); |
1629 | } | 1480 | } |
1630 | 1481 | ||
1482 | static struct { | ||
1483 | DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); | ||
1484 | } mp_ioapic_routing[MAX_IO_APICS]; | ||
1485 | |||
1631 | static void __init setup_IO_APIC_irqs(void) | 1486 | static void __init setup_IO_APIC_irqs(void) |
1632 | { | 1487 | { |
1633 | int apic_id, pin, idx, irq; | 1488 | int apic_id = 0, pin, idx, irq; |
1634 | int notcon = 0; | 1489 | int notcon = 0; |
1635 | struct irq_desc *desc; | 1490 | struct irq_desc *desc; |
1636 | struct irq_cfg *cfg; | 1491 | struct irq_cfg *cfg; |
1637 | int cpu = boot_cpu_id; | 1492 | int node = cpu_to_node(boot_cpu_id); |
1638 | 1493 | ||
1639 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); | 1494 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); |
1640 | 1495 | ||
1641 | for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { | 1496 | #ifdef CONFIG_ACPI |
1642 | for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { | 1497 | if (!acpi_disabled && acpi_ioapic) { |
1643 | 1498 | apic_id = mp_find_ioapic(0); | |
1644 | idx = find_irq_entry(apic_id, pin, mp_INT); | 1499 | if (apic_id < 0) |
1645 | if (idx == -1) { | 1500 | apic_id = 0; |
1646 | if (!notcon) { | 1501 | } |
1647 | notcon = 1; | 1502 | #endif |
1648 | apic_printk(APIC_VERBOSE, | ||
1649 | KERN_DEBUG " %d-%d", | ||
1650 | mp_ioapics[apic_id].apicid, pin); | ||
1651 | } else | ||
1652 | apic_printk(APIC_VERBOSE, " %d-%d", | ||
1653 | mp_ioapics[apic_id].apicid, pin); | ||
1654 | continue; | ||
1655 | } | ||
1656 | if (notcon) { | ||
1657 | apic_printk(APIC_VERBOSE, | ||
1658 | " (apicid-pin) not connected\n"); | ||
1659 | notcon = 0; | ||
1660 | } | ||
1661 | 1503 | ||
1662 | irq = pin_2_irq(idx, apic_id, pin); | 1504 | for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { |
1505 | idx = find_irq_entry(apic_id, pin, mp_INT); | ||
1506 | if (idx == -1) { | ||
1507 | if (!notcon) { | ||
1508 | notcon = 1; | ||
1509 | apic_printk(APIC_VERBOSE, | ||
1510 | KERN_DEBUG " %d-%d", | ||
1511 | mp_ioapics[apic_id].apicid, pin); | ||
1512 | } else | ||
1513 | apic_printk(APIC_VERBOSE, " %d-%d", | ||
1514 | mp_ioapics[apic_id].apicid, pin); | ||
1515 | continue; | ||
1516 | } | ||
1517 | if (notcon) { | ||
1518 | apic_printk(APIC_VERBOSE, | ||
1519 | " (apicid-pin) not connected\n"); | ||
1520 | notcon = 0; | ||
1521 | } | ||
1663 | 1522 | ||
1664 | /* | 1523 | irq = pin_2_irq(idx, apic_id, pin); |
1665 | * Skip the timer IRQ if there's a quirk handler | ||
1666 | * installed and if it returns 1: | ||
1667 | */ | ||
1668 | if (apic->multi_timer_check && | ||
1669 | apic->multi_timer_check(apic_id, irq)) | ||
1670 | continue; | ||
1671 | 1524 | ||
1672 | desc = irq_to_desc_alloc_cpu(irq, cpu); | 1525 | /* |
1673 | if (!desc) { | 1526 | * Skip the timer IRQ if there's a quirk handler |
1674 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | 1527 | * installed and if it returns 1: |
1675 | continue; | 1528 | */ |
1676 | } | 1529 | if (apic->multi_timer_check && |
1677 | cfg = desc->chip_data; | 1530 | apic->multi_timer_check(apic_id, irq)) |
1678 | add_pin_to_irq_cpu(cfg, cpu, apic_id, pin); | 1531 | continue; |
1679 | 1532 | ||
1680 | setup_IO_APIC_irq(apic_id, pin, irq, desc, | 1533 | desc = irq_to_desc_alloc_node(irq, node); |
1681 | irq_trigger(idx), irq_polarity(idx)); | 1534 | if (!desc) { |
1535 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | ||
1536 | continue; | ||
1682 | } | 1537 | } |
1538 | cfg = desc->chip_data; | ||
1539 | add_pin_to_irq_node(cfg, node, apic_id, pin); | ||
1540 | /* | ||
1541 | * don't mark it in pin_programmed, so later acpi could | ||
1542 | * set it correctly when irq < 16 | ||
1543 | */ | ||
1544 | setup_IO_APIC_irq(apic_id, pin, irq, desc, | ||
1545 | irq_trigger(idx), irq_polarity(idx)); | ||
1683 | } | 1546 | } |
1684 | 1547 | ||
1685 | if (notcon) | 1548 | if (notcon) |
@@ -1869,7 +1732,7 @@ __apicdebuginit(void) print_APIC_bitfield(int base) | |||
1869 | 1732 | ||
1870 | __apicdebuginit(void) print_local_APIC(void *dummy) | 1733 | __apicdebuginit(void) print_local_APIC(void *dummy) |
1871 | { | 1734 | { |
1872 | unsigned int v, ver, maxlvt; | 1735 | unsigned int i, v, ver, maxlvt; |
1873 | u64 icr; | 1736 | u64 icr; |
1874 | 1737 | ||
1875 | if (apic_verbosity == APIC_QUIET) | 1738 | if (apic_verbosity == APIC_QUIET) |
@@ -1957,6 +1820,18 @@ __apicdebuginit(void) print_local_APIC(void *dummy) | |||
1957 | printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v); | 1820 | printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v); |
1958 | v = apic_read(APIC_TDCR); | 1821 | v = apic_read(APIC_TDCR); |
1959 | printk(KERN_DEBUG "... APIC TDCR: %08x\n", v); | 1822 | printk(KERN_DEBUG "... APIC TDCR: %08x\n", v); |
1823 | |||
1824 | if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { | ||
1825 | v = apic_read(APIC_EFEAT); | ||
1826 | maxlvt = (v >> 16) & 0xff; | ||
1827 | printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v); | ||
1828 | v = apic_read(APIC_ECTRL); | ||
1829 | printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v); | ||
1830 | for (i = 0; i < maxlvt; i++) { | ||
1831 | v = apic_read(APIC_EILVTn(i)); | ||
1832 | printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v); | ||
1833 | } | ||
1834 | } | ||
1960 | printk("\n"); | 1835 | printk("\n"); |
1961 | } | 1836 | } |
1962 | 1837 | ||
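The print_local_APIC() additions above read APIC_EFEAT, take the extended-LVT count from bits 16..23, and then dump each APIC_EILVTn(i). A simplified userspace sketch of that "read a feature register, derive a count, walk the indexed registers" pattern, with a made-up register array and read_reg() standing in for apic_read():

#include <stdio.h>

static unsigned int regs[64] = { [0] = 0x00030000 };   /* fake EFEAT: 3 extended LVTs */

static unsigned int read_reg(unsigned int idx)
{
    return regs[idx];
}

static void dump_extended_lvts(void)
{
    unsigned int feat  = read_reg(0);            /* stands in for apic_read(APIC_EFEAT) */
    unsigned int count = (feat >> 16) & 0xff;    /* bits 16..23: number of extended LVTs */
    unsigned int i;

    printf("EFEAT : %08x (%u extended LVT entries)\n", feat, count);
    for (i = 0; i < count; i++)
        printf("EILVT%u: %08x\n", i, read_reg(1 + i));
}

int main(void)
{
    dump_extended_lvts();
    return 0;
}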
@@ -2005,6 +1880,11 @@ __apicdebuginit(void) print_PIC(void) | |||
2005 | __apicdebuginit(int) print_all_ICs(void) | 1880 | __apicdebuginit(int) print_all_ICs(void) |
2006 | { | 1881 | { |
2007 | print_PIC(); | 1882 | print_PIC(); |
1883 | |||
1884 | /* don't print out if apic is not there */ | ||
1885 | if (!cpu_has_apic || disable_apic) | ||
1886 | return 0; | ||
1887 | |||
2008 | print_all_local_APICs(); | 1888 | print_all_local_APICs(); |
2009 | print_IO_APIC(); | 1889 | print_IO_APIC(); |
2010 | 1890 | ||
@@ -2360,6 +2240,118 @@ static int ioapic_retrigger_irq(unsigned int irq) | |||
2360 | */ | 2240 | */ |
2361 | 2241 | ||
2362 | #ifdef CONFIG_SMP | 2242 | #ifdef CONFIG_SMP |
2243 | static void send_cleanup_vector(struct irq_cfg *cfg) | ||
2244 | { | ||
2245 | cpumask_var_t cleanup_mask; | ||
2246 | |||
2247 | if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { | ||
2248 | unsigned int i; | ||
2249 | cfg->move_cleanup_count = 0; | ||
2250 | for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) | ||
2251 | cfg->move_cleanup_count++; | ||
2252 | for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) | ||
2253 | apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); | ||
2254 | } else { | ||
2255 | cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); | ||
2256 | cfg->move_cleanup_count = cpumask_weight(cleanup_mask); | ||
2257 | apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
2258 | free_cpumask_var(cleanup_mask); | ||
2259 | } | ||
2260 | cfg->move_in_progress = 0; | ||
2261 | } | ||
2262 | |||
2263 | static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) | ||
2264 | { | ||
2265 | int apic, pin; | ||
2266 | struct irq_pin_list *entry; | ||
2267 | u8 vector = cfg->vector; | ||
2268 | |||
2269 | entry = cfg->irq_2_pin; | ||
2270 | for (;;) { | ||
2271 | unsigned int reg; | ||
2272 | |||
2273 | if (!entry) | ||
2274 | break; | ||
2275 | |||
2276 | apic = entry->apic; | ||
2277 | pin = entry->pin; | ||
2278 | /* | ||
2279 | * With interrupt-remapping, destination information comes | ||
2280 | * from interrupt-remapping table entry. | ||
2281 | */ | ||
2282 | if (!irq_remapped(irq)) | ||
2283 | io_apic_write(apic, 0x11 + pin*2, dest); | ||
2284 | reg = io_apic_read(apic, 0x10 + pin*2); | ||
2285 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; | ||
2286 | reg |= vector; | ||
2287 | io_apic_modify(apic, 0x10 + pin*2, reg); | ||
2288 | if (!entry->next) | ||
2289 | break; | ||
2290 | entry = entry->next; | ||
2291 | } | ||
2292 | } | ||
2293 | |||
2294 | static int | ||
2295 | assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); | ||
2296 | |||
2297 | /* | ||
2298 | * Either sets desc->affinity to a valid value, and returns | ||
2299 | * ->cpu_mask_to_apicid of that, or returns BAD_APICID and | ||
2300 | * leaves desc->affinity untouched. | ||
2301 | */ | ||
2302 | static unsigned int | ||
2303 | set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) | ||
2304 | { | ||
2305 | struct irq_cfg *cfg; | ||
2306 | unsigned int irq; | ||
2307 | |||
2308 | if (!cpumask_intersects(mask, cpu_online_mask)) | ||
2309 | return BAD_APICID; | ||
2310 | |||
2311 | irq = desc->irq; | ||
2312 | cfg = desc->chip_data; | ||
2313 | if (assign_irq_vector(irq, cfg, mask)) | ||
2314 | return BAD_APICID; | ||
2315 | |||
2316 | cpumask_copy(desc->affinity, mask); | ||
2317 | |||
2318 | return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain); | ||
2319 | } | ||
2320 | |||
2321 | static int | ||
2322 | set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | ||
2323 | { | ||
2324 | struct irq_cfg *cfg; | ||
2325 | unsigned long flags; | ||
2326 | unsigned int dest; | ||
2327 | unsigned int irq; | ||
2328 | int ret = -1; | ||
2329 | |||
2330 | irq = desc->irq; | ||
2331 | cfg = desc->chip_data; | ||
2332 | |||
2333 | spin_lock_irqsave(&ioapic_lock, flags); | ||
2334 | dest = set_desc_affinity(desc, mask); | ||
2335 | if (dest != BAD_APICID) { | ||
2336 | /* Only the high 8 bits are valid. */ | ||
2337 | dest = SET_APIC_LOGICAL_ID(dest); | ||
2338 | __target_IO_APIC_irq(irq, dest, cfg); | ||
2339 | ret = 0; | ||
2340 | } | ||
2341 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
2342 | |||
2343 | return ret; | ||
2344 | } | ||
2345 | |||
2346 | static int | ||
2347 | set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) | ||
2348 | { | ||
2349 | struct irq_desc *desc; | ||
2350 | |||
2351 | desc = irq_to_desc(irq); | ||
2352 | |||
2353 | return set_ioapic_affinity_irq_desc(desc, mask); | ||
2354 | } | ||
2363 | 2355 | ||
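The block added above moves set_desc_affinity() and the IO-APIC affinity setters into this file and has them report the outcome instead of returning void: set_desc_affinity() hands back either a destination or the BAD_APICID sentinel, and set_ioapic_affinity_irq_desc() turns that into 0 on success or -1 on failure. A toy illustration of that sentinel-plus-status-code pattern (invented names, no kernel types):

#include <stdio.h>

#define BAD_DEST 0xFFFFu

/* Pretend to pick a destination for a cpu mask; fail on an empty mask. */
static unsigned int pick_dest(unsigned int mask)
{
    if (mask == 0)
        return BAD_DEST;        /* sentinel: nothing usable in the mask */
    return mask & -mask;        /* lowest set bit as the destination, just for illustration */
}

static int set_affinity(unsigned int irq, unsigned int mask)
{
    unsigned int dest = pick_dest(mask);

    if (dest == BAD_DEST)
        return -1;              /* caller can now see that the change failed */
    printf("irq %u routed to dest %#x\n", irq, dest);
    return 0;
}

int main(void)
{
    if (set_affinity(10, 0x0) < 0)
        printf("affinity change rejected\n");
    return set_affinity(10, 0xc);
}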
2364 | #ifdef CONFIG_INTR_REMAP | 2356 | #ifdef CONFIG_INTR_REMAP |
2365 | 2357 | ||
@@ -2374,26 +2366,25 @@ static int ioapic_retrigger_irq(unsigned int irq) | |||
2374 | * Real vector that is used for interrupting cpu will be coming from | 2366 | * Real vector that is used for interrupting cpu will be coming from |
2375 | * the interrupt-remapping table entry. | 2367 | * the interrupt-remapping table entry. |
2376 | */ | 2368 | */ |
2377 | static void | 2369 | static int |
2378 | migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | 2370 | migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) |
2379 | { | 2371 | { |
2380 | struct irq_cfg *cfg; | 2372 | struct irq_cfg *cfg; |
2381 | struct irte irte; | 2373 | struct irte irte; |
2382 | unsigned int dest; | 2374 | unsigned int dest; |
2383 | unsigned int irq; | 2375 | unsigned int irq; |
2376 | int ret = -1; | ||
2384 | 2377 | ||
2385 | if (!cpumask_intersects(mask, cpu_online_mask)) | 2378 | if (!cpumask_intersects(mask, cpu_online_mask)) |
2386 | return; | 2379 | return ret; |
2387 | 2380 | ||
2388 | irq = desc->irq; | 2381 | irq = desc->irq; |
2389 | if (get_irte(irq, &irte)) | 2382 | if (get_irte(irq, &irte)) |
2390 | return; | 2383 | return ret; |
2391 | 2384 | ||
2392 | cfg = desc->chip_data; | 2385 | cfg = desc->chip_data; |
2393 | if (assign_irq_vector(irq, cfg, mask)) | 2386 | if (assign_irq_vector(irq, cfg, mask)) |
2394 | return; | 2387 | return ret; |
2395 | |||
2396 | set_extra_move_desc(desc, mask); | ||
2397 | 2388 | ||
2398 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); | 2389 | dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); |
2399 | 2390 | ||
@@ -2409,27 +2400,30 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | |||
2409 | send_cleanup_vector(cfg); | 2400 | send_cleanup_vector(cfg); |
2410 | 2401 | ||
2411 | cpumask_copy(desc->affinity, mask); | 2402 | cpumask_copy(desc->affinity, mask); |
2403 | |||
2404 | return 0; | ||
2412 | } | 2405 | } |
2413 | 2406 | ||
2414 | /* | 2407 | /* |
2415 | * Migrates the IRQ destination in the process context. | 2408 | * Migrates the IRQ destination in the process context. |
2416 | */ | 2409 | */ |
2417 | static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, | 2410 | static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, |
2418 | const struct cpumask *mask) | 2411 | const struct cpumask *mask) |
2419 | { | 2412 | { |
2420 | migrate_ioapic_irq_desc(desc, mask); | 2413 | return migrate_ioapic_irq_desc(desc, mask); |
2421 | } | 2414 | } |
2422 | static void set_ir_ioapic_affinity_irq(unsigned int irq, | 2415 | static int set_ir_ioapic_affinity_irq(unsigned int irq, |
2423 | const struct cpumask *mask) | 2416 | const struct cpumask *mask) |
2424 | { | 2417 | { |
2425 | struct irq_desc *desc = irq_to_desc(irq); | 2418 | struct irq_desc *desc = irq_to_desc(irq); |
2426 | 2419 | ||
2427 | set_ir_ioapic_affinity_irq_desc(desc, mask); | 2420 | return set_ir_ioapic_affinity_irq_desc(desc, mask); |
2428 | } | 2421 | } |
2429 | #else | 2422 | #else |
2430 | static inline void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, | 2423 | static inline int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, |
2431 | const struct cpumask *mask) | 2424 | const struct cpumask *mask) |
2432 | { | 2425 | { |
2426 | return 0; | ||
2433 | } | 2427 | } |
2434 | #endif | 2428 | #endif |
2435 | 2429 | ||
@@ -2491,86 +2485,19 @@ static void irq_complete_move(struct irq_desc **descp) | |||
2491 | struct irq_cfg *cfg = desc->chip_data; | 2485 | struct irq_cfg *cfg = desc->chip_data; |
2492 | unsigned vector, me; | 2486 | unsigned vector, me; |
2493 | 2487 | ||
2494 | if (likely(!cfg->move_in_progress)) { | 2488 | if (likely(!cfg->move_in_progress)) |
2495 | #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC | ||
2496 | if (likely(!cfg->move_desc_pending)) | ||
2497 | return; | ||
2498 | |||
2499 | /* domain has not changed, but affinity did */ | ||
2500 | me = smp_processor_id(); | ||
2501 | if (cpumask_test_cpu(me, desc->affinity)) { | ||
2502 | *descp = desc = move_irq_desc(desc, me); | ||
2503 | /* get the new one */ | ||
2504 | cfg = desc->chip_data; | ||
2505 | cfg->move_desc_pending = 0; | ||
2506 | } | ||
2507 | #endif | ||
2508 | return; | 2489 | return; |
2509 | } | ||
2510 | 2490 | ||
2511 | vector = ~get_irq_regs()->orig_ax; | 2491 | vector = ~get_irq_regs()->orig_ax; |
2512 | me = smp_processor_id(); | 2492 | me = smp_processor_id(); |
2513 | 2493 | ||
2514 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) { | 2494 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) |
2515 | #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC | ||
2516 | *descp = desc = move_irq_desc(desc, me); | ||
2517 | /* get the new one */ | ||
2518 | cfg = desc->chip_data; | ||
2519 | #endif | ||
2520 | send_cleanup_vector(cfg); | 2495 | send_cleanup_vector(cfg); |
2521 | } | ||
2522 | } | 2496 | } |
2523 | #else | 2497 | #else |
2524 | static inline void irq_complete_move(struct irq_desc **descp) {} | 2498 | static inline void irq_complete_move(struct irq_desc **descp) {} |
2525 | #endif | 2499 | #endif |
2526 | 2500 | ||
2527 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | ||
2528 | { | ||
2529 | int apic, pin; | ||
2530 | struct irq_pin_list *entry; | ||
2531 | |||
2532 | entry = cfg->irq_2_pin; | ||
2533 | for (;;) { | ||
2534 | |||
2535 | if (!entry) | ||
2536 | break; | ||
2537 | |||
2538 | apic = entry->apic; | ||
2539 | pin = entry->pin; | ||
2540 | io_apic_eoi(apic, pin); | ||
2541 | entry = entry->next; | ||
2542 | } | ||
2543 | } | ||
2544 | |||
2545 | static void | ||
2546 | eoi_ioapic_irq(struct irq_desc *desc) | ||
2547 | { | ||
2548 | struct irq_cfg *cfg; | ||
2549 | unsigned long flags; | ||
2550 | unsigned int irq; | ||
2551 | |||
2552 | irq = desc->irq; | ||
2553 | cfg = desc->chip_data; | ||
2554 | |||
2555 | spin_lock_irqsave(&ioapic_lock, flags); | ||
2556 | __eoi_ioapic_irq(irq, cfg); | ||
2557 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
2558 | } | ||
2559 | |||
2560 | #ifdef CONFIG_X86_X2APIC | ||
2561 | static void ack_x2apic_level(unsigned int irq) | ||
2562 | { | ||
2563 | struct irq_desc *desc = irq_to_desc(irq); | ||
2564 | ack_x2APIC_irq(); | ||
2565 | eoi_ioapic_irq(desc); | ||
2566 | } | ||
2567 | |||
2568 | static void ack_x2apic_edge(unsigned int irq) | ||
2569 | { | ||
2570 | ack_x2APIC_irq(); | ||
2571 | } | ||
2572 | #endif | ||
2573 | |||
2574 | static void ack_apic_edge(unsigned int irq) | 2501 | static void ack_apic_edge(unsigned int irq) |
2575 | { | 2502 | { |
2576 | struct irq_desc *desc = irq_to_desc(irq); | 2503 | struct irq_desc *desc = irq_to_desc(irq); |
@@ -2634,9 +2561,6 @@ static void ack_apic_level(unsigned int irq) | |||
2634 | */ | 2561 | */ |
2635 | ack_APIC_irq(); | 2562 | ack_APIC_irq(); |
2636 | 2563 | ||
2637 | if (irq_remapped(irq)) | ||
2638 | eoi_ioapic_irq(desc); | ||
2639 | |||
2640 | /* Now we can move and re-enable the irq */ | 2564 | /* Now we can move and re-enable the irq */
2641 | if (unlikely(do_unmask_irq)) { | 2565 | if (unlikely(do_unmask_irq)) { |
2642 | /* Only migrate the irq if the ack has been received. | 2566 | /* Only migrate the irq if the ack has been received. |
@@ -2683,22 +2607,50 @@ static void ack_apic_level(unsigned int irq) | |||
2683 | } | 2607 | } |
2684 | 2608 | ||
2685 | #ifdef CONFIG_INTR_REMAP | 2609 | #ifdef CONFIG_INTR_REMAP |
2610 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | ||
2611 | { | ||
2612 | int apic, pin; | ||
2613 | struct irq_pin_list *entry; | ||
2614 | |||
2615 | entry = cfg->irq_2_pin; | ||
2616 | for (;;) { | ||
2617 | |||
2618 | if (!entry) | ||
2619 | break; | ||
2620 | |||
2621 | apic = entry->apic; | ||
2622 | pin = entry->pin; | ||
2623 | io_apic_eoi(apic, pin); | ||
2624 | entry = entry->next; | ||
2625 | } | ||
2626 | } | ||
2627 | |||
2628 | static void | ||
2629 | eoi_ioapic_irq(struct irq_desc *desc) | ||
2630 | { | ||
2631 | struct irq_cfg *cfg; | ||
2632 | unsigned long flags; | ||
2633 | unsigned int irq; | ||
2634 | |||
2635 | irq = desc->irq; | ||
2636 | cfg = desc->chip_data; | ||
2637 | |||
2638 | spin_lock_irqsave(&ioapic_lock, flags); | ||
2639 | __eoi_ioapic_irq(irq, cfg); | ||
2640 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
2641 | } | ||
2642 | |||
2686 | static void ir_ack_apic_edge(unsigned int irq) | 2643 | static void ir_ack_apic_edge(unsigned int irq) |
2687 | { | 2644 | { |
2688 | #ifdef CONFIG_X86_X2APIC | 2645 | ack_APIC_irq(); |
2689 | if (x2apic_enabled()) | ||
2690 | return ack_x2apic_edge(irq); | ||
2691 | #endif | ||
2692 | return ack_apic_edge(irq); | ||
2693 | } | 2646 | } |
2694 | 2647 | ||
2695 | static void ir_ack_apic_level(unsigned int irq) | 2648 | static void ir_ack_apic_level(unsigned int irq) |
2696 | { | 2649 | { |
2697 | #ifdef CONFIG_X86_X2APIC | 2650 | struct irq_desc *desc = irq_to_desc(irq); |
2698 | if (x2apic_enabled()) | 2651 | |
2699 | return ack_x2apic_level(irq); | 2652 | ack_APIC_irq(); |
2700 | #endif | 2653 | eoi_ioapic_irq(desc); |
2701 | return ack_apic_level(irq); | ||
2702 | } | 2654 | } |
2703 | #endif /* CONFIG_INTR_REMAP */ | 2655 | #endif /* CONFIG_INTR_REMAP */ |
2704 | 2656 | ||
@@ -2903,7 +2855,7 @@ static inline void __init check_timer(void) | |||
2903 | { | 2855 | { |
2904 | struct irq_desc *desc = irq_to_desc(0); | 2856 | struct irq_desc *desc = irq_to_desc(0); |
2905 | struct irq_cfg *cfg = desc->chip_data; | 2857 | struct irq_cfg *cfg = desc->chip_data; |
2906 | int cpu = boot_cpu_id; | 2858 | int node = cpu_to_node(boot_cpu_id); |
2907 | int apic1, pin1, apic2, pin2; | 2859 | int apic1, pin1, apic2, pin2; |
2908 | unsigned long flags; | 2860 | unsigned long flags; |
2909 | int no_pin1 = 0; | 2861 | int no_pin1 = 0; |
@@ -2969,7 +2921,7 @@ static inline void __init check_timer(void) | |||
2969 | * Ok, does IRQ0 through the IOAPIC work? | 2921 | * Ok, does IRQ0 through the IOAPIC work? |
2970 | */ | 2922 | */ |
2971 | if (no_pin1) { | 2923 | if (no_pin1) { |
2972 | add_pin_to_irq_cpu(cfg, cpu, apic1, pin1); | 2924 | add_pin_to_irq_node(cfg, node, apic1, pin1); |
2973 | setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); | 2925 | setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); |
2974 | } else { | 2926 | } else { |
2975 | /* for edge trigger, setup_IO_APIC_irq already | 2927 | /* for edge trigger, setup_IO_APIC_irq already |
@@ -3006,7 +2958,7 @@ static inline void __init check_timer(void) | |||
3006 | /* | 2958 | /* |
3007 | * legacy devices should be connected to IO APIC #0 | 2959 | * legacy devices should be connected to IO APIC #0 |
3008 | */ | 2960 | */ |
3009 | replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2); | 2961 | replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); |
3010 | setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); | 2962 | setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); |
3011 | enable_8259A_irq(0); | 2963 | enable_8259A_irq(0); |
3012 | if (timer_irq_works()) { | 2964 | if (timer_irq_works()) { |
@@ -3218,14 +3170,13 @@ static int nr_irqs_gsi = NR_IRQS_LEGACY; | |||
3218 | /* | 3170 | /* |
3219 | * Dynamic irq allocate and deallocation | 3171 | * Dynamic irq allocate and deallocation |
3220 | */ | 3172 | */ |
3221 | unsigned int create_irq_nr(unsigned int irq_want) | 3173 | unsigned int create_irq_nr(unsigned int irq_want, int node) |
3222 | { | 3174 | { |
3223 | /* Allocate an unused irq */ | 3175 | /* Allocate an unused irq */ |
3224 | unsigned int irq; | 3176 | unsigned int irq; |
3225 | unsigned int new; | 3177 | unsigned int new; |
3226 | unsigned long flags; | 3178 | unsigned long flags; |
3227 | struct irq_cfg *cfg_new = NULL; | 3179 | struct irq_cfg *cfg_new = NULL; |
3228 | int cpu = boot_cpu_id; | ||
3229 | struct irq_desc *desc_new = NULL; | 3180 | struct irq_desc *desc_new = NULL; |
3230 | 3181 | ||
3231 | irq = 0; | 3182 | irq = 0; |
@@ -3234,7 +3185,7 @@ unsigned int create_irq_nr(unsigned int irq_want) | |||
3234 | 3185 | ||
3235 | spin_lock_irqsave(&vector_lock, flags); | 3186 | spin_lock_irqsave(&vector_lock, flags); |
3236 | for (new = irq_want; new < nr_irqs; new++) { | 3187 | for (new = irq_want; new < nr_irqs; new++) { |
3237 | desc_new = irq_to_desc_alloc_cpu(new, cpu); | 3188 | desc_new = irq_to_desc_alloc_node(new, node); |
3238 | if (!desc_new) { | 3189 | if (!desc_new) { |
3239 | printk(KERN_INFO "can not get irq_desc for %d\n", new); | 3190 | printk(KERN_INFO "can not get irq_desc for %d\n", new); |
3240 | continue; | 3191 | continue; |
@@ -3243,6 +3194,9 @@ unsigned int create_irq_nr(unsigned int irq_want) | |||
3243 | 3194 | ||
3244 | if (cfg_new->vector != 0) | 3195 | if (cfg_new->vector != 0) |
3245 | continue; | 3196 | continue; |
3197 | |||
3198 | desc_new = move_irq_desc(desc_new, node); | ||
3199 | |||
3246 | if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) | 3200 | if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) |
3247 | irq = new; | 3201 | irq = new; |
3248 | break; | 3202 | break; |
@@ -3260,11 +3214,12 @@ unsigned int create_irq_nr(unsigned int irq_want) | |||
3260 | 3214 | ||
3261 | int create_irq(void) | 3215 | int create_irq(void) |
3262 | { | 3216 | { |
3217 | int node = cpu_to_node(boot_cpu_id); | ||
3263 | unsigned int irq_want; | 3218 | unsigned int irq_want; |
3264 | int irq; | 3219 | int irq; |
3265 | 3220 | ||
3266 | irq_want = nr_irqs_gsi; | 3221 | irq_want = nr_irqs_gsi; |
3267 | irq = create_irq_nr(irq_want); | 3222 | irq = create_irq_nr(irq_want, node); |
3268 | 3223 | ||
3269 | if (irq == 0) | 3224 | if (irq == 0) |
3270 | irq = -1; | 3225 | irq = -1; |
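create_irq_nr() above gains a NUMA node argument so the descriptor can be allocated (and relocated via move_irq_desc()) near the requesting device; callers pass cpu_to_node(boot_cpu_id) or, for MSI below, dev_to_node(). The search itself remains a linear scan upward from a hint until a free entry is found; a stripped-down sketch of that scan (node handling omitted, names invented):

#include <stdio.h>

#define NR_SLOTS 32

static int slot_used[NR_SLOTS];

/* Return the first unused slot at or above 'want', or 0 on failure
 * (slot 0 is treated as reserved, mirroring how irq 0 is never handed out here). */
static unsigned int alloc_slot(unsigned int want)
{
    for (unsigned int i = want; i < NR_SLOTS; i++) {
        if (!slot_used[i]) {
            slot_used[i] = 1;
            return i;
        }
    }
    return 0;
}

int main(void)
{
    slot_used[16] = 1;                  /* pretend 16 is already taken */
    printf("%u\n", alloc_slot(16));     /* prints 17 */
    return 0;
}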
@@ -3366,7 +3321,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms | |||
3366 | } | 3321 | } |
3367 | 3322 | ||
3368 | #ifdef CONFIG_SMP | 3323 | #ifdef CONFIG_SMP |
3369 | static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | 3324 | static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) |
3370 | { | 3325 | { |
3371 | struct irq_desc *desc = irq_to_desc(irq); | 3326 | struct irq_desc *desc = irq_to_desc(irq); |
3372 | struct irq_cfg *cfg; | 3327 | struct irq_cfg *cfg; |
@@ -3375,7 +3330,7 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
3375 | 3330 | ||
3376 | dest = set_desc_affinity(desc, mask); | 3331 | dest = set_desc_affinity(desc, mask); |
3377 | if (dest == BAD_APICID) | 3332 | if (dest == BAD_APICID) |
3378 | return; | 3333 | return -1; |
3379 | 3334 | ||
3380 | cfg = desc->chip_data; | 3335 | cfg = desc->chip_data; |
3381 | 3336 | ||
@@ -3387,13 +3342,15 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
3387 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3342 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
3388 | 3343 | ||
3389 | write_msi_msg_desc(desc, &msg); | 3344 | write_msi_msg_desc(desc, &msg); |
3345 | |||
3346 | return 0; | ||
3390 | } | 3347 | } |
3391 | #ifdef CONFIG_INTR_REMAP | 3348 | #ifdef CONFIG_INTR_REMAP |
3392 | /* | 3349 | /* |
3393 | * Migrate the MSI irq to another cpumask. This migration is | 3350 | * Migrate the MSI irq to another cpumask. This migration is |
3394 | * done in the process context using interrupt-remapping hardware. | 3351 | * done in the process context using interrupt-remapping hardware. |
3395 | */ | 3352 | */ |
3396 | static void | 3353 | static int |
3397 | ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | 3354 | ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) |
3398 | { | 3355 | { |
3399 | struct irq_desc *desc = irq_to_desc(irq); | 3356 | struct irq_desc *desc = irq_to_desc(irq); |
@@ -3402,11 +3359,11 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
3402 | struct irte irte; | 3359 | struct irte irte; |
3403 | 3360 | ||
3404 | if (get_irte(irq, &irte)) | 3361 | if (get_irte(irq, &irte)) |
3405 | return; | 3362 | return -1; |
3406 | 3363 | ||
3407 | dest = set_desc_affinity(desc, mask); | 3364 | dest = set_desc_affinity(desc, mask); |
3408 | if (dest == BAD_APICID) | 3365 | if (dest == BAD_APICID) |
3409 | return; | 3366 | return -1; |
3410 | 3367 | ||
3411 | irte.vector = cfg->vector; | 3368 | irte.vector = cfg->vector; |
3412 | irte.dest_id = IRTE_DEST(dest); | 3369 | irte.dest_id = IRTE_DEST(dest); |
@@ -3423,6 +3380,8 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
3423 | */ | 3380 | */ |
3424 | if (cfg->move_in_progress) | 3381 | if (cfg->move_in_progress) |
3425 | send_cleanup_vector(cfg); | 3382 | send_cleanup_vector(cfg); |
3383 | |||
3384 | return 0; | ||
3426 | } | 3385 | } |
3427 | 3386 | ||
3428 | #endif | 3387 | #endif |
@@ -3518,15 +3477,17 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
3518 | unsigned int irq_want; | 3477 | unsigned int irq_want; |
3519 | struct intel_iommu *iommu = NULL; | 3478 | struct intel_iommu *iommu = NULL; |
3520 | int index = 0; | 3479 | int index = 0; |
3480 | int node; | ||
3521 | 3481 | ||
3522 | /* x86 doesn't support multiple MSI yet */ | 3482 | /* x86 doesn't support multiple MSI yet */ |
3523 | if (type == PCI_CAP_ID_MSI && nvec > 1) | 3483 | if (type == PCI_CAP_ID_MSI && nvec > 1) |
3524 | return 1; | 3484 | return 1; |
3525 | 3485 | ||
3486 | node = dev_to_node(&dev->dev); | ||
3526 | irq_want = nr_irqs_gsi; | 3487 | irq_want = nr_irqs_gsi; |
3527 | sub_handle = 0; | 3488 | sub_handle = 0; |
3528 | list_for_each_entry(msidesc, &dev->msi_list, list) { | 3489 | list_for_each_entry(msidesc, &dev->msi_list, list) { |
3529 | irq = create_irq_nr(irq_want); | 3490 | irq = create_irq_nr(irq_want, node); |
3530 | if (irq == 0) | 3491 | if (irq == 0) |
3531 | return -1; | 3492 | return -1; |
3532 | irq_want = irq + 1; | 3493 | irq_want = irq + 1; |
@@ -3576,7 +3537,7 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
3576 | 3537 | ||
3577 | #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) | 3538 | #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) |
3578 | #ifdef CONFIG_SMP | 3539 | #ifdef CONFIG_SMP |
3579 | static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | 3540 | static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) |
3580 | { | 3541 | { |
3581 | struct irq_desc *desc = irq_to_desc(irq); | 3542 | struct irq_desc *desc = irq_to_desc(irq); |
3582 | struct irq_cfg *cfg; | 3543 | struct irq_cfg *cfg; |
@@ -3585,7 +3546,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
3585 | 3546 | ||
3586 | dest = set_desc_affinity(desc, mask); | 3547 | dest = set_desc_affinity(desc, mask); |
3587 | if (dest == BAD_APICID) | 3548 | if (dest == BAD_APICID) |
3588 | return; | 3549 | return -1; |
3589 | 3550 | ||
3590 | cfg = desc->chip_data; | 3551 | cfg = desc->chip_data; |
3591 | 3552 | ||
@@ -3597,6 +3558,8 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
3597 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3558 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
3598 | 3559 | ||
3599 | dmar_msi_write(irq, &msg); | 3560 | dmar_msi_write(irq, &msg); |
3561 | |||
3562 | return 0; | ||
3600 | } | 3563 | } |
3601 | 3564 | ||
3602 | #endif /* CONFIG_SMP */ | 3565 | #endif /* CONFIG_SMP */ |
@@ -3630,7 +3593,7 @@ int arch_setup_dmar_msi(unsigned int irq) | |||
3630 | #ifdef CONFIG_HPET_TIMER | 3593 | #ifdef CONFIG_HPET_TIMER |
3631 | 3594 | ||
3632 | #ifdef CONFIG_SMP | 3595 | #ifdef CONFIG_SMP |
3633 | static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | 3596 | static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) |
3634 | { | 3597 | { |
3635 | struct irq_desc *desc = irq_to_desc(irq); | 3598 | struct irq_desc *desc = irq_to_desc(irq); |
3636 | struct irq_cfg *cfg; | 3599 | struct irq_cfg *cfg; |
@@ -3639,7 +3602,7 @@ static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
3639 | 3602 | ||
3640 | dest = set_desc_affinity(desc, mask); | 3603 | dest = set_desc_affinity(desc, mask); |
3641 | if (dest == BAD_APICID) | 3604 | if (dest == BAD_APICID) |
3642 | return; | 3605 | return -1; |
3643 | 3606 | ||
3644 | cfg = desc->chip_data; | 3607 | cfg = desc->chip_data; |
3645 | 3608 | ||
@@ -3651,6 +3614,8 @@ static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
3651 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3614 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
3652 | 3615 | ||
3653 | hpet_msi_write(irq, &msg); | 3616 | hpet_msi_write(irq, &msg); |
3617 | |||
3618 | return 0; | ||
3654 | } | 3619 | } |
3655 | 3620 | ||
3656 | #endif /* CONFIG_SMP */ | 3621 | #endif /* CONFIG_SMP */ |
@@ -3707,7 +3672,7 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) | |||
3707 | write_ht_irq_msg(irq, &msg); | 3672 | write_ht_irq_msg(irq, &msg); |
3708 | } | 3673 | } |
3709 | 3674 | ||
3710 | static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) | 3675 | static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) |
3711 | { | 3676 | { |
3712 | struct irq_desc *desc = irq_to_desc(irq); | 3677 | struct irq_desc *desc = irq_to_desc(irq); |
3713 | struct irq_cfg *cfg; | 3678 | struct irq_cfg *cfg; |
@@ -3715,11 +3680,13 @@ static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
3715 | 3680 | ||
3716 | dest = set_desc_affinity(desc, mask); | 3681 | dest = set_desc_affinity(desc, mask); |
3717 | if (dest == BAD_APICID) | 3682 | if (dest == BAD_APICID) |
3718 | return; | 3683 | return -1; |
3719 | 3684 | ||
3720 | cfg = desc->chip_data; | 3685 | cfg = desc->chip_data; |
3721 | 3686 | ||
3722 | target_ht_irq(irq, dest, cfg->vector); | 3687 | target_ht_irq(irq, dest, cfg->vector); |
3688 | |||
3689 | return 0; | ||
3723 | } | 3690 | } |
3724 | 3691 | ||
3725 | #endif | 3692 | #endif |
@@ -3794,6 +3761,8 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
3794 | unsigned long flags; | 3761 | unsigned long flags; |
3795 | int err; | 3762 | int err; |
3796 | 3763 | ||
3764 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); | ||
3765 | |||
3797 | cfg = irq_cfg(irq); | 3766 | cfg = irq_cfg(irq); |
3798 | 3767 | ||
3799 | err = assign_irq_vector(irq, cfg, eligible_cpu); | 3768 | err = assign_irq_vector(irq, cfg, eligible_cpu); |
@@ -3807,15 +3776,13 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
3807 | 3776 | ||
3808 | mmr_value = 0; | 3777 | mmr_value = 0; |
3809 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; | 3778 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; |
3810 | BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); | 3779 | entry->vector = cfg->vector; |
3811 | 3780 | entry->delivery_mode = apic->irq_delivery_mode; | |
3812 | entry->vector = cfg->vector; | 3781 | entry->dest_mode = apic->irq_dest_mode; |
3813 | entry->delivery_mode = apic->irq_delivery_mode; | 3782 | entry->polarity = 0; |
3814 | entry->dest_mode = apic->irq_dest_mode; | 3783 | entry->trigger = 0; |
3815 | entry->polarity = 0; | 3784 | entry->mask = 0; |
3816 | entry->trigger = 0; | 3785 | entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); |
3817 | entry->mask = 0; | ||
3818 | entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); | ||
3819 | 3786 | ||
3820 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | 3787 | mmr_pnode = uv_blade_to_pnode(mmr_blade); |
3821 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | 3788 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); |
@@ -3833,10 +3800,10 @@ void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset) | |||
3833 | struct uv_IO_APIC_route_entry *entry; | 3800 | struct uv_IO_APIC_route_entry *entry; |
3834 | int mmr_pnode; | 3801 | int mmr_pnode; |
3835 | 3802 | ||
3803 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); | ||
3804 | |||
3836 | mmr_value = 0; | 3805 | mmr_value = 0; |
3837 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; | 3806 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; |
3838 | BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); | ||
3839 | |||
3840 | entry->mask = 1; | 3807 | entry->mask = 1; |
3841 | 3808 | ||
3842 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | 3809 | mmr_pnode = uv_blade_to_pnode(mmr_blade); |
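The UV hunks above turn a run-time BUG_ON() into BUILD_BUG_ON(), so the "route entry is exactly one machine word" invariant is checked while compiling rather than when the function happens to run. Outside the kernel the same idea can be written with C11 _Static_assert (compile with -std=c11 or later; the kernel's BUILD_BUG_ON predates it and is built differently, and the fields below are invented, not the real uv_IO_APIC_route_entry layout):

struct route_entry {
    unsigned int vector   : 8;
    unsigned int delivery : 3;
    unsigned int reserved : 21;
};

/* Fails at compile time, not at run time, if the layout ever grows. */
_Static_assert(sizeof(struct route_entry) == sizeof(unsigned int),
               "route_entry must overlay exactly one 32-bit word");

int main(void)
{
    return 0;
}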
@@ -3900,6 +3867,71 @@ int __init arch_probe_nr_irqs(void) | |||
3900 | } | 3867 | } |
3901 | #endif | 3868 | #endif |
3902 | 3869 | ||
3870 | static int __io_apic_set_pci_routing(struct device *dev, int irq, | ||
3871 | struct io_apic_irq_attr *irq_attr) | ||
3872 | { | ||
3873 | struct irq_desc *desc; | ||
3874 | struct irq_cfg *cfg; | ||
3875 | int node; | ||
3876 | int ioapic, pin; | ||
3877 | int trigger, polarity; | ||
3878 | |||
3879 | ioapic = irq_attr->ioapic; | ||
3880 | if (!IO_APIC_IRQ(irq)) { | ||
3881 | apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", | ||
3882 | ioapic); | ||
3883 | return -EINVAL; | ||
3884 | } | ||
3885 | |||
3886 | if (dev) | ||
3887 | node = dev_to_node(dev); | ||
3888 | else | ||
3889 | node = cpu_to_node(boot_cpu_id); | ||
3890 | |||
3891 | desc = irq_to_desc_alloc_node(irq, node); | ||
3892 | if (!desc) { | ||
3893 | printk(KERN_INFO "can not get irq_desc %d\n", irq); | ||
3894 | return 0; | ||
3895 | } | ||
3896 | |||
3897 | pin = irq_attr->ioapic_pin; | ||
3898 | trigger = irq_attr->trigger; | ||
3899 | polarity = irq_attr->polarity; | ||
3900 | |||
3901 | /* | ||
3902 | * IRQs < 16 are already in the irq_2_pin[] map | ||
3903 | */ | ||
3904 | if (irq >= NR_IRQS_LEGACY) { | ||
3905 | cfg = desc->chip_data; | ||
3906 | add_pin_to_irq_node(cfg, node, ioapic, pin); | ||
3907 | } | ||
3908 | |||
3909 | setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); | ||
3910 | |||
3911 | return 0; | ||
3912 | } | ||
3913 | |||
3914 | int io_apic_set_pci_routing(struct device *dev, int irq, | ||
3915 | struct io_apic_irq_attr *irq_attr) | ||
3916 | { | ||
3917 | int ioapic, pin; | ||
3918 | /* | ||
3919 | * Avoid pin reprogramming. PRTs typically include entries | ||
3920 | * with redundant pin->gsi mappings (but unique PCI devices); | ||
3921 | * we only program the IOAPIC on the first. | ||
3922 | */ | ||
3923 | ioapic = irq_attr->ioapic; | ||
3924 | pin = irq_attr->ioapic_pin; | ||
3925 | if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) { | ||
3926 | pr_debug("Pin %d-%d already programmed\n", | ||
3927 | mp_ioapics[ioapic].apicid, pin); | ||
3928 | return 0; | ||
3929 | } | ||
3930 | set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed); | ||
3931 | |||
3932 | return __io_apic_set_pci_routing(dev, irq, irq_attr); | ||
3933 | } | ||
3934 | |||
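io_apic_set_pci_routing() above consults a per-IOAPIC pin_programmed bitmap so that redundant ACPI PRT entries sharing one pin->GSI mapping only program the pin once; later callers just return. A userspace sketch of the same test-and-set guard over a plain bitmap (non-atomic, unlike the kernel's test_bit()/set_bit(), and with invented names):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define MAX_PINS      128

static unsigned long pin_programmed[(MAX_PINS + BITS_PER_LONG - 1) / BITS_PER_LONG];

/* Return whether the pin was already marked, marking it either way. */
static int test_and_set_pin(unsigned int pin)
{
    unsigned long *word = &pin_programmed[pin / BITS_PER_LONG];
    unsigned long  bit  = 1UL << (pin % BITS_PER_LONG);
    int was_set = (*word & bit) != 0;

    *word |= bit;
    return was_set;
}

static void program_pin(unsigned int pin)
{
    if (test_and_set_pin(pin)) {
        printf("pin %u already programmed, skipping\n", pin);
        return;
    }
    printf("programming pin %u\n", pin);
}

int main(void)
{
    program_pin(9);
    program_pin(9);     /* a second PRT entry for the same pin is ignored */
    return 0;
}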
3903 | /* -------------------------------------------------------------------------- | 3935 | /* -------------------------------------------------------------------------- |
3904 | ACPI-based IOAPIC Configuration | 3936 | ACPI-based IOAPIC Configuration |
3905 | -------------------------------------------------------------------------- */ | 3937 | -------------------------------------------------------------------------- */ |
@@ -3980,6 +4012,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) | |||
3980 | 4012 | ||
3981 | return apic_id; | 4013 | return apic_id; |
3982 | } | 4014 | } |
4015 | #endif | ||
3983 | 4016 | ||
3984 | int __init io_apic_get_version(int ioapic) | 4017 | int __init io_apic_get_version(int ioapic) |
3985 | { | 4018 | { |
@@ -3992,39 +4025,6 @@ int __init io_apic_get_version(int ioapic) | |||
3992 | 4025 | ||
3993 | return reg_01.bits.version; | 4026 | return reg_01.bits.version; |
3994 | } | 4027 | } |
3995 | #endif | ||
3996 | |||
3997 | int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity) | ||
3998 | { | ||
3999 | struct irq_desc *desc; | ||
4000 | struct irq_cfg *cfg; | ||
4001 | int cpu = boot_cpu_id; | ||
4002 | |||
4003 | if (!IO_APIC_IRQ(irq)) { | ||
4004 | apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", | ||
4005 | ioapic); | ||
4006 | return -EINVAL; | ||
4007 | } | ||
4008 | |||
4009 | desc = irq_to_desc_alloc_cpu(irq, cpu); | ||
4010 | if (!desc) { | ||
4011 | printk(KERN_INFO "can not get irq_desc %d\n", irq); | ||
4012 | return 0; | ||
4013 | } | ||
4014 | |||
4015 | /* | ||
4016 | * IRQs < 16 are already in the irq_2_pin[] map | ||
4017 | */ | ||
4018 | if (irq >= NR_IRQS_LEGACY) { | ||
4019 | cfg = desc->chip_data; | ||
4020 | add_pin_to_irq_cpu(cfg, cpu, ioapic, pin); | ||
4021 | } | ||
4022 | |||
4023 | setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity); | ||
4024 | |||
4025 | return 0; | ||
4026 | } | ||
4027 | |||
4028 | 4028 | ||
4029 | int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) | 4029 | int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) |
4030 | { | 4030 | { |
@@ -4055,51 +4055,44 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) | |||
4055 | #ifdef CONFIG_SMP | 4055 | #ifdef CONFIG_SMP |
4056 | void __init setup_ioapic_dest(void) | 4056 | void __init setup_ioapic_dest(void) |
4057 | { | 4057 | { |
4058 | int pin, ioapic, irq, irq_entry; | 4058 | int pin, ioapic = 0, irq, irq_entry; |
4059 | struct irq_desc *desc; | 4059 | struct irq_desc *desc; |
4060 | struct irq_cfg *cfg; | ||
4061 | const struct cpumask *mask; | 4060 | const struct cpumask *mask; |
4062 | 4061 | ||
4063 | if (skip_ioapic_setup == 1) | 4062 | if (skip_ioapic_setup == 1) |
4064 | return; | 4063 | return; |
4065 | 4064 | ||
4066 | for (ioapic = 0; ioapic < nr_ioapics; ioapic++) { | 4065 | #ifdef CONFIG_ACPI |
4067 | for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) { | 4066 | if (!acpi_disabled && acpi_ioapic) { |
4068 | irq_entry = find_irq_entry(ioapic, pin, mp_INT); | 4067 | ioapic = mp_find_ioapic(0); |
4069 | if (irq_entry == -1) | 4068 | if (ioapic < 0) |
4070 | continue; | 4069 | ioapic = 0; |
4071 | irq = pin_2_irq(irq_entry, ioapic, pin); | 4070 | } |
4072 | 4071 | #endif | |
4073 | /* setup_IO_APIC_irqs could fail to get vector for some device | ||
4074 | * when you have too many devices, because at that time only boot | ||
4075 | * cpu is online. | ||
4076 | */ | ||
4077 | desc = irq_to_desc(irq); | ||
4078 | cfg = desc->chip_data; | ||
4079 | if (!cfg->vector) { | ||
4080 | setup_IO_APIC_irq(ioapic, pin, irq, desc, | ||
4081 | irq_trigger(irq_entry), | ||
4082 | irq_polarity(irq_entry)); | ||
4083 | continue; | ||
4084 | 4072 | ||
4085 | } | 4073 | for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) { |
4074 | irq_entry = find_irq_entry(ioapic, pin, mp_INT); | ||
4075 | if (irq_entry == -1) | ||
4076 | continue; | ||
4077 | irq = pin_2_irq(irq_entry, ioapic, pin); | ||
4086 | 4078 | ||
4087 | /* | 4079 | desc = irq_to_desc(irq); |
4088 | * Honour affinities which have been set in early boot | ||
4089 | */ | ||
4090 | if (desc->status & | ||
4091 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) | ||
4092 | mask = desc->affinity; | ||
4093 | else | ||
4094 | mask = apic->target_cpus(); | ||
4095 | 4080 | ||
4096 | if (intr_remapping_enabled) | 4081 | /* |
4097 | set_ir_ioapic_affinity_irq_desc(desc, mask); | 4082 | * Honour affinities which have been set in early boot |
4098 | else | 4083 | */ |
4099 | set_ioapic_affinity_irq_desc(desc, mask); | 4084 | if (desc->status & |
4100 | } | 4085 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) |
4086 | mask = desc->affinity; | ||
4087 | else | ||
4088 | mask = apic->target_cpus(); | ||
4101 | 4089 | ||
4090 | if (intr_remapping_enabled) | ||
4091 | set_ir_ioapic_affinity_irq_desc(desc, mask); | ||
4092 | else | ||
4093 | set_ioapic_affinity_irq_desc(desc, mask); | ||
4102 | } | 4094 | } |
4095 | |||
4103 | } | 4096 | } |
4104 | #endif | 4097 | #endif |
4105 | 4098 | ||
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index 1783652bb0e5..bc3e880f9b82 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c | |||
@@ -50,7 +50,7 @@ static struct apic *apic_probe[] __initdata = { | |||
50 | void __init default_setup_apic_routing(void) | 50 | void __init default_setup_apic_routing(void) |
51 | { | 51 | { |
52 | #ifdef CONFIG_X86_X2APIC | 52 | #ifdef CONFIG_X86_X2APIC |
53 | if (x2apic && (apic != &apic_x2apic_phys && | 53 | if (x2apic_mode && (apic != &apic_x2apic_phys && |
54 | #ifdef CONFIG_X86_UV | 54 | #ifdef CONFIG_X86_UV |
55 | apic != &apic_x2apic_uv_x && | 55 | apic != &apic_x2apic_uv_x && |
56 | #endif | 56 | #endif |
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 9cfe1f415d81..344eee4ac0a4 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c | |||
@@ -173,13 +173,6 @@ static inline int is_WPEG(struct rio_detail *rio){ | |||
173 | rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); | 173 | rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); |
174 | } | 174 | } |
175 | 175 | ||
176 | |||
177 | /* In clustered mode, the high nibble of APIC ID is a cluster number. | ||
178 | * The low nibble is a 4-bit bitmap. */ | ||
179 | #define XAPIC_DEST_CPUS_SHIFT 4 | ||
180 | #define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) | ||
181 | #define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) | ||
182 | |||
183 | #define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) | 176 | #define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) |
184 | 177 | ||
185 | static const struct cpumask *summit_target_cpus(void) | 178 | static const struct cpumask *summit_target_cpus(void) |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 2bda69352976..780a733a5e7a 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -105,7 +105,7 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask) | |||
105 | cpumask_set_cpu(cpu, retmask); | 105 | cpumask_set_cpu(cpu, retmask); |
106 | } | 106 | } |
107 | 107 | ||
108 | static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) | 108 | static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) |
109 | { | 109 | { |
110 | #ifdef CONFIG_SMP | 110 | #ifdef CONFIG_SMP |
111 | unsigned long val; | 111 | unsigned long val; |
@@ -583,15 +583,18 @@ void __init uv_system_init(void) | |||
583 | 583 | ||
584 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); | 584 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); |
585 | uv_blade_info = kmalloc(bytes, GFP_KERNEL); | 585 | uv_blade_info = kmalloc(bytes, GFP_KERNEL); |
586 | BUG_ON(!uv_blade_info); | ||
586 | 587 | ||
587 | get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); | 588 | get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); |
588 | 589 | ||
589 | bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes(); | 590 | bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes(); |
590 | uv_node_to_blade = kmalloc(bytes, GFP_KERNEL); | 591 | uv_node_to_blade = kmalloc(bytes, GFP_KERNEL); |
592 | BUG_ON(!uv_node_to_blade); | ||
591 | memset(uv_node_to_blade, 255, bytes); | 593 | memset(uv_node_to_blade, 255, bytes); |
592 | 594 | ||
593 | bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus(); | 595 | bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus(); |
594 | uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL); | 596 | uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL); |
597 | BUG_ON(!uv_cpu_to_blade); | ||
595 | memset(uv_cpu_to_blade, 255, bytes); | 598 | memset(uv_cpu_to_blade, 255, bytes); |
596 | 599 | ||
597 | blade = 0; | 600 | blade = 0; |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 7e4a459daa64..728b3750a3e8 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -272,7 +272,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | |||
272 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | 272 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) |
273 | int cpu = smp_processor_id(); | 273 | int cpu = smp_processor_id(); |
274 | int node; | 274 | int node; |
275 | unsigned apicid = hard_smp_processor_id(); | 275 | unsigned apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid; |
276 | 276 | ||
277 | node = c->phys_proc_id; | 277 | node = c->phys_proc_id; |
278 | if (apicid_to_node[apicid] != NUMA_NO_NODE) | 278 | if (apicid_to_node[apicid] != NUMA_NO_NODE) |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 77848d9fca68..b0517aa2bd3b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -299,7 +299,8 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c) | |||
299 | return NULL; /* Not found */ | 299 | return NULL; /* Not found */ |
300 | } | 300 | } |
301 | 301 | ||
302 | __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; | 302 | __u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata; |
303 | __u32 cpu_caps_set[NCAPINTS] __cpuinitdata; | ||
303 | 304 | ||
304 | void load_percpu_segment(int cpu) | 305 | void load_percpu_segment(int cpu) |
305 | { | 306 | { |
@@ -768,6 +769,12 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
768 | if (this_cpu->c_identify) | 769 | if (this_cpu->c_identify) |
769 | this_cpu->c_identify(c); | 770 | this_cpu->c_identify(c); |
770 | 771 | ||
772 | /* Clear/Set all flags overridden by options, after probe */ | ||
773 | for (i = 0; i < NCAPINTS; i++) { | ||
774 | c->x86_capability[i] &= ~cpu_caps_cleared[i]; | ||
775 | c->x86_capability[i] |= cpu_caps_set[i]; | ||
776 | } | ||
777 | |||
771 | #ifdef CONFIG_X86_64 | 778 | #ifdef CONFIG_X86_64 |
772 | c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); | 779 | c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); |
773 | #endif | 780 | #endif |
@@ -813,6 +820,16 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
813 | #endif | 820 | #endif |
814 | 821 | ||
815 | init_hypervisor(c); | 822 | init_hypervisor(c); |
823 | |||
824 | /* | ||
824 | * Clear/Set all flags overridden by options; this needs to be done | ||
825 | * before the capability AND across all SMP cpus below. | ||
827 | */ | ||
828 | for (i = 0; i < NCAPINTS; i++) { | ||
829 | c->x86_capability[i] &= ~cpu_caps_cleared[i]; | ||
830 | c->x86_capability[i] |= cpu_caps_set[i]; | ||
831 | } | ||
832 | |||
816 | /* | 833 | /* |
817 | * On SMP, boot_cpu_data holds the common feature set between | 834 | * On SMP, boot_cpu_data holds the common feature set between |
818 | * all CPUs; so make sure that we indicate which features are | 835 | * all CPUs; so make sure that we indicate which features are |
@@ -825,10 +842,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
825 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; | 842 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; |
826 | } | 843 | } |
827 | 844 | ||
828 | /* Clear all flags overriden by options */ | ||
829 | for (i = 0; i < NCAPINTS; i++) | ||
830 | c->x86_capability[i] &= ~cleared_cpu_caps[i]; | ||
831 | |||
832 | #ifdef CONFIG_X86_MCE | 845 | #ifdef CONFIG_X86_MCE |
833 | /* Init Machine Check Exception if available. */ | 846 | /* Init Machine Check Exception if available. */ |
834 | mcheck_init(c); | 847 | mcheck_init(c); |
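The common.c change above splits the override state into a cpu_caps_cleared/cpu_caps_set pair and applies both masks twice — right after the vendor probe and again before boot_cpu_data's capability words are ANDed across CPUs — so command-line overrides survive both passes. The masking step itself is a clear-then-set over each capability word; a small sketch (NCAPS and the bit values are made up for the example):

#include <stdio.h>

#define NCAPS 4

static unsigned int caps_cleared[NCAPS];    /* bits forced off by options */
static unsigned int caps_set[NCAPS];        /* bits forced on by options  */

static void apply_overrides(unsigned int caps[NCAPS])
{
    for (int i = 0; i < NCAPS; i++) {
        caps[i] &= ~caps_cleared[i];        /* clear first ...            */
        caps[i] |= caps_set[i];             /* ... then set               */
    }
}

int main(void)
{
    unsigned int caps[NCAPS] = { 0x0000000f, 0, 0, 0 };

    caps_cleared[0] = 0x00000002;           /* e.g. a "clearcpuid="-style override */
    caps_set[0]     = 0x00000100;

    apply_overrides(caps);
    printf("%08x\n", caps[0]);              /* prints 0000010d */
    return 0;
}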
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c index 46e29ab96c6a..2fc4f6bb9ca5 100644 --- a/arch/x86/kernel/cpu/cpu_debug.c +++ b/arch/x86/kernel/cpu/cpu_debug.c | |||
@@ -588,8 +588,20 @@ static void print_apic(void *arg) | |||
588 | seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT)); | 588 | seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT)); |
589 | seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT)); | 589 | seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT)); |
590 | seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR)); | 590 | seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR)); |
591 | #endif /* CONFIG_X86_LOCAL_APIC */ | 591 | if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { |
592 | unsigned int i, v, maxeilvt; | ||
593 | |||
594 | v = apic_read(APIC_EFEAT); | ||
595 | maxeilvt = (v >> 16) & 0xff; | ||
596 | seq_printf(seq, " EFEAT\t\t: %08x\n", v); | ||
597 | seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL)); | ||
592 | 598 | ||
599 | for (i = 0; i < maxeilvt; i++) { | ||
600 | v = apic_read(APIC_EILVTn(i)); | ||
601 | seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v); | ||
602 | } | ||
603 | } | ||
604 | #endif /* CONFIG_X86_LOCAL_APIC */ | ||
593 | seq_printf(seq, "\n MSR\t:\n"); | 605 | seq_printf(seq, "\n MSR\t:\n"); |
594 | } | 606 | } |
595 | 607 | ||
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 7437fa133c02..daed39ba2614 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -229,12 +229,12 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
229 | } | 229 | } |
230 | #endif | 230 | #endif |
231 | 231 | ||
232 | static void __cpuinit srat_detect_node(void) | 232 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) |
233 | { | 233 | { |
234 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | 234 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) |
235 | unsigned node; | 235 | unsigned node; |
236 | int cpu = smp_processor_id(); | 236 | int cpu = smp_processor_id(); |
237 | int apicid = hard_smp_processor_id(); | 237 | int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid; |
238 | 238 | ||
239 | /* Don't do the funky fallback heuristics the AMD version employs | 239 | /* Don't do the funky fallback heuristics the AMD version employs |
240 | for now. */ | 240 | for now. */ |
@@ -400,7 +400,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
400 | } | 400 | } |
401 | 401 | ||
402 | /* Work around errata */ | 402 | /* Work around errata */ |
403 | srat_detect_node(); | 403 | srat_detect_node(c); |
404 | 404 | ||
405 | if (cpu_has(c, X86_FEATURE_VMX)) | 405 | if (cpu_has(c, X86_FEATURE_VMX)) |
406 | detect_vmx_virtcap(c); | 406 | detect_vmx_virtcap(c); |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index c3fe010d74c8..c1739ac29708 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -24,9 +24,9 @@ void (*generic_interrupt_extension)(void) = NULL; | |||
24 | */ | 24 | */ |
25 | void ack_bad_irq(unsigned int irq) | 25 | void ack_bad_irq(unsigned int irq) |
26 | { | 26 | { |
27 | printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq); | 27 | if (printk_ratelimit()) |
28 | pr_err("unexpected IRQ trap at vector %02x\n", irq); | ||
28 | 29 | ||
29 | #ifdef CONFIG_X86_LOCAL_APIC | ||
30 | /* | 30 | /* |
31 | * Currently unexpected vectors happen only on SMP and APIC. | 31 | * Currently unexpected vectors happen only on SMP and APIC. |
32 | * We _must_ ack these because every local APIC has only N | 32 | * We _must_ ack these because every local APIC has only N |
@@ -36,9 +36,7 @@ void ack_bad_irq(unsigned int irq) | |||
36 | * completely. | 36 | * completely. |
37 | * But only ack when the APIC is enabled -AK | 37 | * But only ack when the APIC is enabled -AK |
38 | */ | 38 | */ |
39 | if (cpu_has_apic) | 39 | ack_APIC_irq(); |
40 | ack_APIC_irq(); | ||
41 | #endif | ||
42 | } | 40 | } |
43 | 41 | ||
44 | #define irq_stats(x) (&per_cpu(irq_stat, x)) | 42 | #define irq_stats(x) (&per_cpu(irq_stat, x)) |
@@ -178,7 +176,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) | |||
178 | sum += irq_stats(cpu)->irq_thermal_count; | 176 | sum += irq_stats(cpu)->irq_thermal_count; |
179 | # ifdef CONFIG_X86_64 | 177 | # ifdef CONFIG_X86_64 |
180 | sum += irq_stats(cpu)->irq_threshold_count; | 178 | sum += irq_stats(cpu)->irq_threshold_count; |
181 | #endif | 179 | # endif |
182 | #endif | 180 | #endif |
183 | return sum; | 181 | return sum; |
184 | } | 182 | } |
@@ -213,14 +211,11 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs) | |||
213 | irq = __get_cpu_var(vector_irq)[vector]; | 211 | irq = __get_cpu_var(vector_irq)[vector]; |
214 | 212 | ||
215 | if (!handle_irq(irq, regs)) { | 213 | if (!handle_irq(irq, regs)) { |
216 | #ifdef CONFIG_X86_64 | 214 | ack_APIC_irq(); |
217 | if (!disable_apic) | ||
218 | ack_APIC_irq(); | ||
219 | #endif | ||
220 | 215 | ||
221 | if (printk_ratelimit()) | 216 | if (printk_ratelimit()) |
222 | printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n", | 217 | pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n", |
223 | __func__, smp_processor_id(), vector, irq); | 218 | __func__, smp_processor_id(), vector, irq); |
224 | } | 219 | } |
225 | 220 | ||
226 | irq_exit(); | 221 | irq_exit(); |
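ack_bad_irq() and do_IRQ() above now guard their error messages with printk_ratelimit() so a storm of bad vectors cannot flood the log, and they ack the local APIC unconditionally. A crude userspace stand-in for such a rate limit — at most one message per second via time(); the kernel's printk_ratelimit() is a token bucket and considerably smarter:

#include <stdio.h>
#include <time.h>

/* Allow at most one message per wall-clock second; drop the rest. */
static int log_ratelimit(void)
{
    static time_t last;
    time_t now = time(NULL);

    if (now == last)
        return 0;
    last = now;
    return 1;
}

static void report_bad_vector(unsigned int vector)
{
    if (log_ratelimit())
        fprintf(stderr, "unexpected IRQ trap at vector %02x\n", vector);
    /* the real handler still acks the APIC unconditionally at this point */
}

int main(void)
{
    for (int i = 0; i < 5; i++)
        report_bad_vector(0x7f);    /* typically only the first is printed */
    return 0;
}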
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit.c index 368b0a8836f9..2e08b10ad51a 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -1,20 +1,25 @@ | |||
1 | #include <linux/linkage.h> | ||
1 | #include <linux/errno.h> | 2 | #include <linux/errno.h> |
2 | #include <linux/signal.h> | 3 | #include <linux/signal.h> |
3 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
4 | #include <linux/ioport.h> | 5 | #include <linux/ioport.h> |
5 | #include <linux/interrupt.h> | 6 | #include <linux/interrupt.h> |
7 | #include <linux/timex.h> | ||
6 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
7 | #include <linux/random.h> | 9 | #include <linux/random.h> |
10 | #include <linux/kprobes.h> | ||
8 | #include <linux/init.h> | 11 | #include <linux/init.h> |
9 | #include <linux/kernel_stat.h> | 12 | #include <linux/kernel_stat.h> |
10 | #include <linux/sysdev.h> | 13 | #include <linux/sysdev.h> |
11 | #include <linux/bitops.h> | 14 | #include <linux/bitops.h> |
15 | #include <linux/acpi.h> | ||
12 | #include <linux/io.h> | 16 | #include <linux/io.h> |
13 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
14 | 18 | ||
15 | #include <asm/atomic.h> | 19 | #include <asm/atomic.h> |
16 | #include <asm/system.h> | 20 | #include <asm/system.h> |
17 | #include <asm/timer.h> | 21 | #include <asm/timer.h> |
22 | #include <asm/hw_irq.h> | ||
18 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
19 | #include <asm/desc.h> | 24 | #include <asm/desc.h> |
20 | #include <asm/apic.h> | 25 | #include <asm/apic.h> |
@@ -22,7 +27,23 @@ | |||
22 | #include <asm/i8259.h> | 27 | #include <asm/i8259.h> |
23 | #include <asm/traps.h> | 28 | #include <asm/traps.h> |
24 | 29 | ||
30 | /* | ||
31 | * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: | ||
32 | * (these are usually mapped to vectors 0x30-0x3f) | ||
33 | */ | ||
34 | |||
35 | /* | ||
36 | * The IO-APIC gives us many more interrupt sources. Most of these | ||
37 | * are unused but an SMP system is supposed to have enough memory ... | ||
38 | * sometimes (mostly wrt. hw bugs) we get corrupted vectors all | ||
39 | * across the spectrum, so we really want to be prepared to get all | ||
40 | * of these. Plus, more powerful systems might have more than 64 | ||
41 | * IO-APIC registers. | ||
42 | * | ||
43 | * (these are usually mapped into the 0x30-0xff vector range) | ||
44 | */ | ||
25 | 45 | ||
46 | #ifdef CONFIG_X86_32 | ||
26 | /* | 47 | /* |
27 | * Note that on a 486, we don't want to do a SIGFPE on an irq13 | 48 | * Note that on a 486, we don't want to do a SIGFPE on an irq13 |
28 | * as the irq is unreliable, and exception 16 works correctly | 49 | * as the irq is unreliable, and exception 16 works correctly |
@@ -52,30 +73,7 @@ static struct irqaction fpu_irq = { | |||
52 | .handler = math_error_irq, | 73 | .handler = math_error_irq, |
53 | .name = "fpu", | 74 | .name = "fpu", |
54 | }; | 75 | }; |
55 | |||
56 | void __init init_ISA_irqs(void) | ||
57 | { | ||
58 | int i; | ||
59 | |||
60 | #ifdef CONFIG_X86_LOCAL_APIC | ||
61 | init_bsp_APIC(); | ||
62 | #endif | 76 | #endif |
63 | init_8259A(0); | ||
64 | |||
65 | /* | ||
66 | * 16 old-style INTA-cycle interrupts: | ||
67 | */ | ||
68 | for (i = 0; i < NR_IRQS_LEGACY; i++) { | ||
69 | struct irq_desc *desc = irq_to_desc(i); | ||
70 | |||
71 | desc->status = IRQ_DISABLED; | ||
72 | desc->action = NULL; | ||
73 | desc->depth = 1; | ||
74 | |||
75 | set_irq_chip_and_handler_name(i, &i8259A_chip, | ||
76 | handle_level_irq, "XT"); | ||
77 | } | ||
78 | } | ||
79 | 77 | ||
80 | /* | 78 | /* |
81 | * IRQ2 is cascade interrupt to second interrupt controller | 79 | * IRQ2 is cascade interrupt to second interrupt controller |
@@ -118,29 +116,37 @@ int vector_used_by_percpu_irq(unsigned int vector) | |||
118 | return 0; | 116 | return 0; |
119 | } | 117 | } |
120 | 118 | ||
121 | /* Overridden in paravirt.c */ | 119 | static void __init init_ISA_irqs(void) |
122 | void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); | ||
123 | |||
124 | void __init native_init_IRQ(void) | ||
125 | { | 120 | { |
126 | int i; | 121 | int i; |
127 | 122 | ||
128 | /* Execute any quirks before the call gates are initialised: */ | 123 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) |
129 | x86_quirk_pre_intr_init(); | 124 | init_bsp_APIC(); |
125 | #endif | ||
126 | init_8259A(0); | ||
130 | 127 | ||
131 | /* | 128 | /* |
132 | * Cover the whole vector space, no vector can escape | 129 | * 16 old-style INTA-cycle interrupts: |
133 | * us. (some of these will be overridden and become | ||
134 | * 'special' SMP interrupts) | ||
135 | */ | 130 | */ |
136 | for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { | 131 | for (i = 0; i < NR_IRQS_LEGACY; i++) { |
137 | /* SYSCALL_VECTOR was reserved in trap_init. */ | 132 | struct irq_desc *desc = irq_to_desc(i); |
138 | if (i != SYSCALL_VECTOR) | 133 | |
139 | set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]); | 134 | desc->status = IRQ_DISABLED; |
135 | desc->action = NULL; | ||
136 | desc->depth = 1; | ||
137 | |||
138 | set_irq_chip_and_handler_name(i, &i8259A_chip, | ||
139 | handle_level_irq, "XT"); | ||
140 | } | 140 | } |
141 | } | ||
141 | 142 | ||
143 | /* Overridden in paravirt.c */ | ||
144 | void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); | ||
142 | 145 | ||
143 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP) | 146 | static void __init smp_intr_init(void) |
147 | { | ||
148 | #ifdef CONFIG_SMP | ||
149 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) | ||
144 | /* | 150 | /* |
145 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper | 151 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper |
146 | * IPI, driven by wakeup. | 152 | * IPI, driven by wakeup. |
@@ -160,16 +166,27 @@ void __init native_init_IRQ(void) | |||
160 | /* IPI for generic function call */ | 166 | /* IPI for generic function call */ |
161 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | 167 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); |
162 | 168 | ||
163 | /* IPI for single call function */ | 169 | /* IPI for generic single function call */ |
164 | alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, | 170 | alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, |
165 | call_function_single_interrupt); | 171 | call_function_single_interrupt); |
166 | 172 | ||
167 | /* Low priority IPI to cleanup after moving an irq */ | 173 | /* Low priority IPI to cleanup after moving an irq */ |
168 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | 174 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); |
169 | set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); | 175 | set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); |
170 | #endif | 176 | #endif |
177 | #endif /* CONFIG_SMP */ | ||
178 | } | ||
179 | |||
180 | static void __init apic_intr_init(void) | ||
181 | { | ||
182 | smp_intr_init(); | ||
183 | |||
184 | #ifdef CONFIG_X86_64 | ||
185 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); | ||
186 | alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); | ||
187 | #endif | ||
171 | 188 | ||
172 | #ifdef CONFIG_X86_LOCAL_APIC | 189 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) |
173 | /* self generated IPI for local APIC timer */ | 190 | /* self generated IPI for local APIC timer */ |
174 | alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); | 191 | alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); |
175 | 192 | ||
@@ -179,16 +196,67 @@ void __init native_init_IRQ(void) | |||
179 | /* IPI vectors for APIC spurious and error interrupts */ | 196 | /* IPI vectors for APIC spurious and error interrupts */ |
180 | alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); | 197 | alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); |
181 | alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); | 198 | alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); |
199 | |||
200 | /* Performance monitoring interrupts: */ | ||
201 | # ifdef CONFIG_PERF_COUNTERS | ||
202 | alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt); | ||
203 | alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); | ||
204 | # endif | ||
205 | |||
182 | #endif | 206 | #endif |
183 | 207 | ||
208 | #ifdef CONFIG_X86_32 | ||
184 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL) | 209 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL) |
185 | /* thermal monitor LVT interrupt */ | 210 | /* thermal monitor LVT interrupt */ |
186 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); | 211 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); |
187 | #endif | 212 | #endif |
213 | #endif | ||
214 | } | ||
215 | |||
216 | /** | ||
217 | * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors | ||
218 | * | ||
219 | * Description: | ||
220 | * Perform any necessary interrupt initialisation prior to setting up | ||
221 | * the "ordinary" interrupt call gates. For legacy reasons, the ISA | ||
222 | * interrupts should be initialised here if the machine emulates a PC | ||
223 | * in any way. | ||
224 | **/ | ||
225 | static void __init x86_quirk_pre_intr_init(void) | ||
226 | { | ||
227 | #ifdef CONFIG_X86_32 | ||
228 | if (x86_quirks->arch_pre_intr_init) { | ||
229 | if (x86_quirks->arch_pre_intr_init()) | ||
230 | return; | ||
231 | } | ||
232 | #endif | ||
233 | init_ISA_irqs(); | ||
234 | } | ||
235 | |||
236 | void __init native_init_IRQ(void) | ||
237 | { | ||
238 | int i; | ||
239 | |||
240 | /* Execute any quirks before the call gates are initialised: */ | ||
241 | x86_quirk_pre_intr_init(); | ||
242 | |||
243 | apic_intr_init(); | ||
244 | |||
245 | /* | ||
246 | * Cover the whole vector space, no vector can escape | ||
247 | * us. (some of these will be overridden and become | ||
248 | * 'special' SMP interrupts) | ||
249 | */ | ||
250 | for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { | ||
251 | /* IA32_SYSCALL_VECTOR could be used in trap_init already. */ | ||
252 | if (!test_bit(i, used_vectors)) | ||
253 | set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]); | ||
254 | } | ||
188 | 255 | ||
189 | if (!acpi_ioapic) | 256 | if (!acpi_ioapic) |
190 | setup_irq(2, &irq2); | 257 | setup_irq(2, &irq2); |
191 | 258 | ||
259 | #ifdef CONFIG_X86_32 | ||
192 | /* | 260 | /* |
193 | * Call quirks after call gates are initialised (usually add in | 261 | * Call quirks after call gates are initialised (usually add in |
194 | * the architecture specific gates): | 262 | * the architecture specific gates): |
@@ -203,4 +271,5 @@ void __init native_init_IRQ(void) | |||
203 | setup_irq(FPU_IRQ, &fpu_irq); | 271 | setup_irq(FPU_IRQ, &fpu_irq); |
204 | 272 | ||
205 | irq_ctx_init(smp_processor_id()); | 273 | irq_ctx_init(smp_processor_id()); |
274 | #endif | ||
206 | } | 275 | } |
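Editor's note: the hunk above keeps the "Overridden in paravirt.c" weak alias, so init_IRQ() resolves to native_init_IRQ() unless a stronger definition is linked in. A minimal standalone sketch of that linker-level override, assuming GCC or Clang on an ELF target; the lower-case names are invented for the sketch, not the kernel symbols:

    #include <stdio.h>

    void native_init_irq(void)              /* default implementation */
    {
            puts("native IRQ init");
    }

    /*
     * Weak alias: init_irq() resolves to native_init_irq() unless another
     * object file supplies a strong definition of init_irq() at link time.
     */
    void init_irq(void) __attribute__((weak, alias("native_init_irq")));

    int main(void)
    {
            init_irq();                     /* prints "native IRQ init" here */
            return 0;
    }

Linking in another object with a strong init_irq() silently replaces the call target; no runtime indirection is needed, which is why the paravirt override costs nothing on native hardware.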
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c deleted file mode 100644 index 8cd10537fd46..000000000000 --- a/arch/x86/kernel/irqinit_64.c +++ /dev/null | |||
@@ -1,177 +0,0 @@ | |||
1 | #include <linux/linkage.h> | ||
2 | #include <linux/errno.h> | ||
3 | #include <linux/signal.h> | ||
4 | #include <linux/sched.h> | ||
5 | #include <linux/ioport.h> | ||
6 | #include <linux/interrupt.h> | ||
7 | #include <linux/timex.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <linux/random.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/kernel_stat.h> | ||
12 | #include <linux/sysdev.h> | ||
13 | #include <linux/bitops.h> | ||
14 | #include <linux/acpi.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/delay.h> | ||
17 | |||
18 | #include <asm/atomic.h> | ||
19 | #include <asm/system.h> | ||
20 | #include <asm/hw_irq.h> | ||
21 | #include <asm/pgtable.h> | ||
22 | #include <asm/desc.h> | ||
23 | #include <asm/apic.h> | ||
24 | #include <asm/i8259.h> | ||
25 | |||
26 | /* | ||
27 | * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: | ||
28 | * (these are usually mapped to vectors 0x30-0x3f) | ||
29 | */ | ||
30 | |||
31 | /* | ||
32 | * The IO-APIC gives us many more interrupt sources. Most of these | ||
33 | * are unused but an SMP system is supposed to have enough memory ... | ||
34 | * sometimes (mostly wrt. hw bugs) we get corrupted vectors all | ||
35 | * across the spectrum, so we really want to be prepared to get all | ||
36 | * of these. Plus, more powerful systems might have more than 64 | ||
37 | * IO-APIC registers. | ||
38 | * | ||
39 | * (these are usually mapped into the 0x30-0xff vector range) | ||
40 | */ | ||
41 | |||
42 | /* | ||
43 | * IRQ2 is cascade interrupt to second interrupt controller | ||
44 | */ | ||
45 | |||
46 | static struct irqaction irq2 = { | ||
47 | .handler = no_action, | ||
48 | .name = "cascade", | ||
49 | }; | ||
50 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { | ||
51 | [0 ... IRQ0_VECTOR - 1] = -1, | ||
52 | [IRQ0_VECTOR] = 0, | ||
53 | [IRQ1_VECTOR] = 1, | ||
54 | [IRQ2_VECTOR] = 2, | ||
55 | [IRQ3_VECTOR] = 3, | ||
56 | [IRQ4_VECTOR] = 4, | ||
57 | [IRQ5_VECTOR] = 5, | ||
58 | [IRQ6_VECTOR] = 6, | ||
59 | [IRQ7_VECTOR] = 7, | ||
60 | [IRQ8_VECTOR] = 8, | ||
61 | [IRQ9_VECTOR] = 9, | ||
62 | [IRQ10_VECTOR] = 10, | ||
63 | [IRQ11_VECTOR] = 11, | ||
64 | [IRQ12_VECTOR] = 12, | ||
65 | [IRQ13_VECTOR] = 13, | ||
66 | [IRQ14_VECTOR] = 14, | ||
67 | [IRQ15_VECTOR] = 15, | ||
68 | [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 | ||
69 | }; | ||
70 | |||
71 | int vector_used_by_percpu_irq(unsigned int vector) | ||
72 | { | ||
73 | int cpu; | ||
74 | |||
75 | for_each_online_cpu(cpu) { | ||
76 | if (per_cpu(vector_irq, cpu)[vector] != -1) | ||
77 | return 1; | ||
78 | } | ||
79 | |||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | static void __init init_ISA_irqs(void) | ||
84 | { | ||
85 | int i; | ||
86 | |||
87 | init_bsp_APIC(); | ||
88 | init_8259A(0); | ||
89 | |||
90 | for (i = 0; i < NR_IRQS_LEGACY; i++) { | ||
91 | struct irq_desc *desc = irq_to_desc(i); | ||
92 | |||
93 | desc->status = IRQ_DISABLED; | ||
94 | desc->action = NULL; | ||
95 | desc->depth = 1; | ||
96 | |||
97 | /* | ||
98 | * 16 old-style INTA-cycle interrupts: | ||
99 | */ | ||
100 | set_irq_chip_and_handler_name(i, &i8259A_chip, | ||
101 | handle_level_irq, "XT"); | ||
102 | } | ||
103 | } | ||
104 | |||
105 | void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); | ||
106 | |||
107 | static void __init smp_intr_init(void) | ||
108 | { | ||
109 | #ifdef CONFIG_SMP | ||
110 | /* | ||
111 | * The reschedule interrupt is a CPU-to-CPU reschedule-helper | ||
112 | * IPI, driven by wakeup. | ||
113 | */ | ||
114 | alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); | ||
115 | |||
116 | /* IPIs for invalidation */ | ||
117 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0); | ||
118 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1); | ||
119 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2); | ||
120 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3); | ||
121 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4); | ||
122 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5); | ||
123 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6); | ||
124 | alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7); | ||
125 | |||
126 | /* IPI for generic function call */ | ||
127 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | ||
128 | |||
129 | /* IPI for generic single function call */ | ||
130 | alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, | ||
131 | call_function_single_interrupt); | ||
132 | |||
133 | /* Low priority IPI to cleanup after moving an irq */ | ||
134 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | ||
135 | set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); | ||
136 | #endif | ||
137 | } | ||
138 | |||
139 | static void __init apic_intr_init(void) | ||
140 | { | ||
141 | smp_intr_init(); | ||
142 | |||
143 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); | ||
144 | alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); | ||
145 | |||
146 | /* self generated IPI for local APIC timer */ | ||
147 | alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); | ||
148 | |||
149 | /* generic IPI for platform specific use */ | ||
150 | alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt); | ||
151 | |||
152 | /* IPI vectors for APIC spurious and error interrupts */ | ||
153 | alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); | ||
154 | alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); | ||
155 | } | ||
156 | |||
157 | void __init native_init_IRQ(void) | ||
158 | { | ||
159 | int i; | ||
160 | |||
161 | init_ISA_irqs(); | ||
162 | /* | ||
163 | * Cover the whole vector space, no vector can escape | ||
164 | * us. (some of these will be overridden and become | ||
165 | * 'special' SMP interrupts) | ||
166 | */ | ||
167 | for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) { | ||
168 | int vector = FIRST_EXTERNAL_VECTOR + i; | ||
169 | if (vector != IA32_SYSCALL_VECTOR) | ||
170 | set_intr_gate(vector, interrupt[i]); | ||
171 | } | ||
172 | |||
173 | apic_intr_init(); | ||
174 | |||
175 | if (!acpi_ioapic) | ||
176 | setup_irq(2, &irq2); | ||
177 | } | ||
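Editor's note: the deleted 64-bit file also shows the per-CPU vector_irq table built with GNU C range designators ([0 ... IRQ0_VECTOR - 1] = -1), an idiom that carries over to the unified code. A standalone sketch of that initializer form, with deliberately small, illustrative constants (range designators are a GNU extension, not ISO C):

    #include <stdio.h>

    #define NR_VECTORS   32     /* illustrative; the real table has 256 entries */
    #define IRQ0_VECTOR  16     /* illustrative base vector for the legacy IRQs */

    /* -1 means "no IRQ mapped to this vector". */
    static int vector_irq[NR_VECTORS] = {
            [0 ... NR_VECTORS - 1] = -1,
    };

    int main(void)
    {
            int i;

            /* map the 16 legacy IRQs 1:1, as the deleted table did explicitly */
            for (i = 0; i < 16; i++)
                    vector_irq[IRQ0_VECTOR + i] = i;

            for (i = 0; i < NR_VECTORS; i++)
                    printf("vector %2d -> irq %d\n", i, vector_irq[i]);
            return 0;
    }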
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 70fd7e414c15..651c93b28862 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/acpi.h> | 17 | #include <linux/acpi.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/smp.h> | 19 | #include <linux/smp.h> |
20 | #include <linux/pci.h> | ||
20 | 21 | ||
21 | #include <asm/mtrr.h> | 22 | #include <asm/mtrr.h> |
22 | #include <asm/mpspec.h> | 23 | #include <asm/mpspec.h> |
@@ -870,24 +871,17 @@ static | |||
870 | inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {} | 871 | inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {} |
871 | #endif /* CONFIG_X86_IO_APIC */ | 872 | #endif /* CONFIG_X86_IO_APIC */ |
872 | 873 | ||
873 | static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, | 874 | static int |
874 | int count) | 875 | check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count) |
875 | { | 876 | { |
876 | if (!mpc_new_phys) { | 877 | int ret = 0; |
877 | pr_info("No spare slots, try to append...take your risk, " | 878 | |
878 | "new mpc_length %x\n", count); | 879 | if (!mpc_new_phys || count <= mpc_new_length) { |
879 | } else { | 880 | WARN(1, "update_mptable: No spare slots (length: %x)\n", count); |
880 | if (count <= mpc_new_length) | 881 | return -1; |
881 | pr_info("No spare slots, try to append..., " | ||
882 | "new mpc_length %x\n", count); | ||
883 | else { | ||
884 | pr_err("mpc_new_length %lx is too small\n", | ||
885 | mpc_new_length); | ||
886 | return -1; | ||
887 | } | ||
888 | } | 882 | } |
889 | 883 | ||
890 | return 0; | 884 | return ret; |
891 | } | 885 | } |
892 | 886 | ||
893 | static int __init replace_intsrc_all(struct mpc_table *mpc, | 887 | static int __init replace_intsrc_all(struct mpc_table *mpc, |
@@ -946,7 +940,7 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, | |||
946 | } else { | 940 | } else { |
947 | struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; | 941 | struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; |
948 | count += sizeof(struct mpc_intsrc); | 942 | count += sizeof(struct mpc_intsrc); |
949 | if (!check_slot(mpc_new_phys, mpc_new_length, count)) | 943 | if (check_slot(mpc_new_phys, mpc_new_length, count) < 0) |
950 | goto out; | 944 | goto out; |
951 | assign_to_mpc_intsrc(&mp_irqs[i], m); | 945 | assign_to_mpc_intsrc(&mp_irqs[i], m); |
952 | mpc->length = count; | 946 | mpc->length = count; |
@@ -963,11 +957,14 @@ out: | |||
963 | return 0; | 957 | return 0; |
964 | } | 958 | } |
965 | 959 | ||
966 | static int __initdata enable_update_mptable; | 960 | int enable_update_mptable; |
967 | 961 | ||
968 | static int __init update_mptable_setup(char *str) | 962 | static int __init update_mptable_setup(char *str) |
969 | { | 963 | { |
970 | enable_update_mptable = 1; | 964 | enable_update_mptable = 1; |
965 | #ifdef CONFIG_PCI | ||
966 | pci_routeirq = 1; | ||
967 | #endif | ||
971 | return 0; | 968 | return 0; |
972 | } | 969 | } |
973 | early_param("update_mptable", update_mptable_setup); | 970 | early_param("update_mptable", update_mptable_setup); |
@@ -980,6 +977,9 @@ static int __initdata alloc_mptable; | |||
980 | static int __init parse_alloc_mptable_opt(char *p) | 977 | static int __init parse_alloc_mptable_opt(char *p) |
981 | { | 978 | { |
982 | enable_update_mptable = 1; | 979 | enable_update_mptable = 1; |
980 | #ifdef CONFIG_PCI | ||
981 | pci_routeirq = 1; | ||
982 | #endif | ||
983 | alloc_mptable = 1; | 983 | alloc_mptable = 1; |
984 | if (!p) | 984 | if (!p) |
985 | return 0; | 985 | return 0; |
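Editor's note: check_slot() above is collapsed into a single test plus WARN(), and the update_mptable/alloc_mptable options now also turn on pci_routeirq when PCI is configured. A standalone sketch of a bounds check of that general shape — refuse to append unless a spare buffer exists and the grown length still fits — with hypothetical names and fprintf standing in for WARN(); it is not a literal copy of the hunk's condition:

    #include <stdio.h>

    /*
     * Refuse to grow the table unless a replacement buffer was reserved
     * (new_phys != 0) and the grown length still fits inside it.
     * Returns 0 if the append is safe, -1 otherwise.
     */
    static int check_slot(unsigned long new_phys, unsigned long new_length,
                          unsigned long count)
    {
            if (!new_phys || count > new_length) {
                    fprintf(stderr, "update table: no spare slots (length: %lx)\n",
                            count);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            printf("%d\n", check_slot(0x1000, 4096, 128));  /*  0: fits        */
            printf("%d\n", check_slot(0,      4096, 128));  /* -1: no buffer   */
            printf("%d\n", check_slot(0x1000, 64,   128));  /* -1: too long    */
            return 0;
    }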
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b4158439bf63..523bb697120d 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -997,24 +997,6 @@ void __init setup_arch(char **cmdline_p) | |||
997 | #ifdef CONFIG_X86_32 | 997 | #ifdef CONFIG_X86_32 |
998 | 998 | ||
999 | /** | 999 | /** |
1000 | * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors | ||
1001 | * | ||
1002 | * Description: | ||
1003 | * Perform any necessary interrupt initialisation prior to setting up | ||
1004 | * the "ordinary" interrupt call gates. For legacy reasons, the ISA | ||
1005 | * interrupts should be initialised here if the machine emulates a PC | ||
1006 | * in any way. | ||
1007 | **/ | ||
1008 | void __init x86_quirk_pre_intr_init(void) | ||
1009 | { | ||
1010 | if (x86_quirks->arch_pre_intr_init) { | ||
1011 | if (x86_quirks->arch_pre_intr_init()) | ||
1012 | return; | ||
1013 | } | ||
1014 | init_ISA_irqs(); | ||
1015 | } | ||
1016 | |||
1017 | /** | ||
1018 | * x86_quirk_intr_init - post gate setup interrupt initialisation | 1000 | * x86_quirk_intr_init - post gate setup interrupt initialisation |
1019 | * | 1001 | * |
1020 | * Description: | 1002 | * Description: |
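Editor's note: setup.c loses x86_quirk_pre_intr_init(), which reappears (static, 32-bit only) in the unified IRQ-init code earlier in this diff. The hook pattern itself is unchanged: if a platform supplies the callback and it returns non-zero, the default ISA setup is skipped. A standalone sketch of that optional-callback idiom, with invented names:

    #include <stdio.h>

    struct platform_quirks {
            /* optional: return non-zero to skip the default init entirely */
            int (*pre_intr_init)(void);
    };

    static int my_quirk(void)
    {
            puts("quirk handled interrupt init itself");
            return 1;
    }

    static struct platform_quirks default_quirks = { .pre_intr_init = NULL };
    static struct platform_quirks board_quirks   = { .pre_intr_init = my_quirk };
    static struct platform_quirks *quirks        = &default_quirks;

    static void default_isa_init(void)
    {
            puts("default ISA interrupt init");
    }

    static void pre_intr_init(void)
    {
            if (quirks->pre_intr_init && quirks->pre_intr_init())
                    return;                 /* quirk took over */
            default_isa_init();
    }

    int main(void)
    {
            pre_intr_init();                /* default path */
            quirks = &board_quirks;
            pre_intr_init();                /* quirk path   */
            return 0;
    }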
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 13f33ea8ccaa..f6db48c405b8 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c | |||
@@ -193,19 +193,19 @@ void smp_call_function_single_interrupt(struct pt_regs *regs) | |||
193 | } | 193 | } |
194 | 194 | ||
195 | struct smp_ops smp_ops = { | 195 | struct smp_ops smp_ops = { |
196 | .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, | 196 | .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, |
197 | .smp_prepare_cpus = native_smp_prepare_cpus, | 197 | .smp_prepare_cpus = native_smp_prepare_cpus, |
198 | .smp_cpus_done = native_smp_cpus_done, | 198 | .smp_cpus_done = native_smp_cpus_done, |
199 | 199 | ||
200 | .smp_send_stop = native_smp_send_stop, | 200 | .smp_send_stop = native_smp_send_stop, |
201 | .smp_send_reschedule = native_smp_send_reschedule, | 201 | .smp_send_reschedule = native_smp_send_reschedule, |
202 | 202 | ||
203 | .cpu_up = native_cpu_up, | 203 | .cpu_up = native_cpu_up, |
204 | .cpu_die = native_cpu_die, | 204 | .cpu_die = native_cpu_die, |
205 | .cpu_disable = native_cpu_disable, | 205 | .cpu_disable = native_cpu_disable, |
206 | .play_dead = native_play_dead, | 206 | .play_dead = native_play_dead, |
207 | 207 | ||
208 | .send_call_func_ipi = native_send_call_func_ipi, | 208 | .send_call_func_ipi = native_send_call_func_ipi, |
209 | .send_call_func_single_ipi = native_send_call_func_single_ipi, | 209 | .send_call_func_single_ipi = native_send_call_func_single_ipi, |
210 | }; | 210 | }; |
211 | EXPORT_SYMBOL_GPL(smp_ops); | 211 | EXPORT_SYMBOL_GPL(smp_ops); |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 58d24ef917d8..7c80007ea5f7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -504,7 +504,7 @@ void __inquire_remote_apic(int apicid) | |||
504 | * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this | 504 | * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this |
505 | * won't ... remember to clear down the APIC, etc later. | 505 | * won't ... remember to clear down the APIC, etc later. |
506 | */ | 506 | */ |
507 | int __devinit | 507 | int __cpuinit |
508 | wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip) | 508 | wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip) |
509 | { | 509 | { |
510 | unsigned long send_status, accept_status = 0; | 510 | unsigned long send_status, accept_status = 0; |
@@ -538,7 +538,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip) | |||
538 | return (send_status | accept_status); | 538 | return (send_status | accept_status); |
539 | } | 539 | } |
540 | 540 | ||
541 | int __devinit | 541 | static int __cpuinit |
542 | wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) | 542 | wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) |
543 | { | 543 | { |
544 | unsigned long send_status, accept_status = 0; | 544 | unsigned long send_status, accept_status = 0; |
@@ -822,10 +822,12 @@ do_rest: | |||
822 | /* mark "stuck" area as not stuck */ | 822 | /* mark "stuck" area as not stuck */ |
823 | *((volatile unsigned long *)trampoline_base) = 0; | 823 | *((volatile unsigned long *)trampoline_base) = 0; |
824 | 824 | ||
825 | /* | 825 | if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { |
826 | * Cleanup possible dangling ends... | 826 | /* |
827 | */ | 827 | * Cleanup possible dangling ends... |
828 | smpboot_restore_warm_reset_vector(); | 828 | */ |
829 | smpboot_restore_warm_reset_vector(); | ||
830 | } | ||
829 | 831 | ||
830 | return boot_error; | 832 | return boot_error; |
831 | } | 833 | } |
@@ -990,10 +992,12 @@ static int __init smp_sanity_check(unsigned max_cpus) | |||
990 | */ | 992 | */ |
991 | if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && | 993 | if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && |
992 | !cpu_has_apic) { | 994 | !cpu_has_apic) { |
993 | printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", | 995 | if (!disable_apic) { |
994 | boot_cpu_physical_apicid); | 996 | pr_err("BIOS bug, local APIC #%d not detected!...\n", |
995 | printk(KERN_ERR "... forcing use of dummy APIC emulation." | 997 | boot_cpu_physical_apicid); |
998 | pr_err("... forcing use of dummy APIC emulation." | ||
996 | "(tell your hw vendor)\n"); | 999 | "(tell your hw vendor)\n"); |
1000 | } | ||
997 | smpboot_clear_io_apic(); | 1001 | smpboot_clear_io_apic(); |
998 | arch_disable_smp_support(); | 1002 | arch_disable_smp_support(); |
999 | return -1; | 1003 | return -1; |
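Editor's note: the smpboot.c changes gate two paths: the warm-reset-vector restore is skipped on UV systems, and the "BIOS bug, local APIC not detected" complaint is printed only when the user did not pass disable_apic. A standalone sketch of that "stay quiet when the condition is deliberate" shape, with hypothetical flags:

    #include <stdio.h>
    #include <stdbool.h>

    static bool disable_apic;   /* set if the user asked for no APIC        */
    static bool has_apic;       /* what the hardware actually reports       */

    static void sanity_check(void)
    {
            if (!has_apic) {
                    /* Only complain if the APIC was expected to be there. */
                    if (!disable_apic)
                            fprintf(stderr, "BIOS bug: local APIC not detected\n");
                    /* fall back to uniprocessor operation either way */
                    puts("falling back to UP operation");
            }
    }

    int main(void)
    {
            has_apic = false;

            disable_apic = false;
            sanity_check();     /* warns, then falls back */

            disable_apic = true;
            sanity_check();     /* silent fallback        */
            return 0;
    }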
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index a1d288327ff0..2310700faca5 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -969,11 +969,8 @@ void __init trap_init(void) | |||
969 | for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) | 969 | for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) |
970 | set_bit(i, used_vectors); | 970 | set_bit(i, used_vectors); |
971 | 971 | ||
972 | #ifdef CONFIG_X86_64 | ||
973 | set_bit(IA32_SYSCALL_VECTOR, used_vectors); | 972 | set_bit(IA32_SYSCALL_VECTOR, used_vectors); |
974 | #else | 973 | |
975 | set_bit(SYSCALL_VECTOR, used_vectors); | ||
976 | #endif | ||
977 | /* | 974 | /* |
978 | * Should be a barrier for any external CPU state: | 975 | * Should be a barrier for any external CPU state: |
979 | */ | 976 | */ |
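Editor's note: trap_init() now reserves IA32_SYSCALL_VECTOR in used_vectors on both 32-bit and 64-bit, and the gate-filling loop in native_init_IRQ() earlier in this diff skips any vector already set in that bitmap instead of hard-coding the exception. A standalone sketch of that reserve-then-skip handshake over a plain bitmap, with illustrative (non-kernel) constants:

    #include <stdio.h>
    #include <limits.h>

    #define NR_VECTORS            64      /* illustrative; the real table has 256 */
    #define FIRST_EXTERNAL_VECTOR 32
    #define SYSCALL_VECTOR        0x30    /* pretend this one is claimed early    */
    #define BITS_PER_LONG         (CHAR_BIT * sizeof(unsigned long))

    static unsigned long used_vectors[(NR_VECTORS + BITS_PER_LONG - 1) / BITS_PER_LONG];

    static void set_vec(int v)
    {
            used_vectors[v / BITS_PER_LONG] |= 1UL << (v % BITS_PER_LONG);
    }

    static int test_vec(int v)
    {
            return (used_vectors[v / BITS_PER_LONG] >> (v % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
            int i, gates = 0;

            /* trap_init() side: reserve exception vectors and the syscall vector */
            for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
                    set_vec(i);
            set_vec(SYSCALL_VECTOR);

            /* native_init_IRQ() side: install gates only on unreserved vectors */
            for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++)
                    if (!test_vec(i))
                            gates++;

            printf("installed %d interrupt gates, skipped reserved ones\n", gates);
            return 0;
    }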
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 33a93b417396..ef4205c1a7a5 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -637,7 +637,7 @@ static void __init lguest_init_IRQ(void) | |||
637 | 637 | ||
638 | void lguest_setup_irq(unsigned int irq) | 638 | void lguest_setup_irq(unsigned int irq) |
639 | { | 639 | { |
640 | irq_to_desc_alloc_cpu(irq, 0); | 640 | irq_to_desc_alloc_node(irq, 0); |
641 | set_irq_chip_and_handler_name(irq, &lguest_irq_controller, | 641 | set_irq_chip_and_handler_name(irq, &lguest_irq_controller, |
642 | handle_level_irq, "level"); | 642 | handle_level_irq, "level"); |
643 | } | 643 | } |
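Editor's note: the lguest hunk only tracks an API rename: irq descriptor allocation is keyed by NUMA node rather than by CPU. A standalone sketch of a node-keyed allocation wrapper of that general shape; the names and the node < 0 fallback are invented for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    struct irq_desc {
            unsigned int irq;
            int node;           /* NUMA node the descriptor was placed on */
    };

    /*
     * Allocate a descriptor "on" the given node; node < 0 means "no preference".
     * In this sketch it is ordinary malloc plus bookkeeping.
     */
    static struct irq_desc *desc_alloc_node(unsigned int irq, int node)
    {
            struct irq_desc *desc = malloc(sizeof(*desc));

            if (!desc)
                    return NULL;
            desc->irq  = irq;
            desc->node = node < 0 ? 0 : node;
            return desc;
    }

    int main(void)
    {
            struct irq_desc *d = desc_alloc_node(13, 0);

            if (d)
                    printf("irq %u on node %d\n", d->irq, d->node);
            free(d);
            return 0;
    }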
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index fecbce6e7d7c..0696d506c4ad 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -889,6 +889,9 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) | |||
889 | return 0; | 889 | return 0; |
890 | } | 890 | } |
891 | 891 | ||
892 | if (io_apic_assign_pci_irqs) | ||
893 | return 0; | ||
894 | |||
892 | /* Find IRQ routing entry */ | 895 | /* Find IRQ routing entry */ |
893 | 896 | ||
894 | if (!pirq_table) | 897 | if (!pirq_table) |
@@ -1039,56 +1042,15 @@ static void __init pcibios_fixup_irqs(void) | |||
1039 | pirq_penalty[dev->irq]++; | 1042 | pirq_penalty[dev->irq]++; |
1040 | } | 1043 | } |
1041 | 1044 | ||
1045 | if (io_apic_assign_pci_irqs) | ||
1046 | return; | ||
1047 | |||
1042 | dev = NULL; | 1048 | dev = NULL; |
1043 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 1049 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { |
1044 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); | 1050 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); |
1045 | if (!pin) | 1051 | if (!pin) |
1046 | continue; | 1052 | continue; |
1047 | 1053 | ||
1048 | #ifdef CONFIG_X86_IO_APIC | ||
1049 | /* | ||
1050 | * Recalculate IRQ numbers if we use the I/O APIC. | ||
1051 | */ | ||
1052 | if (io_apic_assign_pci_irqs) { | ||
1053 | int irq; | ||
1054 | |||
1055 | /* | ||
1056 | * interrupt pins are numbered starting from 1 | ||
1057 | */ | ||
1058 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, | ||
1059 | PCI_SLOT(dev->devfn), pin - 1); | ||
1060 | /* | ||
1061 | * Busses behind bridges are typically not listed in the | ||
1062 | * MP-table. In this case we have to look up the IRQ | ||
1063 | * based on the parent bus, parent slot, and pin number. | ||
1064 | * The SMP code detects such bridged busses itself so we | ||
1065 | * should get into this branch reliably. | ||
1066 | */ | ||
1067 | if (irq < 0 && dev->bus->parent) { | ||
1068 | /* go back to the bridge */ | ||
1069 | struct pci_dev *bridge = dev->bus->self; | ||
1070 | int bus; | ||
1071 | |||
1072 | pin = pci_swizzle_interrupt_pin(dev, pin); | ||
1073 | bus = bridge->bus->number; | ||
1074 | irq = IO_APIC_get_PCI_irq_vector(bus, | ||
1075 | PCI_SLOT(bridge->devfn), pin - 1); | ||
1076 | if (irq >= 0) | ||
1077 | dev_warn(&dev->dev, | ||
1078 | "using bridge %s INT %c to " | ||
1079 | "get IRQ %d\n", | ||
1080 | pci_name(bridge), | ||
1081 | 'A' + pin - 1, irq); | ||
1082 | } | ||
1083 | if (irq >= 0) { | ||
1084 | dev_info(&dev->dev, | ||
1085 | "PCI->APIC IRQ transform: INT %c " | ||
1086 | "-> IRQ %d\n", | ||
1087 | 'A' + pin - 1, irq); | ||
1088 | dev->irq = irq; | ||
1089 | } | ||
1090 | } | ||
1091 | #endif | ||
1092 | /* | 1054 | /* |
1093 | * Still no IRQ? Try to lookup one... | 1055 | * Still no IRQ? Try to lookup one... |
1094 | */ | 1056 | */ |
@@ -1183,6 +1145,19 @@ int __init pcibios_irq_init(void) | |||
1183 | pcibios_enable_irq = pirq_enable_irq; | 1145 | pcibios_enable_irq = pirq_enable_irq; |
1184 | 1146 | ||
1185 | pcibios_fixup_irqs(); | 1147 | pcibios_fixup_irqs(); |
1148 | |||
1149 | if (io_apic_assign_pci_irqs && pci_routeirq) { | ||
1150 | struct pci_dev *dev = NULL; | ||
1151 | /* | ||
1152 | * PCI IRQ routing is set up by pci_enable_device(), but we | ||
1153 | * also do it here in case there are still broken drivers that | ||
1154 | * don't use pci_enable_device(). | ||
1155 | */ | ||
1156 | printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n"); | ||
1157 | for_each_pci_dev(dev) | ||
1158 | pirq_enable_irq(dev); | ||
1159 | } | ||
1160 | |||
1186 | return 0; | 1161 | return 0; |
1187 | } | 1162 | } |
1188 | 1163 | ||
@@ -1213,16 +1188,23 @@ void pcibios_penalize_isa_irq(int irq, int active) | |||
1213 | static int pirq_enable_irq(struct pci_dev *dev) | 1188 | static int pirq_enable_irq(struct pci_dev *dev) |
1214 | { | 1189 | { |
1215 | u8 pin; | 1190 | u8 pin; |
1216 | struct pci_dev *temp_dev; | ||
1217 | 1191 | ||
1218 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); | 1192 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); |
1219 | if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) { | 1193 | if (pin && !pcibios_lookup_irq(dev, 1)) { |
1220 | char *msg = ""; | 1194 | char *msg = ""; |
1221 | 1195 | ||
1196 | if (!io_apic_assign_pci_irqs && dev->irq) | ||
1197 | return 0; | ||
1198 | |||
1222 | if (io_apic_assign_pci_irqs) { | 1199 | if (io_apic_assign_pci_irqs) { |
1200 | #ifdef CONFIG_X86_IO_APIC | ||
1201 | struct pci_dev *temp_dev; | ||
1223 | int irq; | 1202 | int irq; |
1203 | struct io_apic_irq_attr irq_attr; | ||
1224 | 1204 | ||
1225 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin - 1); | 1205 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, |
1206 | PCI_SLOT(dev->devfn), | ||
1207 | pin - 1, &irq_attr); | ||
1226 | /* | 1208 | /* |
1227 | * Busses behind bridges are typically not listed in the MP-table. | 1209 | * Busses behind bridges are typically not listed in the MP-table. |
1228 | * In this case we have to look up the IRQ based on the parent bus, | 1210 | * In this case we have to look up the IRQ based on the parent bus, |
@@ -1235,7 +1217,8 @@ static int pirq_enable_irq(struct pci_dev *dev) | |||
1235 | 1217 | ||
1236 | pin = pci_swizzle_interrupt_pin(dev, pin); | 1218 | pin = pci_swizzle_interrupt_pin(dev, pin); |
1237 | irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, | 1219 | irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, |
1238 | PCI_SLOT(bridge->devfn), pin - 1); | 1220 | PCI_SLOT(bridge->devfn), |
1221 | pin - 1, &irq_attr); | ||
1239 | if (irq >= 0) | 1222 | if (irq >= 0) |
1240 | dev_warn(&dev->dev, "using bridge %s " | 1223 | dev_warn(&dev->dev, "using bridge %s " |
1241 | "INT %c to get IRQ %d\n", | 1224 | "INT %c to get IRQ %d\n", |
@@ -1245,12 +1228,15 @@ static int pirq_enable_irq(struct pci_dev *dev) | |||
1245 | } | 1228 | } |
1246 | dev = temp_dev; | 1229 | dev = temp_dev; |
1247 | if (irq >= 0) { | 1230 | if (irq >= 0) { |
1231 | io_apic_set_pci_routing(&dev->dev, irq, | ||
1232 | &irq_attr); | ||
1233 | dev->irq = irq; | ||
1248 | dev_info(&dev->dev, "PCI->APIC IRQ transform: " | 1234 | dev_info(&dev->dev, "PCI->APIC IRQ transform: " |
1249 | "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); | 1235 | "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); |
1250 | dev->irq = irq; | ||
1251 | return 0; | 1236 | return 0; |
1252 | } else | 1237 | } else |
1253 | msg = "; probably buggy MP table"; | 1238 | msg = "; probably buggy MP table"; |
1239 | #endif | ||
1254 | } else if (pci_probe & PCI_BIOS_IRQ_SCAN) | 1240 | } else if (pci_probe & PCI_BIOS_IRQ_SCAN) |
1255 | msg = ""; | 1241 | msg = ""; |
1256 | else | 1242 | else |