author      Linus Torvalds <torvalds@linux-foundation.org>    2012-07-22 15:19:36 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>    2012-07-22 15:19:36 -0400
commit      bd3e57f9132ac55e2848aa10cf50341de2508e1d
tree        3633129fe02e804852a18c8e9b3045f35db6db0b
parent      3fad0953a12f92289f1e35f091c4fa09d8e1884e
parent      36d93d88a5396baa135f8bcde7b8501dfe3b8e53
Merge branch 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 platform changes from Ingo Molnar:
 "This tree mostly involves various APIC driver cleanups/robustization,
  and vSMP motivated platform callback improvements/cleanups"

Fix up trivial conflict due to printk cleanup right next to return value
change.

* 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (29 commits)
  Revert "x86/early_printk: Replace obsolete simple_strtoul() usage with kstrtoint()"
  x86/apic/x2apic: Use multiple cluster members for the irq destination only with the explicit affinity
  x86/apic/x2apic: Limit the vector reservation to the user specified mask
  x86/apic: Optimize cpu traversal in __assign_irq_vector() using domain membership
  x86/vsmp: Fix vector_allocation_domain's return value
  irq/apic: Use config_enabled(CONFIG_SMP) checks to clean up irq_set_affinity() for UP
  x86/vsmp: Fix linker error when CONFIG_PROC_FS is not set
  x86/apic/es7000: Make apicid of a cluster (not CPU) from a cpumask
  x86/apic/es7000+summit: Always make valid apicid from a cpumask
  x86/apic/es7000+summit: Fix compile warning in cpu_mask_to_apicid()
  x86/apic: Fix ugly casting and branching in cpu_mask_to_apicid_and()
  x86/apic: Eliminate cpu_mask_to_apicid() operation
  x86/x2apic/cluster: Vector_allocation_domain() should return a value
  x86/apic/irq_remap: Silence a bogus pr_err()
  x86/vsmp: Ignore IOAPIC IRQ affinity if possible
  x86/apic: Make cpu_mask_to_apicid() operations check cpu_online_mask
  x86/apic: Make cpu_mask_to_apicid() operations return error code
  x86/apic: Avoid useless scanning thru a cpumask in assign_irq_vector()
  x86/apic: Try to spread IRQ vectors to different priority levels
  x86/apic: Factor out default vector_allocation_domain() operation
  ...
-rw-r--r--  arch/x86/include/asm/apic.h             61
-rw-r--r--  arch/x86/include/asm/x2apic.h           18
-rw-r--r--  arch/x86/include/asm/x86_init.h          4
-rw-r--r--  arch/x86/kernel/apic/apic.c             19
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c     76
-rw-r--r--  arch/x86/kernel/apic/apic_noop.c         9
-rw-r--r--  arch/x86/kernel/apic/apic_numachip.c    50
-rw-r--r--  arch/x86/kernel/apic/bigsmp_32.c        48
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c        51
-rw-r--r--  arch/x86/kernel/apic/io_apic.c         312
-rw-r--r--  arch/x86/kernel/apic/numaq_32.c         30
-rw-r--r--  arch/x86/kernel/apic/probe_32.c         23
-rw-r--r--  arch/x86/kernel/apic/probe_64.c         11
-rw-r--r--  arch/x86/kernel/apic/summit_32.c        46
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c   82
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c      39
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c      45
-rw-r--r--  arch/x86/kernel/setup.c                  2
-rw-r--r--  arch/x86/kernel/vsmp_64.c               44
-rw-r--r--  arch/x86/kernel/x86_init.c               2
-rw-r--r--  arch/x86/platform/uv/uv_irq.c            9
-rw-r--r--  drivers/iommu/intel_irq_remapping.c     20
-rw-r--r--  drivers/iommu/irq_remapping.c            5
-rw-r--r--  drivers/iommu/irq_remapping.h            2
-rw-r--r--  include/linux/irq.h                      2
25 files changed, 429 insertions, 581 deletions
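
The central API change running through this series is visible in the apic.h hunk below: the ->cpu_mask_to_apicid() operation is removed and ->cpu_mask_to_apicid_and() now returns an error code while handing the destination APIC ID back through a pointer, instead of signalling failure in-band with BAD_APICID. A minimal sketch of the resulting caller pattern follows; example_compose_dest() is a hypothetical helper for illustration only and does not appear in this diff.

	/*
	 * Illustrative caller of the reworked callback: check the return
	 * value rather than comparing the result against BAD_APICID.
	 */
	static int example_compose_dest(struct irq_cfg *cfg, unsigned int *dest)
	{
		int err;

		/* Search is confined to the vector domain and the online targets. */
		err = apic->cpu_mask_to_apicid_and(cfg->domain,
						   apic->target_cpus(), dest);
		if (err)
			return err;	/* no usable online CPU in the masks */

		return 0;
	}

This is the pattern the io_apic.c and msi changes below adopt (setup_ioapic_irq(), msi_compose_msg(), arch_setup_ht_irq()).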
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index eaff4790ed96..88093c1d44fd 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -306,7 +306,8 @@ struct apic {
306 unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid); 306 unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
307 unsigned long (*check_apicid_present)(int apicid); 307 unsigned long (*check_apicid_present)(int apicid);
308 308
309 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); 309 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask,
310 const struct cpumask *mask);
310 void (*init_apic_ldr)(void); 311 void (*init_apic_ldr)(void);
311 312
312 void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap); 313 void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
@@ -331,9 +332,9 @@ struct apic {
331 unsigned long (*set_apic_id)(unsigned int id); 332 unsigned long (*set_apic_id)(unsigned int id);
332 unsigned long apic_id_mask; 333 unsigned long apic_id_mask;
333 334
334 unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); 335 int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
335 unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, 336 const struct cpumask *andmask,
336 const struct cpumask *andmask); 337 unsigned int *apicid);
337 338
338 /* ipi */ 339 /* ipi */
339 void (*send_IPI_mask)(const struct cpumask *mask, int vector); 340 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
@@ -537,6 +538,11 @@ static inline const struct cpumask *default_target_cpus(void)
537#endif 538#endif
538} 539}
539 540
541static inline const struct cpumask *online_target_cpus(void)
542{
543 return cpu_online_mask;
544}
545
540DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); 546DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
541 547
542 548
@@ -586,21 +592,50 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
586 592
587#endif 593#endif
588 594
589static inline unsigned int 595static inline int
590default_cpu_mask_to_apicid(const struct cpumask *cpumask) 596flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
597 const struct cpumask *andmask,
598 unsigned int *apicid)
591{ 599{
592 return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; 600 unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
601 cpumask_bits(andmask)[0] &
602 cpumask_bits(cpu_online_mask)[0] &
603 APIC_ALL_CPUS;
604
605 if (likely(cpu_mask)) {
606 *apicid = (unsigned int)cpu_mask;
607 return 0;
608 } else {
609 return -EINVAL;
610 }
593} 611}
594 612
595static inline unsigned int 613extern int
596default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 614default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
597 const struct cpumask *andmask) 615 const struct cpumask *andmask,
616 unsigned int *apicid);
617
618static inline void
619flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
620 const struct cpumask *mask)
598{ 621{
599 unsigned long mask1 = cpumask_bits(cpumask)[0]; 622 /* Careful. Some cpus do not strictly honor the set of cpus
600 unsigned long mask2 = cpumask_bits(andmask)[0]; 623 * specified in the interrupt destination when using lowest
601 unsigned long mask3 = cpumask_bits(cpu_online_mask)[0]; 624 * priority interrupt delivery mode.
625 *
626 * In particular there was a hyperthreading cpu observed to
627 * deliver interrupts to the wrong hyperthread when only one
628 * hyperthread was specified in the interrupt desitination.
629 */
630 cpumask_clear(retmask);
631 cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
632}
602 633
603 return (unsigned int)(mask1 & mask2 & mask3); 634static inline void
635default_vector_allocation_domain(int cpu, struct cpumask *retmask,
636 const struct cpumask *mask)
637{
638 cpumask_copy(retmask, cpumask_of(cpu));
604} 639}
605 640
606static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid) 641static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
diff --git a/arch/x86/include/asm/x2apic.h b/arch/x86/include/asm/x2apic.h
index 92e54abf89e0..f90f0a587c66 100644
--- a/arch/x86/include/asm/x2apic.h
+++ b/arch/x86/include/asm/x2apic.h
@@ -9,15 +9,6 @@
9#include <asm/ipi.h> 9#include <asm/ipi.h>
10#include <linux/cpumask.h> 10#include <linux/cpumask.h>
11 11
12/*
13 * Need to use more than cpu 0, because we need more vectors
14 * when MSI-X are used.
15 */
16static const struct cpumask *x2apic_target_cpus(void)
17{
18 return cpu_online_mask;
19}
20
21static int x2apic_apic_id_valid(int apicid) 12static int x2apic_apic_id_valid(int apicid)
22{ 13{
23 return 1; 14 return 1;
@@ -28,15 +19,6 @@ static int x2apic_apic_id_registered(void)
28 return 1; 19 return 1;
29} 20}
30 21
31/*
32 * For now each logical cpu is in its own vector allocation domain.
33 */
34static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
35{
36 cpumask_clear(retmask);
37 cpumask_set_cpu(cpu, retmask);
38}
39
40static void 22static void
41__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) 23__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
42{ 24{
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index c090af10ac7d..38155f667144 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -156,7 +156,6 @@ struct x86_cpuinit_ops {
156/** 156/**
157 * struct x86_platform_ops - platform specific runtime functions 157 * struct x86_platform_ops - platform specific runtime functions
158 * @calibrate_tsc: calibrate TSC 158 * @calibrate_tsc: calibrate TSC
159 * @wallclock_init: init the wallclock device
160 * @get_wallclock: get time from HW clock like RTC etc. 159 * @get_wallclock: get time from HW clock like RTC etc.
161 * @set_wallclock: set time back to HW clock 160 * @set_wallclock: set time back to HW clock
162 * @is_untracked_pat_range exclude from PAT logic 161 * @is_untracked_pat_range exclude from PAT logic
@@ -164,10 +163,10 @@ struct x86_cpuinit_ops {
164 * @i8042_detect pre-detect if i8042 controller exists 163 * @i8042_detect pre-detect if i8042 controller exists
165 * @save_sched_clock_state: save state for sched_clock() on suspend 164 * @save_sched_clock_state: save state for sched_clock() on suspend
166 * @restore_sched_clock_state: restore state for sched_clock() on resume 165 * @restore_sched_clock_state: restore state for sched_clock() on resume
166 * @apic_post_init: adjust apic if neeeded
167 */ 167 */
168struct x86_platform_ops { 168struct x86_platform_ops {
169 unsigned long (*calibrate_tsc)(void); 169 unsigned long (*calibrate_tsc)(void);
170 void (*wallclock_init)(void);
171 unsigned long (*get_wallclock)(void); 170 unsigned long (*get_wallclock)(void);
172 int (*set_wallclock)(unsigned long nowtime); 171 int (*set_wallclock)(unsigned long nowtime);
173 void (*iommu_shutdown)(void); 172 void (*iommu_shutdown)(void);
@@ -177,6 +176,7 @@ struct x86_platform_ops {
177 int (*i8042_detect)(void); 176 int (*i8042_detect)(void);
178 void (*save_sched_clock_state)(void); 177 void (*save_sched_clock_state)(void);
179 void (*restore_sched_clock_state)(void); 178 void (*restore_sched_clock_state)(void);
179 void (*apic_post_init)(void);
180}; 180};
181 181
182struct pci_dev; 182struct pci_dev;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 39a222e094af..c421512ca5eb 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2123,6 +2123,25 @@ void default_init_apic_ldr(void)
2123 apic_write(APIC_LDR, val); 2123 apic_write(APIC_LDR, val);
2124} 2124}
2125 2125
2126int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
2127 const struct cpumask *andmask,
2128 unsigned int *apicid)
2129{
2130 unsigned int cpu;
2131
2132 for_each_cpu_and(cpu, cpumask, andmask) {
2133 if (cpumask_test_cpu(cpu, cpu_online_mask))
2134 break;
2135 }
2136
2137 if (likely(cpu < nr_cpu_ids)) {
2138 *apicid = per_cpu(x86_cpu_to_apicid, cpu);
2139 return 0;
2140 }
2141
2142 return -EINVAL;
2143}
2144
2126/* 2145/*
2127 * Power management 2146 * Power management
2128 */ 2147 */
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 0e881c46e8c8..00c77cf78e9e 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -36,25 +36,6 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
36 return 1; 36 return 1;
37} 37}
38 38
39static const struct cpumask *flat_target_cpus(void)
40{
41 return cpu_online_mask;
42}
43
44static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
45{
46 /* Careful. Some cpus do not strictly honor the set of cpus
47 * specified in the interrupt destination when using lowest
48 * priority interrupt delivery mode.
49 *
50 * In particular there was a hyperthreading cpu observed to
51 * deliver interrupts to the wrong hyperthread when only one
52 * hyperthread was specified in the interrupt desitination.
53 */
54 cpumask_clear(retmask);
55 cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
56}
57
58/* 39/*
59 * Set up the logical destination ID. 40 * Set up the logical destination ID.
60 * 41 *
@@ -92,7 +73,7 @@ static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
92} 73}
93 74
94static void 75static void
95 flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) 76flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
96{ 77{
97 unsigned long mask = cpumask_bits(cpumask)[0]; 78 unsigned long mask = cpumask_bits(cpumask)[0];
98 int cpu = smp_processor_id(); 79 int cpu = smp_processor_id();
@@ -186,7 +167,7 @@ static struct apic apic_flat = {
186 .irq_delivery_mode = dest_LowestPrio, 167 .irq_delivery_mode = dest_LowestPrio,
187 .irq_dest_mode = 1, /* logical */ 168 .irq_dest_mode = 1, /* logical */
188 169
189 .target_cpus = flat_target_cpus, 170 .target_cpus = online_target_cpus,
190 .disable_esr = 0, 171 .disable_esr = 0,
191 .dest_logical = APIC_DEST_LOGICAL, 172 .dest_logical = APIC_DEST_LOGICAL,
192 .check_apicid_used = NULL, 173 .check_apicid_used = NULL,
@@ -210,8 +191,7 @@ static struct apic apic_flat = {
210 .set_apic_id = set_apic_id, 191 .set_apic_id = set_apic_id,
211 .apic_id_mask = 0xFFu << 24, 192 .apic_id_mask = 0xFFu << 24,
212 193
213 .cpu_mask_to_apicid = default_cpu_mask_to_apicid, 194 .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
214 .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
215 195
216 .send_IPI_mask = flat_send_IPI_mask, 196 .send_IPI_mask = flat_send_IPI_mask,
217 .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, 197 .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
@@ -262,17 +242,6 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
262 return 0; 242 return 0;
263} 243}
264 244
265static const struct cpumask *physflat_target_cpus(void)
266{
267 return cpu_online_mask;
268}
269
270static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
271{
272 cpumask_clear(retmask);
273 cpumask_set_cpu(cpu, retmask);
274}
275
276static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) 245static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
277{ 246{
278 default_send_IPI_mask_sequence_phys(cpumask, vector); 247 default_send_IPI_mask_sequence_phys(cpumask, vector);
@@ -294,38 +263,6 @@ static void physflat_send_IPI_all(int vector)
294 physflat_send_IPI_mask(cpu_online_mask, vector); 263 physflat_send_IPI_mask(cpu_online_mask, vector);
295} 264}
296 265
297static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
298{
299 int cpu;
300
301 /*
302 * We're using fixed IRQ delivery, can only return one phys APIC ID.
303 * May as well be the first.
304 */
305 cpu = cpumask_first(cpumask);
306 if ((unsigned)cpu < nr_cpu_ids)
307 return per_cpu(x86_cpu_to_apicid, cpu);
308 else
309 return BAD_APICID;
310}
311
312static unsigned int
313physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
314 const struct cpumask *andmask)
315{
316 int cpu;
317
318 /*
319 * We're using fixed IRQ delivery, can only return one phys APIC ID.
320 * May as well be the first.
321 */
322 for_each_cpu_and(cpu, cpumask, andmask) {
323 if (cpumask_test_cpu(cpu, cpu_online_mask))
324 break;
325 }
326 return per_cpu(x86_cpu_to_apicid, cpu);
327}
328
329static int physflat_probe(void) 266static int physflat_probe(void)
330{ 267{
331 if (apic == &apic_physflat || num_possible_cpus() > 8) 268 if (apic == &apic_physflat || num_possible_cpus() > 8)
@@ -345,13 +282,13 @@ static struct apic apic_physflat = {
345 .irq_delivery_mode = dest_Fixed, 282 .irq_delivery_mode = dest_Fixed,
346 .irq_dest_mode = 0, /* physical */ 283 .irq_dest_mode = 0, /* physical */
347 284
348 .target_cpus = physflat_target_cpus, 285 .target_cpus = online_target_cpus,
349 .disable_esr = 0, 286 .disable_esr = 0,
350 .dest_logical = 0, 287 .dest_logical = 0,
351 .check_apicid_used = NULL, 288 .check_apicid_used = NULL,
352 .check_apicid_present = NULL, 289 .check_apicid_present = NULL,
353 290
354 .vector_allocation_domain = physflat_vector_allocation_domain, 291 .vector_allocation_domain = default_vector_allocation_domain,
355 /* not needed, but shouldn't hurt: */ 292 /* not needed, but shouldn't hurt: */
356 .init_apic_ldr = flat_init_apic_ldr, 293 .init_apic_ldr = flat_init_apic_ldr,
357 294
@@ -370,8 +307,7 @@ static struct apic apic_physflat = {
370 .set_apic_id = set_apic_id, 307 .set_apic_id = set_apic_id,
371 .apic_id_mask = 0xFFu << 24, 308 .apic_id_mask = 0xFFu << 24,
372 309
373 .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, 310 .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
374 .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
375 311
376 .send_IPI_mask = physflat_send_IPI_mask, 312 .send_IPI_mask = physflat_send_IPI_mask,
377 .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, 313 .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index a6e4c6e06c08..e145f28b4099 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -100,12 +100,12 @@ static unsigned long noop_check_apicid_present(int bit)
100 return physid_isset(bit, phys_cpu_present_map); 100 return physid_isset(bit, phys_cpu_present_map);
101} 101}
102 102
103static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask) 103static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask,
104 const struct cpumask *mask)
104{ 105{
105 if (cpu != 0) 106 if (cpu != 0)
106 pr_warning("APIC: Vector allocated for non-BSP cpu\n"); 107 pr_warning("APIC: Vector allocated for non-BSP cpu\n");
107 cpumask_clear(retmask); 108 cpumask_copy(retmask, cpumask_of(cpu));
108 cpumask_set_cpu(cpu, retmask);
109} 109}
110 110
111static u32 noop_apic_read(u32 reg) 111static u32 noop_apic_read(u32 reg)
@@ -159,8 +159,7 @@ struct apic apic_noop = {
159 .set_apic_id = NULL, 159 .set_apic_id = NULL,
160 .apic_id_mask = 0x0F << 24, 160 .apic_id_mask = 0x0F << 24,
161 161
162 .cpu_mask_to_apicid = default_cpu_mask_to_apicid, 162 .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
163 .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
164 163
165 .send_IPI_mask = noop_send_IPI_mask, 164 .send_IPI_mask = noop_send_IPI_mask,
166 .send_IPI_mask_allbutself = noop_send_IPI_mask_allbutself, 165 .send_IPI_mask_allbutself = noop_send_IPI_mask_allbutself,
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 6ec6d5d297c3..bc552cff2578 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -72,17 +72,6 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
72 return initial_apic_id >> index_msb; 72 return initial_apic_id >> index_msb;
73} 73}
74 74
75static const struct cpumask *numachip_target_cpus(void)
76{
77 return cpu_online_mask;
78}
79
80static void numachip_vector_allocation_domain(int cpu, struct cpumask *retmask)
81{
82 cpumask_clear(retmask);
83 cpumask_set_cpu(cpu, retmask);
84}
85
86static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip) 75static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
87{ 76{
88 union numachip_csr_g3_ext_irq_gen int_gen; 77 union numachip_csr_g3_ext_irq_gen int_gen;
@@ -157,38 +146,6 @@ static void numachip_send_IPI_self(int vector)
157 __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); 146 __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
158} 147}
159 148
160static unsigned int numachip_cpu_mask_to_apicid(const struct cpumask *cpumask)
161{
162 int cpu;
163
164 /*
165 * We're using fixed IRQ delivery, can only return one phys APIC ID.
166 * May as well be the first.
167 */
168 cpu = cpumask_first(cpumask);
169 if (likely((unsigned)cpu < nr_cpu_ids))
170 return per_cpu(x86_cpu_to_apicid, cpu);
171
172 return BAD_APICID;
173}
174
175static unsigned int
176numachip_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
177 const struct cpumask *andmask)
178{
179 int cpu;
180
181 /*
182 * We're using fixed IRQ delivery, can only return one phys APIC ID.
183 * May as well be the first.
184 */
185 for_each_cpu_and(cpu, cpumask, andmask) {
186 if (cpumask_test_cpu(cpu, cpu_online_mask))
187 break;
188 }
189 return per_cpu(x86_cpu_to_apicid, cpu);
190}
191
192static int __init numachip_probe(void) 149static int __init numachip_probe(void)
193{ 150{
194 return apic == &apic_numachip; 151 return apic == &apic_numachip;
@@ -253,13 +210,13 @@ static struct apic apic_numachip __refconst = {
253 .irq_delivery_mode = dest_Fixed, 210 .irq_delivery_mode = dest_Fixed,
254 .irq_dest_mode = 0, /* physical */ 211 .irq_dest_mode = 0, /* physical */
255 212
256 .target_cpus = numachip_target_cpus, 213 .target_cpus = online_target_cpus,
257 .disable_esr = 0, 214 .disable_esr = 0,
258 .dest_logical = 0, 215 .dest_logical = 0,
259 .check_apicid_used = NULL, 216 .check_apicid_used = NULL,
260 .check_apicid_present = NULL, 217 .check_apicid_present = NULL,
261 218
262 .vector_allocation_domain = numachip_vector_allocation_domain, 219 .vector_allocation_domain = default_vector_allocation_domain,
263 .init_apic_ldr = flat_init_apic_ldr, 220 .init_apic_ldr = flat_init_apic_ldr,
264 221
265 .ioapic_phys_id_map = NULL, 222 .ioapic_phys_id_map = NULL,
@@ -277,8 +234,7 @@ static struct apic apic_numachip __refconst = {
277 .set_apic_id = set_apic_id, 234 .set_apic_id = set_apic_id,
278 .apic_id_mask = 0xffU << 24, 235 .apic_id_mask = 0xffU << 24,
279 236
280 .cpu_mask_to_apicid = numachip_cpu_mask_to_apicid, 237 .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
281 .cpu_mask_to_apicid_and = numachip_cpu_mask_to_apicid_and,
282 238
283 .send_IPI_mask = numachip_send_IPI_mask, 239 .send_IPI_mask = numachip_send_IPI_mask,
284 .send_IPI_mask_allbutself = numachip_send_IPI_mask_allbutself, 240 .send_IPI_mask_allbutself = numachip_send_IPI_mask_allbutself,
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 31fbdbfbf960..d50e3640d5ae 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -26,15 +26,6 @@ static int bigsmp_apic_id_registered(void)
26 return 1; 26 return 1;
27} 27}
28 28
29static const struct cpumask *bigsmp_target_cpus(void)
30{
31#ifdef CONFIG_SMP
32 return cpu_online_mask;
33#else
34 return cpumask_of(0);
35#endif
36}
37
38static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid) 29static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
39{ 30{
40 return 0; 31 return 0;
@@ -105,32 +96,6 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid)
105 return 1; 96 return 1;
106} 97}
107 98
108/* As we are using single CPU as destination, pick only one CPU here */
109static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
110{
111 int cpu = cpumask_first(cpumask);
112
113 if (cpu < nr_cpu_ids)
114 return cpu_physical_id(cpu);
115 return BAD_APICID;
116}
117
118static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
119 const struct cpumask *andmask)
120{
121 int cpu;
122
123 /*
124 * We're using fixed IRQ delivery, can only return one phys APIC ID.
125 * May as well be the first.
126 */
127 for_each_cpu_and(cpu, cpumask, andmask) {
128 if (cpumask_test_cpu(cpu, cpu_online_mask))
129 return cpu_physical_id(cpu);
130 }
131 return BAD_APICID;
132}
133
134static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) 99static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
135{ 100{
136 return cpuid_apic >> index_msb; 101 return cpuid_apic >> index_msb;
@@ -177,12 +142,6 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
177 { } /* NULL entry stops DMI scanning */ 142 { } /* NULL entry stops DMI scanning */
178}; 143};
179 144
180static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
181{
182 cpumask_clear(retmask);
183 cpumask_set_cpu(cpu, retmask);
184}
185
186static int probe_bigsmp(void) 145static int probe_bigsmp(void)
187{ 146{
188 if (def_to_bigsmp) 147 if (def_to_bigsmp)
@@ -205,13 +164,13 @@ static struct apic apic_bigsmp = {
205 /* phys delivery to target CPU: */ 164 /* phys delivery to target CPU: */
206 .irq_dest_mode = 0, 165 .irq_dest_mode = 0,
207 166
208 .target_cpus = bigsmp_target_cpus, 167 .target_cpus = default_target_cpus,
209 .disable_esr = 1, 168 .disable_esr = 1,
210 .dest_logical = 0, 169 .dest_logical = 0,
211 .check_apicid_used = bigsmp_check_apicid_used, 170 .check_apicid_used = bigsmp_check_apicid_used,
212 .check_apicid_present = bigsmp_check_apicid_present, 171 .check_apicid_present = bigsmp_check_apicid_present,
213 172
214 .vector_allocation_domain = bigsmp_vector_allocation_domain, 173 .vector_allocation_domain = default_vector_allocation_domain,
215 .init_apic_ldr = bigsmp_init_apic_ldr, 174 .init_apic_ldr = bigsmp_init_apic_ldr,
216 175
217 .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, 176 .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
@@ -229,8 +188,7 @@ static struct apic apic_bigsmp = {
229 .set_apic_id = NULL, 188 .set_apic_id = NULL,
230 .apic_id_mask = 0xFF << 24, 189 .apic_id_mask = 0xFF << 24,
231 190
232 .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid, 191 .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
233 .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and,
234 192
235 .send_IPI_mask = bigsmp_send_IPI_mask, 193 .send_IPI_mask = bigsmp_send_IPI_mask,
236 .send_IPI_mask_allbutself = NULL, 194 .send_IPI_mask_allbutself = NULL,
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index db4ab1be3c79..0874799a98c6 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -394,21 +394,6 @@ static void es7000_enable_apic_mode(void)
394 WARN(1, "Command failed, status = %x\n", mip_status); 394 WARN(1, "Command failed, status = %x\n", mip_status);
395} 395}
396 396
397static void es7000_vector_allocation_domain(int cpu, struct cpumask *retmask)
398{
399 /* Careful. Some cpus do not strictly honor the set of cpus
400 * specified in the interrupt destination when using lowest
401 * priority interrupt delivery mode.
402 *
403 * In particular there was a hyperthreading cpu observed to
404 * deliver interrupts to the wrong hyperthread when only one
405 * hyperthread was specified in the interrupt desitination.
406 */
407 cpumask_clear(retmask);
408 cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
409}
410
411
412static void es7000_wait_for_init_deassert(atomic_t *deassert) 397static void es7000_wait_for_init_deassert(atomic_t *deassert)
413{ 398{
414 while (!atomic_read(deassert)) 399 while (!atomic_read(deassert))
@@ -540,45 +525,49 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
540 return 1; 525 return 1;
541} 526}
542 527
543static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask) 528static inline int
529es7000_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
544{ 530{
545 unsigned int round = 0; 531 unsigned int round = 0;
546 int cpu, uninitialized_var(apicid); 532 unsigned int cpu, uninitialized_var(apicid);
547 533
548 /* 534 /*
549 * The cpus in the mask must all be on the apic cluster. 535 * The cpus in the mask must all be on the apic cluster.
550 */ 536 */
551 for_each_cpu(cpu, cpumask) { 537 for_each_cpu_and(cpu, cpumask, cpu_online_mask) {
552 int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); 538 int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
553 539
554 if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { 540 if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
555 WARN(1, "Not a valid mask!"); 541 WARN(1, "Not a valid mask!");
556 542
557 return BAD_APICID; 543 return -EINVAL;
558 } 544 }
559 apicid = new_apicid; 545 apicid |= new_apicid;
560 round++; 546 round++;
561 } 547 }
562 return apicid; 548 if (!round)
549 return -EINVAL;
550 *dest_id = apicid;
551 return 0;
563} 552}
564 553
565static unsigned int 554static int
566es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, 555es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
567 const struct cpumask *andmask) 556 const struct cpumask *andmask,
557 unsigned int *apicid)
568{ 558{
569 int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
570 cpumask_var_t cpumask; 559 cpumask_var_t cpumask;
560 *apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
571 561
572 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) 562 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
573 return apicid; 563 return 0;
574 564
575 cpumask_and(cpumask, inmask, andmask); 565 cpumask_and(cpumask, inmask, andmask);
576 cpumask_and(cpumask, cpumask, cpu_online_mask); 566 es7000_cpu_mask_to_apicid(cpumask, apicid);
577 apicid = es7000_cpu_mask_to_apicid(cpumask);
578 567
579 free_cpumask_var(cpumask); 568 free_cpumask_var(cpumask);
580 569
581 return apicid; 570 return 0;
582} 571}
583 572
584static int es7000_phys_pkg_id(int cpuid_apic, int index_msb) 573static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
@@ -638,7 +627,7 @@ static struct apic __refdata apic_es7000_cluster = {
638 .check_apicid_used = es7000_check_apicid_used, 627 .check_apicid_used = es7000_check_apicid_used,
639 .check_apicid_present = es7000_check_apicid_present, 628 .check_apicid_present = es7000_check_apicid_present,
640 629
641 .vector_allocation_domain = es7000_vector_allocation_domain, 630 .vector_allocation_domain = flat_vector_allocation_domain,
642 .init_apic_ldr = es7000_init_apic_ldr_cluster, 631 .init_apic_ldr = es7000_init_apic_ldr_cluster,
643 632
644 .ioapic_phys_id_map = es7000_ioapic_phys_id_map, 633 .ioapic_phys_id_map = es7000_ioapic_phys_id_map,
@@ -656,7 +645,6 @@ static struct apic __refdata apic_es7000_cluster = {
656 .set_apic_id = NULL, 645 .set_apic_id = NULL,
657 .apic_id_mask = 0xFF << 24, 646 .apic_id_mask = 0xFF << 24,
658 647
659 .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
660 .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, 648 .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
661 649
662 .send_IPI_mask = es7000_send_IPI_mask, 650 .send_IPI_mask = es7000_send_IPI_mask,
@@ -705,7 +693,7 @@ static struct apic __refdata apic_es7000 = {
705 .check_apicid_used = es7000_check_apicid_used, 693 .check_apicid_used = es7000_check_apicid_used,
706 .check_apicid_present = es7000_check_apicid_present, 694 .check_apicid_present = es7000_check_apicid_present,
707 695
708 .vector_allocation_domain = es7000_vector_allocation_domain, 696 .vector_allocation_domain = flat_vector_allocation_domain,
709 .init_apic_ldr = es7000_init_apic_ldr, 697 .init_apic_ldr = es7000_init_apic_ldr,
710 698
711 .ioapic_phys_id_map = es7000_ioapic_phys_id_map, 699 .ioapic_phys_id_map = es7000_ioapic_phys_id_map,
@@ -723,7 +711,6 @@ static struct apic __refdata apic_es7000 = {
723 .set_apic_id = NULL, 711 .set_apic_id = NULL,
724 .apic_id_mask = 0xFF << 24, 712 .apic_id_mask = 0xFF << 24,
725 713
726 .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
727 .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, 714 .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
728 715
729 .send_IPI_mask = es7000_send_IPI_mask, 716 .send_IPI_mask = es7000_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 8704918514dd..406eee784684 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1112,8 +1112,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1112 * 0x80, because int 0x80 is hm, kind of importantish. ;) 1112 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1113 */ 1113 */
1114 static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; 1114 static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
1115 static int current_offset = VECTOR_OFFSET_START % 8; 1115 static int current_offset = VECTOR_OFFSET_START % 16;
1116 unsigned int old_vector;
1117 int cpu, err; 1116 int cpu, err;
1118 cpumask_var_t tmp_mask; 1117 cpumask_var_t tmp_mask;
1119 1118
@@ -1123,35 +1122,45 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1123 if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) 1122 if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
1124 return -ENOMEM; 1123 return -ENOMEM;
1125 1124
1126 old_vector = cfg->vector;
1127 if (old_vector) {
1128 cpumask_and(tmp_mask, mask, cpu_online_mask);
1129 cpumask_and(tmp_mask, cfg->domain, tmp_mask);
1130 if (!cpumask_empty(tmp_mask)) {
1131 free_cpumask_var(tmp_mask);
1132 return 0;
1133 }
1134 }
1135
1136 /* Only try and allocate irqs on cpus that are present */ 1125 /* Only try and allocate irqs on cpus that are present */
1137 err = -ENOSPC; 1126 err = -ENOSPC;
1138 for_each_cpu_and(cpu, mask, cpu_online_mask) { 1127 cpumask_clear(cfg->old_domain);
1139 int new_cpu; 1128 cpu = cpumask_first_and(mask, cpu_online_mask);
1140 int vector, offset; 1129 while (cpu < nr_cpu_ids) {
1130 int new_cpu, vector, offset;
1141 1131
1142 apic->vector_allocation_domain(cpu, tmp_mask); 1132 apic->vector_allocation_domain(cpu, tmp_mask, mask);
1133
1134 if (cpumask_subset(tmp_mask, cfg->domain)) {
1135 err = 0;
1136 if (cpumask_equal(tmp_mask, cfg->domain))
1137 break;
1138 /*
1139 * New cpumask using the vector is a proper subset of
1140 * the current in use mask. So cleanup the vector
1141 * allocation for the members that are not used anymore.
1142 */
1143 cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
1144 cfg->move_in_progress = 1;
1145 cpumask_and(cfg->domain, cfg->domain, tmp_mask);
1146 break;
1147 }
1143 1148
1144 vector = current_vector; 1149 vector = current_vector;
1145 offset = current_offset; 1150 offset = current_offset;
1146next: 1151next:
1147 vector += 8; 1152 vector += 16;
1148 if (vector >= first_system_vector) { 1153 if (vector >= first_system_vector) {
1149 /* If out of vectors on large boxen, must share them. */ 1154 offset = (offset + 1) % 16;
1150 offset = (offset + 1) % 8;
1151 vector = FIRST_EXTERNAL_VECTOR + offset; 1155 vector = FIRST_EXTERNAL_VECTOR + offset;
1152 } 1156 }
1153 if (unlikely(current_vector == vector)) 1157
1158 if (unlikely(current_vector == vector)) {
1159 cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
1160 cpumask_andnot(tmp_mask, mask, cfg->old_domain);
1161 cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
1154 continue; 1162 continue;
1163 }
1155 1164
1156 if (test_bit(vector, used_vectors)) 1165 if (test_bit(vector, used_vectors))
1157 goto next; 1166 goto next;
@@ -1162,7 +1171,7 @@ next:
1162 /* Found one! */ 1171 /* Found one! */
1163 current_vector = vector; 1172 current_vector = vector;
1164 current_offset = offset; 1173 current_offset = offset;
1165 if (old_vector) { 1174 if (cfg->vector) {
1166 cfg->move_in_progress = 1; 1175 cfg->move_in_progress = 1;
1167 cpumask_copy(cfg->old_domain, cfg->domain); 1176 cpumask_copy(cfg->old_domain, cfg->domain);
1168 } 1177 }
@@ -1346,18 +1355,18 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
1346 1355
1347 if (!IO_APIC_IRQ(irq)) 1356 if (!IO_APIC_IRQ(irq))
1348 return; 1357 return;
1349 /*
1350 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
1351 * controllers like 8259. Now that IO-APIC can handle this irq, update
1352 * the cfg->domain.
1353 */
1354 if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
1355 apic->vector_allocation_domain(0, cfg->domain);
1356 1358
1357 if (assign_irq_vector(irq, cfg, apic->target_cpus())) 1359 if (assign_irq_vector(irq, cfg, apic->target_cpus()))
1358 return; 1360 return;
1359 1361
1360 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 1362 if (apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(),
1363 &dest)) {
1364 pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n",
1365 mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
1366 __clear_irq_vector(irq, cfg);
1367
1368 return;
1369 }
1361 1370
1362 apic_printk(APIC_VERBOSE,KERN_DEBUG 1371 apic_printk(APIC_VERBOSE,KERN_DEBUG
1363 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " 1372 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@@ -1366,7 +1375,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
1366 cfg->vector, irq, attr->trigger, attr->polarity, dest); 1375 cfg->vector, irq, attr->trigger, attr->polarity, dest);
1367 1376
1368 if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) { 1377 if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) {
1369 pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1378 pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1370 mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); 1379 mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
1371 __clear_irq_vector(irq, cfg); 1380 __clear_irq_vector(irq, cfg);
1372 1381
@@ -1469,9 +1478,10 @@ void setup_IO_APIC_irq_extra(u32 gsi)
1469 * Set up the timer pin, possibly with the 8259A-master behind. 1478 * Set up the timer pin, possibly with the 8259A-master behind.
1470 */ 1479 */
1471static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx, 1480static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
1472 unsigned int pin, int vector) 1481 unsigned int pin, int vector)
1473{ 1482{
1474 struct IO_APIC_route_entry entry; 1483 struct IO_APIC_route_entry entry;
1484 unsigned int dest;
1475 1485
1476 if (irq_remapping_enabled) 1486 if (irq_remapping_enabled)
1477 return; 1487 return;
@@ -1482,9 +1492,13 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
1482 * We use logical delivery to get the timer IRQ 1492 * We use logical delivery to get the timer IRQ
1483 * to the first CPU. 1493 * to the first CPU.
1484 */ 1494 */
1495 if (unlikely(apic->cpu_mask_to_apicid_and(apic->target_cpus(),
1496 apic->target_cpus(), &dest)))
1497 dest = BAD_APICID;
1498
1485 entry.dest_mode = apic->irq_dest_mode; 1499 entry.dest_mode = apic->irq_dest_mode;
1486 entry.mask = 0; /* don't mask IRQ for edge */ 1500 entry.mask = 0; /* don't mask IRQ for edge */
1487 entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus()); 1501 entry.dest = dest;
1488 entry.delivery_mode = apic->irq_delivery_mode; 1502 entry.delivery_mode = apic->irq_delivery_mode;
1489 entry.polarity = 0; 1503 entry.polarity = 0;
1490 entry.trigger = 0; 1504 entry.trigger = 0;
@@ -2209,71 +2223,6 @@ void send_cleanup_vector(struct irq_cfg *cfg)
2209 cfg->move_in_progress = 0; 2223 cfg->move_in_progress = 0;
2210} 2224}
2211 2225
2212static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
2213{
2214 int apic, pin;
2215 struct irq_pin_list *entry;
2216 u8 vector = cfg->vector;
2217
2218 for_each_irq_pin(entry, cfg->irq_2_pin) {
2219 unsigned int reg;
2220
2221 apic = entry->apic;
2222 pin = entry->pin;
2223 /*
2224 * With interrupt-remapping, destination information comes
2225 * from interrupt-remapping table entry.
2226 */
2227 if (!irq_remapped(cfg))
2228 io_apic_write(apic, 0x11 + pin*2, dest);
2229 reg = io_apic_read(apic, 0x10 + pin*2);
2230 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
2231 reg |= vector;
2232 io_apic_modify(apic, 0x10 + pin*2, reg);
2233 }
2234}
2235
2236/*
2237 * Either sets data->affinity to a valid value, and returns
2238 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
2239 * leaves data->affinity untouched.
2240 */
2241int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2242 unsigned int *dest_id)
2243{
2244 struct irq_cfg *cfg = data->chip_data;
2245
2246 if (!cpumask_intersects(mask, cpu_online_mask))
2247 return -1;
2248
2249 if (assign_irq_vector(data->irq, data->chip_data, mask))
2250 return -1;
2251
2252 cpumask_copy(data->affinity, mask);
2253
2254 *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain);
2255 return 0;
2256}
2257
2258static int
2259ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2260 bool force)
2261{
2262 unsigned int dest, irq = data->irq;
2263 unsigned long flags;
2264 int ret;
2265
2266 raw_spin_lock_irqsave(&ioapic_lock, flags);
2267 ret = __ioapic_set_affinity(data, mask, &dest);
2268 if (!ret) {
2269 /* Only the high 8 bits are valid. */
2270 dest = SET_APIC_LOGICAL_ID(dest);
2271 __target_IO_APIC_irq(irq, dest, data->chip_data);
2272 }
2273 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2274 return ret;
2275}
2276
2277asmlinkage void smp_irq_move_cleanup_interrupt(void) 2226asmlinkage void smp_irq_move_cleanup_interrupt(void)
2278{ 2227{
2279 unsigned vector, me; 2228 unsigned vector, me;
@@ -2361,6 +2310,87 @@ void irq_force_complete_move(int irq)
2361static inline void irq_complete_move(struct irq_cfg *cfg) { } 2310static inline void irq_complete_move(struct irq_cfg *cfg) { }
2362#endif 2311#endif
2363 2312
2313static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
2314{
2315 int apic, pin;
2316 struct irq_pin_list *entry;
2317 u8 vector = cfg->vector;
2318
2319 for_each_irq_pin(entry, cfg->irq_2_pin) {
2320 unsigned int reg;
2321
2322 apic = entry->apic;
2323 pin = entry->pin;
2324 /*
2325 * With interrupt-remapping, destination information comes
2326 * from interrupt-remapping table entry.
2327 */
2328 if (!irq_remapped(cfg))
2329 io_apic_write(apic, 0x11 + pin*2, dest);
2330 reg = io_apic_read(apic, 0x10 + pin*2);
2331 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
2332 reg |= vector;
2333 io_apic_modify(apic, 0x10 + pin*2, reg);
2334 }
2335}
2336
2337/*
2338 * Either sets data->affinity to a valid value, and returns
2339 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
2340 * leaves data->affinity untouched.
2341 */
2342int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2343 unsigned int *dest_id)
2344{
2345 struct irq_cfg *cfg = data->chip_data;
2346 unsigned int irq = data->irq;
2347 int err;
2348
2349 if (!config_enabled(CONFIG_SMP))
2350 return -1;
2351
2352 if (!cpumask_intersects(mask, cpu_online_mask))
2353 return -EINVAL;
2354
2355 err = assign_irq_vector(irq, cfg, mask);
2356 if (err)
2357 return err;
2358
2359 err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
2360 if (err) {
2361 if (assign_irq_vector(irq, cfg, data->affinity))
2362 pr_err("Failed to recover vector for irq %d\n", irq);
2363 return err;
2364 }
2365
2366 cpumask_copy(data->affinity, mask);
2367
2368 return 0;
2369}
2370
2371static int
2372ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2373 bool force)
2374{
2375 unsigned int dest, irq = data->irq;
2376 unsigned long flags;
2377 int ret;
2378
2379 if (!config_enabled(CONFIG_SMP))
2380 return -1;
2381
2382 raw_spin_lock_irqsave(&ioapic_lock, flags);
2383 ret = __ioapic_set_affinity(data, mask, &dest);
2384 if (!ret) {
2385 /* Only the high 8 bits are valid. */
2386 dest = SET_APIC_LOGICAL_ID(dest);
2387 __target_IO_APIC_irq(irq, dest, data->chip_data);
2388 ret = IRQ_SET_MASK_OK_NOCOPY;
2389 }
2390 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2391 return ret;
2392}
2393
2364static void ack_apic_edge(struct irq_data *data) 2394static void ack_apic_edge(struct irq_data *data)
2365{ 2395{
2366 irq_complete_move(data->chip_data); 2396 irq_complete_move(data->chip_data);
@@ -2540,9 +2570,7 @@ static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
2540 chip->irq_ack = ir_ack_apic_edge; 2570 chip->irq_ack = ir_ack_apic_edge;
2541 chip->irq_eoi = ir_ack_apic_level; 2571 chip->irq_eoi = ir_ack_apic_level;
2542 2572
2543#ifdef CONFIG_SMP
2544 chip->irq_set_affinity = set_remapped_irq_affinity; 2573 chip->irq_set_affinity = set_remapped_irq_affinity;
2545#endif
2546} 2574}
2547#endif /* CONFIG_IRQ_REMAP */ 2575#endif /* CONFIG_IRQ_REMAP */
2548 2576
@@ -2553,9 +2581,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
2553 .irq_unmask = unmask_ioapic_irq, 2581 .irq_unmask = unmask_ioapic_irq,
2554 .irq_ack = ack_apic_edge, 2582 .irq_ack = ack_apic_edge,
2555 .irq_eoi = ack_apic_level, 2583 .irq_eoi = ack_apic_level,
2556#ifdef CONFIG_SMP
2557 .irq_set_affinity = ioapic_set_affinity, 2584 .irq_set_affinity = ioapic_set_affinity,
2558#endif
2559 .irq_retrigger = ioapic_retrigger_irq, 2585 .irq_retrigger = ioapic_retrigger_irq,
2560}; 2586};
2561 2587
@@ -3037,7 +3063,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
3037 if (err) 3063 if (err)
3038 return err; 3064 return err;
3039 3065
3040 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 3066 err = apic->cpu_mask_to_apicid_and(cfg->domain,
3067 apic->target_cpus(), &dest);
3068 if (err)
3069 return err;
3041 3070
3042 if (irq_remapped(cfg)) { 3071 if (irq_remapped(cfg)) {
3043 compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id); 3072 compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id);
@@ -3071,7 +3100,6 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
3071 return err; 3100 return err;
3072} 3101}
3073 3102
3074#ifdef CONFIG_SMP
3075static int 3103static int
3076msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3104msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
3077{ 3105{
@@ -3091,9 +3119,8 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
3091 3119
3092 __write_msi_msg(data->msi_desc, &msg); 3120 __write_msi_msg(data->msi_desc, &msg);
3093 3121
3094 return 0; 3122 return IRQ_SET_MASK_OK_NOCOPY;
3095} 3123}
3096#endif /* CONFIG_SMP */
3097 3124
3098/* 3125/*
3099 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, 3126 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
@@ -3104,9 +3131,7 @@ static struct irq_chip msi_chip = {
3104 .irq_unmask = unmask_msi_irq, 3131 .irq_unmask = unmask_msi_irq,
3105 .irq_mask = mask_msi_irq, 3132 .irq_mask = mask_msi_irq,
3106 .irq_ack = ack_apic_edge, 3133 .irq_ack = ack_apic_edge,
3107#ifdef CONFIG_SMP
3108 .irq_set_affinity = msi_set_affinity, 3134 .irq_set_affinity = msi_set_affinity,
3109#endif
3110 .irq_retrigger = ioapic_retrigger_irq, 3135 .irq_retrigger = ioapic_retrigger_irq,
3111}; 3136};
3112 3137
@@ -3191,7 +3216,6 @@ void native_teardown_msi_irq(unsigned int irq)
3191} 3216}
3192 3217
3193#ifdef CONFIG_DMAR_TABLE 3218#ifdef CONFIG_DMAR_TABLE
3194#ifdef CONFIG_SMP
3195static int 3219static int
3196dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3220dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
3197 bool force) 3221 bool force)
@@ -3213,19 +3237,15 @@ dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
3213 3237
3214 dmar_msi_write(irq, &msg); 3238 dmar_msi_write(irq, &msg);
3215 3239
3216 return 0; 3240 return IRQ_SET_MASK_OK_NOCOPY;
3217} 3241}
3218 3242
3219#endif /* CONFIG_SMP */
3220
3221static struct irq_chip dmar_msi_type = { 3243static struct irq_chip dmar_msi_type = {
3222 .name = "DMAR_MSI", 3244 .name = "DMAR_MSI",
3223 .irq_unmask = dmar_msi_unmask, 3245 .irq_unmask = dmar_msi_unmask,
3224 .irq_mask = dmar_msi_mask, 3246 .irq_mask = dmar_msi_mask,
3225 .irq_ack = ack_apic_edge, 3247 .irq_ack = ack_apic_edge,
3226#ifdef CONFIG_SMP
3227 .irq_set_affinity = dmar_msi_set_affinity, 3248 .irq_set_affinity = dmar_msi_set_affinity,
3228#endif
3229 .irq_retrigger = ioapic_retrigger_irq, 3249 .irq_retrigger = ioapic_retrigger_irq,
3230}; 3250};
3231 3251
@@ -3246,7 +3266,6 @@ int arch_setup_dmar_msi(unsigned int irq)
3246 3266
3247#ifdef CONFIG_HPET_TIMER 3267#ifdef CONFIG_HPET_TIMER
3248 3268
3249#ifdef CONFIG_SMP
3250static int hpet_msi_set_affinity(struct irq_data *data, 3269static int hpet_msi_set_affinity(struct irq_data *data,
3251 const struct cpumask *mask, bool force) 3270 const struct cpumask *mask, bool force)
3252{ 3271{
@@ -3266,19 +3285,15 @@ static int hpet_msi_set_affinity(struct irq_data *data,
3266 3285
3267 hpet_msi_write(data->handler_data, &msg); 3286 hpet_msi_write(data->handler_data, &msg);
3268 3287
3269 return 0; 3288 return IRQ_SET_MASK_OK_NOCOPY;
3270} 3289}
3271 3290
3272#endif /* CONFIG_SMP */
3273
3274static struct irq_chip hpet_msi_type = { 3291static struct irq_chip hpet_msi_type = {
3275 .name = "HPET_MSI", 3292 .name = "HPET_MSI",
3276 .irq_unmask = hpet_msi_unmask, 3293 .irq_unmask = hpet_msi_unmask,
3277 .irq_mask = hpet_msi_mask, 3294 .irq_mask = hpet_msi_mask,
3278 .irq_ack = ack_apic_edge, 3295 .irq_ack = ack_apic_edge,
3279#ifdef CONFIG_SMP
3280 .irq_set_affinity = hpet_msi_set_affinity, 3296 .irq_set_affinity = hpet_msi_set_affinity,
3281#endif
3282 .irq_retrigger = ioapic_retrigger_irq, 3297 .irq_retrigger = ioapic_retrigger_irq,
3283}; 3298};
3284 3299
@@ -3313,8 +3328,6 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
3313 */ 3328 */
3314#ifdef CONFIG_HT_IRQ 3329#ifdef CONFIG_HT_IRQ
3315 3330
3316#ifdef CONFIG_SMP
3317
3318static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) 3331static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3319{ 3332{
3320 struct ht_irq_msg msg; 3333 struct ht_irq_msg msg;
@@ -3339,25 +3352,23 @@ ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
3339 return -1; 3352 return -1;
3340 3353
3341 target_ht_irq(data->irq, dest, cfg->vector); 3354 target_ht_irq(data->irq, dest, cfg->vector);
3342 return 0; 3355 return IRQ_SET_MASK_OK_NOCOPY;
3343} 3356}
3344 3357
3345#endif
3346
3347static struct irq_chip ht_irq_chip = { 3358static struct irq_chip ht_irq_chip = {
3348 .name = "PCI-HT", 3359 .name = "PCI-HT",
3349 .irq_mask = mask_ht_irq, 3360 .irq_mask = mask_ht_irq,
3350 .irq_unmask = unmask_ht_irq, 3361 .irq_unmask = unmask_ht_irq,
3351 .irq_ack = ack_apic_edge, 3362 .irq_ack = ack_apic_edge,
3352#ifdef CONFIG_SMP
3353 .irq_set_affinity = ht_set_affinity, 3363 .irq_set_affinity = ht_set_affinity,
3354#endif
3355 .irq_retrigger = ioapic_retrigger_irq, 3364 .irq_retrigger = ioapic_retrigger_irq,
3356}; 3365};
3357 3366
3358int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) 3367int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3359{ 3368{
3360 struct irq_cfg *cfg; 3369 struct irq_cfg *cfg;
3370 struct ht_irq_msg msg;
3371 unsigned dest;
3361 int err; 3372 int err;
3362 3373
3363 if (disable_apic) 3374 if (disable_apic)
@@ -3365,36 +3376,37 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3365 3376
3366 cfg = irq_cfg(irq); 3377 cfg = irq_cfg(irq);
3367 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3378 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3368 if (!err) { 3379 if (err)
3369 struct ht_irq_msg msg; 3380 return err;
3370 unsigned dest; 3381
3382 err = apic->cpu_mask_to_apicid_and(cfg->domain,
3383 apic->target_cpus(), &dest);
3384 if (err)
3385 return err;
3371 3386
3372 dest = apic->cpu_mask_to_apicid_and(cfg->domain, 3387 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3373 apic->target_cpus());
3374 3388
3375 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3389 msg.address_lo =
3390 HT_IRQ_LOW_BASE |
3391 HT_IRQ_LOW_DEST_ID(dest) |
3392 HT_IRQ_LOW_VECTOR(cfg->vector) |
3393 ((apic->irq_dest_mode == 0) ?
3394 HT_IRQ_LOW_DM_PHYSICAL :
3395 HT_IRQ_LOW_DM_LOGICAL) |
3396 HT_IRQ_LOW_RQEOI_EDGE |
3397 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3398 HT_IRQ_LOW_MT_FIXED :
3399 HT_IRQ_LOW_MT_ARBITRATED) |
3400 HT_IRQ_LOW_IRQ_MASKED;
3376 3401
3377 msg.address_lo = 3402 write_ht_irq_msg(irq, &msg);
3378 HT_IRQ_LOW_BASE |
3379 HT_IRQ_LOW_DEST_ID(dest) |
3380 HT_IRQ_LOW_VECTOR(cfg->vector) |
3381 ((apic->irq_dest_mode == 0) ?
3382 HT_IRQ_LOW_DM_PHYSICAL :
3383 HT_IRQ_LOW_DM_LOGICAL) |
3384 HT_IRQ_LOW_RQEOI_EDGE |
3385 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3386 HT_IRQ_LOW_MT_FIXED :
3387 HT_IRQ_LOW_MT_ARBITRATED) |
3388 HT_IRQ_LOW_IRQ_MASKED;
3389 3403
3390 write_ht_irq_msg(irq, &msg); 3404 irq_set_chip_and_handler_name(irq, &ht_irq_chip,
3405 handle_edge_irq, "edge");
3391 3406
3392 irq_set_chip_and_handler_name(irq, &ht_irq_chip, 3407 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
3393 handle_edge_irq, "edge");
3394 3408
3395 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); 3409 return 0;
3396 }
3397 return err;
3398} 3410}
3399#endif /* CONFIG_HT_IRQ */ 3411#endif /* CONFIG_HT_IRQ */
3400 3412
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index f00a68cca37a..d661ee95cabf 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -406,16 +406,13 @@ static inline int numaq_check_phys_apicid_present(int phys_apicid)
406 * We use physical apicids here, not logical, so just return the default 406 * We use physical apicids here, not logical, so just return the default
407 * physical broadcast to stop people from breaking us 407 * physical broadcast to stop people from breaking us
408 */ 408 */
409static unsigned int numaq_cpu_mask_to_apicid(const struct cpumask *cpumask) 409static int
410{
411 return 0x0F;
412}
413
414static inline unsigned int
415numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 410numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
416 const struct cpumask *andmask) 411 const struct cpumask *andmask,
412 unsigned int *apicid)
417{ 413{
418 return 0x0F; 414 *apicid = 0x0F;
415 return 0;
419} 416}
420 417
421/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ 418/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
@@ -441,20 +438,6 @@ static int probe_numaq(void)
441 return found_numaq; 438 return found_numaq;
442} 439}
443 440
444static void numaq_vector_allocation_domain(int cpu, struct cpumask *retmask)
445{
446 /* Careful. Some cpus do not strictly honor the set of cpus
447 * specified in the interrupt destination when using lowest
448 * priority interrupt delivery mode.
449 *
450 * In particular there was a hyperthreading cpu observed to
451 * deliver interrupts to the wrong hyperthread when only one
452 * hyperthread was specified in the interrupt desitination.
453 */
454 cpumask_clear(retmask);
455 cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
456}
457
458static void numaq_setup_portio_remap(void) 441static void numaq_setup_portio_remap(void)
459{ 442{
460 int num_quads = num_online_nodes(); 443 int num_quads = num_online_nodes();
@@ -491,7 +474,7 @@ static struct apic __refdata apic_numaq = {
491 .check_apicid_used = numaq_check_apicid_used, 474 .check_apicid_used = numaq_check_apicid_used,
492 .check_apicid_present = numaq_check_apicid_present, 475 .check_apicid_present = numaq_check_apicid_present,
493 476
494 .vector_allocation_domain = numaq_vector_allocation_domain, 477 .vector_allocation_domain = flat_vector_allocation_domain,
495 .init_apic_ldr = numaq_init_apic_ldr, 478 .init_apic_ldr = numaq_init_apic_ldr,
496 479
497 .ioapic_phys_id_map = numaq_ioapic_phys_id_map, 480 .ioapic_phys_id_map = numaq_ioapic_phys_id_map,
@@ -509,7 +492,6 @@ static struct apic __refdata apic_numaq = {
509 .set_apic_id = NULL, 492 .set_apic_id = NULL,
510 .apic_id_mask = 0x0F << 24, 493 .apic_id_mask = 0x0F << 24,
511 494
512 .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid,
513 .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and, 495 .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and,
514 496
515 .send_IPI_mask = numaq_send_IPI_mask, 497 .send_IPI_mask = numaq_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 1b291da09e60..eb35ef9ee63f 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -66,21 +66,6 @@ static void setup_apic_flat_routing(void)
 #endif
 }
 
-static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/*
-	 * Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt desitination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
 /* should be called last. */
 static int probe_default(void)
 {
@@ -105,7 +90,7 @@ static struct apic apic_default = {
 	.check_apicid_used		= default_check_apicid_used,
 	.check_apicid_present		= default_check_apicid_present,
 
-	.vector_allocation_domain	= default_vector_allocation_domain,
+	.vector_allocation_domain	= flat_vector_allocation_domain,
 	.init_apic_ldr			= default_init_apic_ldr,
 
 	.ioapic_phys_id_map		= default_ioapic_phys_id_map,
@@ -123,8 +108,7 @@ static struct apic apic_default = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0x0F << 24,
 
-	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= default_send_IPI_mask_logical,
 	.send_IPI_mask_allbutself	= default_send_IPI_mask_allbutself_logical,
@@ -208,6 +192,9 @@ void __init default_setup_apic_routing(void)
 
 	if (apic->setup_apic_routing)
 		apic->setup_apic_routing();
+
+	if (x86_platform.apic_post_init)
+		x86_platform.apic_post_init();
 }
 
 void __init generic_apic_probe(void)
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 3fe986698929..1793dba7a741 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -23,11 +23,6 @@
 #include <asm/ipi.h>
 #include <asm/setup.h>
 
-static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
-{
-	return hard_smp_processor_id() >> index_msb;
-}
-
 /*
  * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
  */
@@ -48,10 +43,8 @@ void __init default_setup_apic_routing(void)
 		}
 	}
 
-	if (is_vsmp_box()) {
-		/* need to update phys_pkg_id */
-		apic->phys_pkg_id = apicid_phys_pkg_id;
-	}
+	if (x86_platform.apic_post_init)
+		x86_platform.apic_post_init();
 }
 
 /* Same for both flat and physical. */
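Both the 32-bit and 64-bit probe paths now finish by invoking an optional x86_platform.apic_post_init() callback, so platform code such as vSMP can patch the chosen apic driver after selection instead of generic code calling is_vsmp_box() directly. A stand-alone sketch of that optional-hook pattern follows; the names (struct platform_ops, quirky_post_init, etc.) are invented for illustration, not the kernel's.

/* Sketch of an optional post-init hook: generic code only honours the
 * callback if a platform installed one; otherwise nothing happens. */
#include <stdio.h>

struct driver { const char *name; };

struct platform_ops {
	void (*post_init)(void);	/* NULL unless a platform overrides it */
};

static struct driver chosen = { "default" };
static struct platform_ops platform;	/* zero-initialized: no hook installed */

static void quirky_post_init(void)
{
	chosen.name = "default (patched by platform)";
}

static void setup_routing(void)
{
	/* the generic path stays platform-agnostic */
	if (platform.post_init)
		platform.post_init();
}

int main(void)
{
	platform.post_init = quirky_post_init;	/* a platform opting in */
	setup_routing();
	printf("%s\n", chosen.name);
	return 0;
}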
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index e97d542ccdd0..77c95c0e1bf7 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -265,43 +265,48 @@ static int summit_check_phys_apicid_present(int physical_apicid)
 	return 1;
 }
 
-static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int
+summit_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
 {
 	unsigned int round = 0;
-	int cpu, apicid = 0;
+	unsigned int cpu, apicid = 0;
 
 	/*
 	 * The cpus in the mask must all be on the apic cluster.
 	 */
-	for_each_cpu(cpu, cpumask) {
+	for_each_cpu_and(cpu, cpumask, cpu_online_mask) {
 		int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
 
 		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
 			pr_err("Not a valid mask!\n");
-			return BAD_APICID;
+			return -EINVAL;
 		}
 		apicid |= new_apicid;
 		round++;
 	}
-	return apicid;
+	if (!round)
+		return -EINVAL;
+	*dest_id = apicid;
+	return 0;
 }
 
-static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
-			      const struct cpumask *andmask)
+static int
+summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+			      const struct cpumask *andmask,
+			      unsigned int *apicid)
 {
-	int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 	cpumask_var_t cpumask;
+	*apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 
 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return apicid;
+		return 0;
 
 	cpumask_and(cpumask, inmask, andmask);
-	cpumask_and(cpumask, cpumask, cpu_online_mask);
-	apicid = summit_cpu_mask_to_apicid(cpumask);
+	summit_cpu_mask_to_apicid(cpumask, apicid);
 
 	free_cpumask_var(cpumask);
 
-	return apicid;
+	return 0;
 }
 
 /*
@@ -322,20 +327,6 @@ static int probe_summit(void)
 	return 0;
 }
 
-static void summit_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/* Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt desitination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
 #ifdef CONFIG_X86_SUMMIT_NUMA
 static struct rio_table_hdr *rio_table_hdr;
 static struct scal_detail *scal_devs[MAX_NUMNODES];
@@ -513,7 +504,7 @@ static struct apic apic_summit = {
 	.check_apicid_used		= summit_check_apicid_used,
 	.check_apicid_present		= summit_check_apicid_present,
 
-	.vector_allocation_domain	= summit_vector_allocation_domain,
+	.vector_allocation_domain	= flat_vector_allocation_domain,
 	.init_apic_ldr			= summit_init_apic_ldr,
 
 	.ioapic_phys_id_map		= summit_ioapic_phys_id_map,
@@ -531,7 +522,6 @@ static struct apic apic_summit = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0xFF << 24,
 
-	.cpu_mask_to_apicid		= summit_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= summit_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= summit_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index ff35cff0e1a7..c88baa4ff0e5 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -81,7 +81,7 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 }
 
 static void
- x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
 {
 	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
 }
@@ -96,36 +96,37 @@ static void x2apic_send_IPI_all(int vector)
 	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static int
+x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+			      const struct cpumask *andmask,
+			      unsigned int *apicid)
 {
-	/*
-	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
-	 * May as well be the first.
-	 */
-	int cpu = cpumask_first(cpumask);
+	u32 dest = 0;
+	u16 cluster;
+	int i;
 
-	if ((unsigned)cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_logical_apicid, cpu);
-	else
-		return BAD_APICID;
-}
+	for_each_cpu_and(i, cpumask, andmask) {
+		if (!cpumask_test_cpu(i, cpu_online_mask))
+			continue;
+		dest = per_cpu(x86_cpu_to_logical_apicid, i);
+		cluster = x2apic_cluster(i);
+		break;
+	}
 
-static unsigned int
-x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			      const struct cpumask *andmask)
-{
-	int cpu;
+	if (!dest)
+		return -EINVAL;
 
-	/*
-	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
+	for_each_cpu_and(i, cpumask, andmask) {
+		if (!cpumask_test_cpu(i, cpu_online_mask))
+			continue;
+		if (cluster != x2apic_cluster(i))
+			continue;
+		dest |= per_cpu(x86_cpu_to_logical_apicid, i);
 	}
 
-	return per_cpu(x86_cpu_to_logical_apicid, cpu);
+	*apicid = dest;
+
+	return 0;
 }
 
 static void init_x2apic_ldr(void)
@@ -208,6 +209,32 @@ static int x2apic_cluster_probe(void)
 	return 0;
 }
 
+static const struct cpumask *x2apic_cluster_target_cpus(void)
+{
+	return cpu_all_mask;
+}
+
+/*
+ * Each x2apic cluster is an allocation domain.
+ */
+static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
+					      const struct cpumask *mask)
+{
+	/*
+	 * To minimize vector pressure, default case of boot, device bringup
+	 * etc will use a single cpu for the interrupt destination.
+	 *
+	 * On explicit migration requests coming from irqbalance etc,
+	 * interrupts will be routed to the x2apic cluster (cluster-id
+	 * derived from the first cpu in the mask) members specified
+	 * in the mask.
+	 */
+	if (mask == x2apic_cluster_target_cpus())
+		cpumask_copy(retmask, cpumask_of(cpu));
+	else
+		cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
+}
+
 static struct apic apic_x2apic_cluster = {
 
 	.name				= "cluster x2apic",
@@ -219,13 +246,13 @@ static struct apic apic_x2apic_cluster = {
 	.irq_delivery_mode		= dest_LowestPrio,
 	.irq_dest_mode			= 1, /* logical */
 
-	.target_cpus			= x2apic_target_cpus,
+	.target_cpus			= x2apic_cluster_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= APIC_DEST_LOGICAL,
 	.check_apicid_used		= NULL,
 	.check_apicid_present		= NULL,
 
-	.vector_allocation_domain	= x2apic_vector_allocation_domain,
+	.vector_allocation_domain	= cluster_vector_allocation_domain,
 	.init_apic_ldr			= init_x2apic_ldr,
 
 	.ioapic_phys_id_map		= NULL,
@@ -243,7 +270,6 @@ static struct apic apic_x2apic_cluster = {
 	.set_apic_id			= x2apic_set_apic_id,
 	.apic_id_mask			= 0xFFFFFFFFu,
 
-	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= x2apic_send_IPI_mask,
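cluster_vector_allocation_domain() above targets a single CPU in the default/boot case (to conserve vectors) and widens the destination to the members of the first CPU's x2apic cluster only when an explicit affinity mask is supplied. A rough user-space sketch of that two-way decision follows; cpumasks are faked as plain bitmasks and a 16-CPU cluster is assumed, so none of the names below are the kernel's.

/* Sketch of the "one CPU by default, whole cluster on explicit request"
 * decision; vector_domain() is an invented stand-in. */
#include <stdio.h>

#define CLUSTER_SIZE 16UL

static unsigned long all_cpus = ~0UL;	/* stand-in for cpu_all_mask */

static unsigned long vector_domain(int cpu, const unsigned long *requested)
{
	if (requested == &all_cpus)
		/* default/boot case: target just this CPU to save vectors */
		return 1UL << cpu;

	/* explicit affinity: allow the members of this CPU's cluster */
	unsigned long cluster = ((1UL << CLUSTER_SIZE) - 1)
				<< (cpu / CLUSTER_SIZE * CLUSTER_SIZE);
	return *requested & cluster;
}

int main(void)
{
	unsigned long wanted = 0x00030003UL;

	printf("default:  %#lx\n", vector_domain(1, &all_cpus));
	printf("explicit: %#lx\n", vector_domain(1, &wanted));
	return 0;
}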
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index c17e982db275..e03a1e180e81 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -76,38 +76,6 @@ static void x2apic_send_IPI_all(int vector)
 	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	int cpu = cpumask_first(cpumask);
-
-	if ((unsigned)cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_apicid, cpu);
-	else
-		return BAD_APICID;
-}
-
-static unsigned int
-x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			      const struct cpumask *andmask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	}
-
-	return per_cpu(x86_cpu_to_apicid, cpu);
-}
-
 static void init_x2apic_ldr(void)
 {
 }
@@ -131,13 +99,13 @@ static struct apic apic_x2apic_phys = {
 	.irq_delivery_mode		= dest_Fixed,
 	.irq_dest_mode			= 0, /* physical */
 
-	.target_cpus			= x2apic_target_cpus,
+	.target_cpus			= online_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= 0,
 	.check_apicid_used		= NULL,
 	.check_apicid_present		= NULL,
 
-	.vector_allocation_domain	= x2apic_vector_allocation_domain,
+	.vector_allocation_domain	= default_vector_allocation_domain,
 	.init_apic_ldr			= init_x2apic_ldr,
 
 	.ioapic_phys_id_map		= NULL,
@@ -155,8 +123,7 @@ static struct apic apic_x2apic_phys = {
 	.set_apic_id			= x2apic_set_apic_id,
 	.apic_id_mask			= 0xFFFFFFFFu,
 
-	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= x2apic_send_IPI_mask,
 	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index c6d03f7a4401..8cfade9510a4 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -185,17 +185,6 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
 unsigned long sn_rtc_cycles_per_second;
 EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
-static const struct cpumask *uv_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
-static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 #ifdef CONFIG_SMP
@@ -280,25 +269,12 @@ static void uv_init_apic_ldr(void)
 {
 }
 
-static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	int cpu = cpumask_first(cpumask);
-
-	if ((unsigned)cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
-	else
-		return BAD_APICID;
-}
-
-static unsigned int
+static int
 uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			  const struct cpumask *andmask)
+			  const struct cpumask *andmask,
+			  unsigned int *apicid)
 {
-	int cpu;
+	int unsigned cpu;
 
 	/*
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
@@ -308,7 +284,13 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 		if (cpumask_test_cpu(cpu, cpu_online_mask))
 			break;
 	}
-	return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
+
+	if (likely(cpu < nr_cpu_ids)) {
+		*apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
+		return 0;
+	}
+
+	return -EINVAL;
 }
 
 static unsigned int x2apic_get_apic_id(unsigned long x)
@@ -362,13 +344,13 @@ static struct apic __refdata apic_x2apic_uv_x = {
 	.irq_delivery_mode		= dest_Fixed,
 	.irq_dest_mode			= 0, /* physical */
 
-	.target_cpus			= uv_target_cpus,
+	.target_cpus			= online_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= APIC_DEST_LOGICAL,
 	.check_apicid_used		= NULL,
 	.check_apicid_present		= NULL,
 
-	.vector_allocation_domain	= uv_vector_allocation_domain,
+	.vector_allocation_domain	= default_vector_allocation_domain,
 	.init_apic_ldr			= uv_init_apic_ldr,
 
 	.ioapic_phys_id_map		= NULL,
@@ -386,7 +368,6 @@ static struct apic __refdata apic_x2apic_uv_x = {
 	.set_apic_id			= set_apic_id,
 	.apic_id_mask			= 0xFFFFFFFFu,
 
-	.cpu_mask_to_apicid		= uv_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= uv_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= uv_send_IPI_mask,
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 16be6dc14db1..f4b9b80e1b95 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1031,8 +1031,6 @@ void __init setup_arch(char **cmdline_p)
 
 	x86_init.timers.wallclock_init();
 
-	x86_platform.wallclock_init();
-
 	mcheck_init();
 
 	arch_init_ideal_nops();
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 8eeb55a551b4..992f890283e9 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -16,6 +16,7 @@
 #include <linux/pci_ids.h>
 #include <linux/pci_regs.h>
 #include <linux/smp.h>
+#include <linux/irq.h>
 
 #include <asm/apic.h>
 #include <asm/pci-direct.h>
@@ -95,6 +96,18 @@ static void __init set_vsmp_pv_ops(void)
 	ctl = readl(address + 4);
 	printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n",
 			cap, ctl);
+
+	/* If possible, let the vSMP foundation route the interrupt optimally */
+#ifdef CONFIG_SMP
+	if (cap & ctl & BIT(8)) {
+		ctl &= ~BIT(8);
+#ifdef CONFIG_PROC_FS
+		/* Don't let users change irq affinity via procfs */
+		no_irq_affinity = 1;
+#endif
+	}
+#endif
+
 	if (cap & ctl & (1 << 4)) {
 		/* Setup irq ops and turn on vSMP IRQ fastpath handling */
 		pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
@@ -102,12 +115,11 @@ static void __init set_vsmp_pv_ops(void)
 		pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
 		pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
 		pv_init_ops.patch = vsmp_patch;
-
 		ctl &= ~(1 << 4);
-		writel(ctl, address + 4);
-		ctl = readl(address + 4);
-		printk(KERN_INFO "vSMP CTL: control set to:0x%08x\n", ctl);
 	}
+	writel(ctl, address + 4);
+	ctl = readl(address + 4);
+	pr_info("vSMP CTL: control set to:0x%08x\n", ctl);
 
 	early_iounmap(address, 8);
 }
@@ -187,12 +199,36 @@ static void __init vsmp_cap_cpus(void)
 #endif
 }
 
+static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
+{
+	return hard_smp_processor_id() >> index_msb;
+}
+
+/*
+ * In vSMP, all cpus should be capable of handling interrupts, regardless of
+ * the APIC used.
+ */
+static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
+					  const struct cpumask *mask)
+{
+	cpumask_setall(retmask);
+}
+
+static void vsmp_apic_post_init(void)
+{
+	/* need to update phys_pkg_id */
+	apic->phys_pkg_id = apicid_phys_pkg_id;
+	apic->vector_allocation_domain = fill_vector_allocation_domain;
+}
+
 void __init vsmp_init(void)
 {
 	detect_vsmp_box();
 	if (!is_vsmp_box())
 		return;
 
+	x86_platform.apic_post_init = vsmp_apic_post_init;
+
 	vsmp_cap_cpus();
 
 	set_vsmp_pv_ops();
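The vSMP changes negotiate features against the CTL register: a feature is taken over only when both its capability bit and its control bit are set, the control bit is cleared to acknowledge it, and when bit 8 is handed to the vSMP foundation the kernel also sets no_irq_affinity so procfs cannot override the routing. Below is a small stand-alone sketch of that negotiation, with the register access simulated by plain variables rather than readl()/writel(); it only illustrates the bit logic, not real vSMP behaviour.

/* Sketch of the cap/ctl handshake: check cap & ctl & BIT(n), then clear
 * the control bit to acknowledge taking over the feature. */
#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned int cap = BIT(8) | BIT(4);	/* what the hardware offers */
	unsigned int ctl = BIT(8) | BIT(4);	/* what is still unhandled   */
	int no_irq_affinity = 0;

	if (cap & ctl & BIT(8)) {		/* foundation can route IRQs  */
		ctl &= ~BIT(8);			/* acknowledge: let it do so  */
		no_irq_affinity = 1;		/* and block procfs overrides */
	}

	if (cap & ctl & BIT(4))			/* IRQ fastpath handling      */
		ctl &= ~BIT(4);

	printf("ctl written back: %#x, no_irq_affinity=%d\n",
	       ctl, no_irq_affinity);
	return 0;
}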
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 35c5e543f550..9f3167e891ef 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -29,7 +29,6 @@ void __init x86_init_uint_noop(unsigned int unused) { }
 void __init x86_init_pgd_noop(pgd_t *unused) { }
 int __init iommu_init_noop(void) { return 0; }
 void iommu_shutdown_noop(void) { }
-void wallclock_init_noop(void) { }
 
 /*
  * The platform setup functions are preset with the default functions
@@ -101,7 +100,6 @@ static int default_i8042_detect(void) { return 1; };
 
 struct x86_platform_ops x86_platform = {
 	.calibrate_tsc			= native_calibrate_tsc,
-	.wallclock_init			= wallclock_init_noop,
 	.get_wallclock			= mach_get_cmos_time,
 	.set_wallclock			= mach_set_rtc_mmss,
 	.iommu_shutdown			= iommu_shutdown_noop,
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index f25c2765a5c9..acf7752da952 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -135,6 +135,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	unsigned long mmr_value;
 	struct uv_IO_APIC_route_entry *entry;
 	int mmr_pnode, err;
+	unsigned int dest;
 
 	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
 			sizeof(unsigned long));
@@ -143,6 +144,10 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	if (err != 0)
 		return err;
 
+	err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest);
+	if (err != 0)
+		return err;
+
 	if (limit == UV_AFFINITY_CPU)
 		irq_set_status_flags(irq, IRQ_NO_BALANCING);
 	else
@@ -159,7 +164,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	entry->polarity = 0;
 	entry->trigger = 0;
 	entry->mask = 0;
-	entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
+	entry->dest = dest;
 
 	mmr_pnode = uv_blade_to_pnode(mmr_blade);
 	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -222,7 +227,7 @@ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
 	if (cfg->move_in_progress)
 		send_cleanup_vector(cfg);
 
-	return 0;
+	return IRQ_SET_MASK_OK_NOCOPY;
 }
 
 /*
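arch_enable_uv_irq() now computes the destination through apic->cpu_mask_to_apicid_and() and checks its error code before the routing entry is programmed, and the affinity setter returns IRQ_SET_MASK_OK_NOCOPY to tell the IRQ core not to copy the mask into irq_data again. A sketch of that validate-then-commit ordering follows; pick_dest() and struct route_entry are invented for illustration and do not exist in the kernel.

/* Sketch: validate the destination first, touch the (simulated)
 * routing entry only once nothing can fail anymore. */
#include <errno.h>
#include <stdio.h>

struct route_entry { unsigned int vector, dest; };

static int pick_dest(unsigned int online_mask, unsigned int *dest)
{
	if (!online_mask)
		return -EINVAL;		/* nothing eligible: bail out early */
	*dest = (unsigned int)__builtin_ctz(online_mask);
	return 0;
}

int main(void)
{
	struct route_entry entry = { .vector = 0x30 };
	unsigned int dest;
	int err = pick_dest(0x0c, &dest);

	if (err) {
		fprintf(stderr, "no destination: %d\n", err);
		return 1;
	}
	entry.dest = dest;		/* commit only after validation */
	printf("vector %#x -> cpu %u\n", entry.vector, entry.dest);
	return 0;
}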
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 6d347064b8b0..e0b18f3ae9a8 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -902,7 +902,6 @@ static int intel_setup_ioapic_entry(int irq,
 	return 0;
 }
 
-#ifdef CONFIG_SMP
 /*
  * Migrate the IO-APIC irq in the presence of intr-remapping.
  *
@@ -924,6 +923,10 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	struct irq_cfg *cfg = data->chip_data;
 	unsigned int dest, irq = data->irq;
 	struct irte irte;
+	int err;
+
+	if (!config_enabled(CONFIG_SMP))
+		return -EINVAL;
 
 	if (!cpumask_intersects(mask, cpu_online_mask))
 		return -EINVAL;
@@ -931,10 +934,16 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	if (get_irte(irq, &irte))
 		return -EBUSY;
 
-	if (assign_irq_vector(irq, cfg, mask))
-		return -EBUSY;
+	err = assign_irq_vector(irq, cfg, mask);
+	if (err)
+		return err;
 
-	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
+	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
+	if (err) {
+		if (assign_irq_vector(irq, cfg, data->affinity))
+			pr_err("Failed to recover vector for irq %d\n", irq);
+		return err;
+	}
 
 	irte.vector = cfg->vector;
 	irte.dest_id = IRTE_DEST(dest);
@@ -956,7 +965,6 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	cpumask_copy(data->affinity, mask);
 	return 0;
 }
-#endif
 
 static void intel_compose_msi_msg(struct pci_dev *pdev,
 				  unsigned int irq, unsigned int dest,
@@ -1058,9 +1066,7 @@ struct irq_remap_ops intel_irq_remap_ops = {
 	.reenable		= reenable_irq_remapping,
 	.enable_faulting	= enable_drhd_fault_handling,
 	.setup_ioapic_entry	= intel_setup_ioapic_entry,
-#ifdef CONFIG_SMP
 	.set_affinity		= intel_ioapic_set_affinity,
-#endif
 	.free_irq		= free_irte,
 	.compose_msi_msg	= intel_compose_msi_msg,
 	.msi_alloc_irq		= intel_msi_alloc_irq,
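intel_ioapic_set_affinity() and (below) set_remapped_irq_affinity() now test config_enabled(CONFIG_SMP) inside the function instead of wrapping whole blocks in #ifdef CONFIG_SMP, so the code is always parsed and type-checked and the dead branch is simply folded away on UP builds. A stand-alone sketch of that pattern follows; IS_SMP_BUILD is a made-up stand-in for the real config_enabled(CONFIG_SMP) macro, defined here from an ordinary preprocessor symbol.

/* Sketch of the compile-time-constant-check pattern: the branch is
 * still compiled (and type-checked) but the optimizer drops it when
 * the constant says it can never be taken. */
#include <errno.h>
#include <stdio.h>

#ifndef UP_BUILD
#define IS_SMP_BUILD 1
#else
#define IS_SMP_BUILD 0
#endif

static int set_affinity(unsigned int irq, unsigned long mask)
{
	if (!IS_SMP_BUILD)
		return -EINVAL;	/* folded away on SMP, always taken on UP */

	printf("irq %u now bound to mask %#lx\n", irq, mask);
	return 0;
}

int main(void)
{
	return set_affinity(16, 0x3) ? 1 : 0;
}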
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 40cda8e98d87..1d29b1c66e72 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -111,16 +111,15 @@ int setup_ioapic_remapped_entry(int irq,
 				 vector, attr);
 }
 
-#ifdef CONFIG_SMP
 int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
 			      bool force)
 {
-	if (!remap_ops || !remap_ops->set_affinity)
+	if (!config_enabled(CONFIG_SMP) || !remap_ops ||
+	    !remap_ops->set_affinity)
 		return 0;
 
 	return remap_ops->set_affinity(data, mask, force);
 }
-#endif
 
 void free_remapped_irq(int irq)
 {
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index be9d72950c51..b12974cc1dfe 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -59,11 +59,9 @@ struct irq_remap_ops {
 			    unsigned int, int,
 			    struct io_apic_irq_attr *);
 
-#ifdef CONFIG_SMP
 	/* Set the CPU affinity of a remapped interrupt */
 	int (*set_affinity)(struct irq_data *data, const struct cpumask *mask,
 			    bool force);
-#endif
 
 	/* Free an IRQ */
 	int (*free_irq)(int);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a5261e3d2e3c..553fb66da130 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -150,9 +150,7 @@ struct irq_data {
 	void			*handler_data;
 	void			*chip_data;
 	struct msi_desc		*msi_desc;
-#ifdef CONFIG_SMP
 	cpumask_var_t		affinity;
-#endif
 };
 
 /*