author		Ingo Molnar <mingo@kernel.org>	2012-06-18 05:09:49 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-06-18 05:09:49 -0400
commit		8461689c67fc2ad3040bc019cccb541bccb5648f (patch)
tree		eafcf756975739ef04aa91be2e053d6e1e8b9ccb
parent		d48daf37a3d2e2b28a61e615c0fc538301edb0dd (diff)
parent		7eb9ae0799b1e9f0b77733b432bc5f6f055b020b (diff)
Merge branch 'x86/apic' into x86/platform
Merge in x86/apic to solve a vector_allocation_domain() API change
semantic merge conflict.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
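
For context on the conflict: the x86/apic branch changes two operations on
struct apic. ->vector_allocation_domain() now returns bool (whether further
domains may be tried during vector allocation), and the old pair
->cpu_mask_to_apicid() / ->cpu_mask_to_apicid_and() collapses into a single
->cpu_mask_to_apicid_and() that returns an error code and hands the APIC ID
back through an out-parameter, so BAD_APICID no longer has to serve as an
in-band error value. Below is a minimal userspace sketch of that
calling-convention change, with simplified stand-in types and hypothetical
helper names (old_cpu_mask_to_apicid, new_cpu_mask_to_apicid_and); it is an
illustration of the convention, not the kernel code itself:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in (assumption): one unsigned long models a cpumask;
	 * the kernel's struct cpumask and per-cpu lookups are elided. */
	typedef unsigned long cpumask_t;

	#define BAD_APICID 0xFFFFu	/* old in-band error marker */

	/* Old convention: APIC ID and error share the return channel. */
	static unsigned int old_cpu_mask_to_apicid(cpumask_t mask)
	{
		if (!mask)
			return BAD_APICID;
		return (unsigned int)(mask & 0xFFu);
	}

	/* New convention: an error code is returned and the APIC ID is
	 * written through an out-parameter (the kernel uses -EINVAL). */
	static int new_cpu_mask_to_apicid_and(cpumask_t mask, cpumask_t andmask,
					      unsigned int *apicid)
	{
		cpumask_t m = mask & andmask;

		if (!m)
			return -1;	/* stands in for -EINVAL */
		*apicid = (unsigned int)(m & 0xFFu);
		return 0;
	}

	int main(void)
	{
		unsigned int dest;

		/* New style: failure is an explicit error code. */
		if (new_cpu_mask_to_apicid_and(0x0Ful, 0x03ul, &dest) == 0)
			printf("dest apicid: %u\n", dest);

		/* Old style: caller compares against the sentinel. */
		if (old_cpu_mask_to_apicid(0) == BAD_APICID)
			printf("old API: BAD_APICID sentinel\n");

		return 0;
	}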
-rw-r--r--  arch/x86/include/asm/apic.h            |  60
-rw-r--r--  arch/x86/include/asm/x2apic.h          |  18
-rw-r--r--  arch/x86/include/asm/x86_init.h        |   2
-rw-r--r--  arch/x86/kernel/apic/apic.c            |  19
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c    |  76
-rw-r--r--  arch/x86/kernel/apic/apic_noop.c       |   9
-rw-r--r--  arch/x86/kernel/apic/apic_numachip.c   |  50
-rw-r--r--  arch/x86/kernel/apic/bigsmp_32.c       |  48
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c       |  51
-rw-r--r--  arch/x86/kernel/apic/io_apic.c         | 288
-rw-r--r--  arch/x86/kernel/apic/numaq_32.c        |  30
-rw-r--r--  arch/x86/kernel/apic/probe_32.c        |  20
-rw-r--r--  arch/x86/kernel/apic/summit_32.c       |  46
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c  |  66
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c     |  39
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c     |  45
-rw-r--r--  arch/x86/kernel/early_printk.c         |  12
-rw-r--r--  arch/x86/kernel/setup.c                |   2
-rw-r--r--  arch/x86/kernel/x86_init.c             |   2
-rw-r--r--  arch/x86/platform/uv/uv_irq.c          |   9
-rw-r--r--  drivers/iommu/intel_irq_remapping.c    |  20
-rw-r--r--  drivers/iommu/irq_remapping.c          |   5
-rw-r--r--  drivers/iommu/irq_remapping.h          |   2
-rw-r--r--  include/linux/irq.h                    |   2

24 files changed, 359 insertions(+), 562 deletions(-)
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index eaff4790ed96..eec240e12091 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -306,7 +306,7 @@ struct apic {
 	unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
 	unsigned long (*check_apicid_present)(int apicid);
 
-	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+	bool (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);
 
 	void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
@@ -331,9 +331,9 @@ struct apic {
 	unsigned long (*set_apic_id)(unsigned int id);
 	unsigned long apic_id_mask;
 
-	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
-	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
-					       const struct cpumask *andmask);
+	int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+				      const struct cpumask *andmask,
+				      unsigned int *apicid);
 
 	/* ipi */
 	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
@@ -537,6 +537,11 @@ static inline const struct cpumask *default_target_cpus(void)
 #endif
 }
 
+static inline const struct cpumask *online_target_cpus(void)
+{
+	return cpu_online_mask;
+}
+
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
 
 
@@ -586,21 +591,50 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
 
 #endif
 
-static inline unsigned int
-default_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int
+flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+			    const struct cpumask *andmask,
+			    unsigned int *apicid)
 {
-	return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+	unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
+				 cpumask_bits(andmask)[0] &
+				 cpumask_bits(cpu_online_mask)[0] &
+				 APIC_ALL_CPUS;
+
+	if (likely(cpu_mask)) {
+		*apicid = (unsigned int)cpu_mask;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
 }
 
-static inline unsigned int
-default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			       const struct cpumask *andmask)
+extern int
+default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+			       const struct cpumask *andmask,
+			       unsigned int *apicid);
+
+static inline bool
+flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-	unsigned long mask1 = cpumask_bits(cpumask)[0];
-	unsigned long mask2 = cpumask_bits(andmask)[0];
-	unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt desitination.
+	 */
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
+	return false;
+}
 
-	return (unsigned int)(mask1 & mask2 & mask3);
+static inline bool
+default_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+	cpumask_copy(retmask, cpumask_of(cpu));
+	return true;
 }
 
 static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
diff --git a/arch/x86/include/asm/x2apic.h b/arch/x86/include/asm/x2apic.h
index 92e54abf89e0..f90f0a587c66 100644
--- a/arch/x86/include/asm/x2apic.h
+++ b/arch/x86/include/asm/x2apic.h
@@ -9,15 +9,6 @@
 #include <asm/ipi.h>
 #include <linux/cpumask.h>
 
-/*
- * Need to use more than cpu 0, because we need more vectors
- * when MSI-X are used.
- */
-static const struct cpumask *x2apic_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
 static int x2apic_apic_id_valid(int apicid)
 {
 	return 1;
@@ -28,15 +19,6 @@ static int x2apic_apic_id_registered(void)
 	return 1;
 }
 
-/*
- * For now each logical cpu is in its own vector allocation domain.
- */
-static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static void
 __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
 {
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index c377d9ccb696..38155f667144 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -156,7 +156,6 @@ struct x86_cpuinit_ops {
 /**
  * struct x86_platform_ops - platform specific runtime functions
  * @calibrate_tsc: calibrate TSC
- * @wallclock_init: init the wallclock device
  * @get_wallclock: get time from HW clock like RTC etc.
  * @set_wallclock: set time back to HW clock
  * @is_untracked_pat_range exclude from PAT logic
@@ -168,7 +167,6 @@ struct x86_cpuinit_ops {
  */
 struct x86_platform_ops {
 	unsigned long (*calibrate_tsc)(void);
-	void (*wallclock_init)(void);
 	unsigned long (*get_wallclock)(void);
 	int (*set_wallclock)(unsigned long nowtime);
 	void (*iommu_shutdown)(void);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 39a222e094af..c421512ca5eb 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2123,6 +2123,25 @@ void default_init_apic_ldr(void)
 	apic_write(APIC_LDR, val);
 }
 
+int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+				   const struct cpumask *andmask,
+				   unsigned int *apicid)
+{
+	unsigned int cpu;
+
+	for_each_cpu_and(cpu, cpumask, andmask) {
+		if (cpumask_test_cpu(cpu, cpu_online_mask))
+			break;
+	}
+
+	if (likely(cpu < nr_cpu_ids)) {
+		*apicid = per_cpu(x86_cpu_to_apicid, cpu);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 /*
  * Power management
  */
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 0e881c46e8c8..00c77cf78e9e 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -36,25 +36,6 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 1;
 }
 
-static const struct cpumask *flat_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
-static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/* Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt desitination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
 /*
  * Set up the logical destination ID.
  *
@@ -92,7 +73,7 @@ static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 }
 
 static void
- flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
+flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
 {
 	unsigned long mask = cpumask_bits(cpumask)[0];
 	int cpu = smp_processor_id();
@@ -186,7 +167,7 @@ static struct apic apic_flat = {
 	.irq_delivery_mode = dest_LowestPrio,
 	.irq_dest_mode = 1, /* logical */
 
-	.target_cpus = flat_target_cpus,
+	.target_cpus = online_target_cpus,
 	.disable_esr = 0,
 	.dest_logical = APIC_DEST_LOGICAL,
 	.check_apicid_used = NULL,
@@ -210,8 +191,7 @@ static struct apic apic_flat = {
 	.set_apic_id = set_apic_id,
 	.apic_id_mask = 0xFFu << 24,
 
-	.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask = flat_send_IPI_mask,
 	.send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
@@ -262,17 +242,6 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
-static const struct cpumask *physflat_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
-static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 {
 	default_send_IPI_mask_sequence_phys(cpumask, vector);
@@ -294,38 +263,6 @@ static void physflat_send_IPI_all(int vector)
 	physflat_send_IPI_mask(cpu_online_mask, vector);
 }
 
-static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	cpu = cpumask_first(cpumask);
-	if ((unsigned)cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_apicid, cpu);
-	else
-		return BAD_APICID;
-}
-
-static unsigned int
-physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-				const struct cpumask *andmask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	}
-	return per_cpu(x86_cpu_to_apicid, cpu);
-}
-
 static int physflat_probe(void)
 {
 	if (apic == &apic_physflat || num_possible_cpus() > 8)
@@ -345,13 +282,13 @@ static struct apic apic_physflat = {
 	.irq_delivery_mode = dest_Fixed,
 	.irq_dest_mode = 0, /* physical */
 
-	.target_cpus = physflat_target_cpus,
+	.target_cpus = online_target_cpus,
 	.disable_esr = 0,
 	.dest_logical = 0,
 	.check_apicid_used = NULL,
 	.check_apicid_present = NULL,
 
-	.vector_allocation_domain = physflat_vector_allocation_domain,
+	.vector_allocation_domain = default_vector_allocation_domain,
 	/* not needed, but shouldn't hurt: */
 	.init_apic_ldr = flat_init_apic_ldr,
 
@@ -370,8 +307,7 @@ static struct apic apic_physflat = {
 	.set_apic_id = set_apic_id,
 	.apic_id_mask = 0xFFu << 24,
 
-	.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask = physflat_send_IPI_mask,
 	.send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index a6e4c6e06c08..65c07fc630a1 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -100,12 +100,12 @@ static unsigned long noop_check_apicid_present(int bit)
 	return physid_isset(bit, phys_cpu_present_map);
 }
 
-static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
+static bool noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	if (cpu != 0)
 		pr_warning("APIC: Vector allocated for non-BSP cpu\n");
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
+	cpumask_copy(retmask, cpumask_of(cpu));
+	return true;
 }
 
 static u32 noop_apic_read(u32 reg)
@@ -159,8 +159,7 @@ struct apic apic_noop = {
 	.set_apic_id = NULL,
 	.apic_id_mask = 0x0F << 24,
 
-	.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask = noop_send_IPI_mask,
 	.send_IPI_mask_allbutself = noop_send_IPI_mask_allbutself,
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 6ec6d5d297c3..bc552cff2578 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -72,17 +72,6 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
 	return initial_apic_id >> index_msb;
 }
 
-static const struct cpumask *numachip_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
-static void numachip_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 	union numachip_csr_g3_ext_irq_gen int_gen;
@@ -157,38 +146,6 @@ static void numachip_send_IPI_self(int vector)
 	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
 }
 
-static unsigned int numachip_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	cpu = cpumask_first(cpumask);
-	if (likely((unsigned)cpu < nr_cpu_ids))
-		return per_cpu(x86_cpu_to_apicid, cpu);
-
-	return BAD_APICID;
-}
-
-static unsigned int
-numachip_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-				const struct cpumask *andmask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	}
-	return per_cpu(x86_cpu_to_apicid, cpu);
-}
-
 static int __init numachip_probe(void)
 {
 	return apic == &apic_numachip;
@@ -253,13 +210,13 @@ static struct apic apic_numachip __refconst = {
 	.irq_delivery_mode = dest_Fixed,
 	.irq_dest_mode = 0, /* physical */
 
-	.target_cpus = numachip_target_cpus,
+	.target_cpus = online_target_cpus,
 	.disable_esr = 0,
 	.dest_logical = 0,
 	.check_apicid_used = NULL,
 	.check_apicid_present = NULL,
 
-	.vector_allocation_domain = numachip_vector_allocation_domain,
+	.vector_allocation_domain = default_vector_allocation_domain,
 	.init_apic_ldr = flat_init_apic_ldr,
 
 	.ioapic_phys_id_map = NULL,
@@ -277,8 +234,7 @@ static struct apic apic_numachip __refconst = {
 	.set_apic_id = set_apic_id,
 	.apic_id_mask = 0xffU << 24,
 
-	.cpu_mask_to_apicid = numachip_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and = numachip_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask = numachip_send_IPI_mask,
 	.send_IPI_mask_allbutself = numachip_send_IPI_mask_allbutself,
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 31fbdbfbf960..d50e3640d5ae 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -26,15 +26,6 @@ static int bigsmp_apic_id_registered(void)
 	return 1;
 }
 
-static const struct cpumask *bigsmp_target_cpus(void)
-{
-#ifdef CONFIG_SMP
-	return cpu_online_mask;
-#else
-	return cpumask_of(0);
-#endif
-}
-
 static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
 {
 	return 0;
@@ -105,32 +96,6 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid)
 	return 1;
 }
 
-/* As we are using single CPU as destination, pick only one CPU here */
-static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	int cpu = cpumask_first(cpumask);
-
-	if (cpu < nr_cpu_ids)
-		return cpu_physical_id(cpu);
-	return BAD_APICID;
-}
-
-static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			      const struct cpumask *andmask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			return cpu_physical_id(cpu);
-	}
-	return BAD_APICID;
-}
-
 static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
 {
 	return cpuid_apic >> index_msb;
@@ -177,12 +142,6 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
 	{ } /* NULL entry stops DMI scanning */
 };
 
-static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static int probe_bigsmp(void)
 {
 	if (def_to_bigsmp)
@@ -205,13 +164,13 @@ static struct apic apic_bigsmp = {
 	/* phys delivery to target CPU: */
 	.irq_dest_mode = 0,
 
-	.target_cpus = bigsmp_target_cpus,
+	.target_cpus = default_target_cpus,
 	.disable_esr = 1,
 	.dest_logical = 0,
 	.check_apicid_used = bigsmp_check_apicid_used,
 	.check_apicid_present = bigsmp_check_apicid_present,
 
-	.vector_allocation_domain = bigsmp_vector_allocation_domain,
+	.vector_allocation_domain = default_vector_allocation_domain,
 	.init_apic_ldr = bigsmp_init_apic_ldr,
 
 	.ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
@@ -229,8 +188,7 @@ static struct apic apic_bigsmp = {
 	.set_apic_id = NULL,
 	.apic_id_mask = 0xFF << 24,
 
-	.cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask = bigsmp_send_IPI_mask,
 	.send_IPI_mask_allbutself = NULL,
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index db4ab1be3c79..0874799a98c6 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -394,21 +394,6 @@ static void es7000_enable_apic_mode(void)
 	WARN(1, "Command failed, status = %x\n", mip_status);
 }
 
-static void es7000_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/* Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt desitination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
-
 static void es7000_wait_for_init_deassert(atomic_t *deassert)
 {
 	while (!atomic_read(deassert))
@@ -540,45 +525,49 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
 	return 1;
 }
 
-static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int
+es7000_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
 {
 	unsigned int round = 0;
-	int cpu, uninitialized_var(apicid);
+	unsigned int cpu, uninitialized_var(apicid);
 
 	/*
 	 * The cpus in the mask must all be on the apic cluster.
 	 */
-	for_each_cpu(cpu, cpumask) {
+	for_each_cpu_and(cpu, cpumask, cpu_online_mask) {
 		int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
 
 		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
 			WARN(1, "Not a valid mask!");
 
-			return BAD_APICID;
+			return -EINVAL;
 		}
-		apicid = new_apicid;
+		apicid |= new_apicid;
 		round++;
 	}
-	return apicid;
+	if (!round)
+		return -EINVAL;
+	*dest_id = apicid;
+	return 0;
 }
 
-static unsigned int
+static int
 es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
-			      const struct cpumask *andmask)
+			      const struct cpumask *andmask,
+			      unsigned int *apicid)
 {
-	int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 	cpumask_var_t cpumask;
+	*apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 
 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return apicid;
+		return 0;
 
 	cpumask_and(cpumask, inmask, andmask);
-	cpumask_and(cpumask, cpumask, cpu_online_mask);
-	apicid = es7000_cpu_mask_to_apicid(cpumask);
+	es7000_cpu_mask_to_apicid(cpumask, apicid);
 
 	free_cpumask_var(cpumask);
 
-	return apicid;
+	return 0;
 }
 
 static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
@@ -638,7 +627,7 @@ static struct apic __refdata apic_es7000_cluster = {
 	.check_apicid_used = es7000_check_apicid_used,
 	.check_apicid_present = es7000_check_apicid_present,
 
-	.vector_allocation_domain = es7000_vector_allocation_domain,
+	.vector_allocation_domain = flat_vector_allocation_domain,
 	.init_apic_ldr = es7000_init_apic_ldr_cluster,
 
 	.ioapic_phys_id_map = es7000_ioapic_phys_id_map,
@@ -656,7 +645,6 @@ static struct apic __refdata apic_es7000_cluster = {
 	.set_apic_id = NULL,
 	.apic_id_mask = 0xFF << 24,
 
-	.cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask = es7000_send_IPI_mask,
@@ -705,7 +693,7 @@ static struct apic __refdata apic_es7000 = {
 	.check_apicid_used = es7000_check_apicid_used,
 	.check_apicid_present = es7000_check_apicid_present,
 
-	.vector_allocation_domain = es7000_vector_allocation_domain,
+	.vector_allocation_domain = flat_vector_allocation_domain,
 	.init_apic_ldr = es7000_init_apic_ldr,
 
 	.ioapic_phys_id_map = es7000_ioapic_phys_id_map,
@@ -723,7 +711,6 @@ static struct apic __refdata apic_es7000 = {
 	.set_apic_id = NULL,
 	.apic_id_mask = 0xFF << 24,
 
-	.cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask = es7000_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ac96561d1a99..a951ef7decb1 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1112,7 +1112,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 	 */
 	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
-	static int current_offset = VECTOR_OFFSET_START % 8;
+	static int current_offset = VECTOR_OFFSET_START % 16;
 	unsigned int old_vector;
 	int cpu, err;
 	cpumask_var_t tmp_mask;
@@ -1126,8 +1126,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	old_vector = cfg->vector;
 	if (old_vector) {
 		cpumask_and(tmp_mask, mask, cpu_online_mask);
-		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
-		if (!cpumask_empty(tmp_mask)) {
+		if (cpumask_subset(tmp_mask, cfg->domain)) {
 			free_cpumask_var(tmp_mask);
 			return 0;
 		}
@@ -1138,20 +1137,30 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	for_each_cpu_and(cpu, mask, cpu_online_mask) {
 		int new_cpu;
 		int vector, offset;
+		bool more_domains;
 
-		apic->vector_allocation_domain(cpu, tmp_mask);
+		more_domains = apic->vector_allocation_domain(cpu, tmp_mask);
+
+		if (cpumask_subset(tmp_mask, cfg->domain)) {
+			free_cpumask_var(tmp_mask);
+			return 0;
+		}
 
 		vector = current_vector;
 		offset = current_offset;
 next:
-		vector += 8;
+		vector += 16;
 		if (vector >= first_system_vector) {
-			/* If out of vectors on large boxen, must share them. */
-			offset = (offset + 1) % 8;
+			offset = (offset + 1) % 16;
 			vector = FIRST_EXTERNAL_VECTOR + offset;
 		}
-		if (unlikely(current_vector == vector))
-			continue;
+
+		if (unlikely(current_vector == vector)) {
+			if (more_domains)
+				continue;
+			else
+				break;
+		}
 
 		if (test_bit(vector, used_vectors))
 			goto next;
@@ -1346,18 +1355,18 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
 
 	if (!IO_APIC_IRQ(irq))
 		return;
-	/*
-	 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
-	 * controllers like 8259. Now that IO-APIC can handle this irq, update
-	 * the cfg->domain.
-	 */
-	if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
-		apic->vector_allocation_domain(0, cfg->domain);
 
 	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
 		return;
 
-	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+	if (apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(),
+					 &dest)) {
+		pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n",
+			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
+		__clear_irq_vector(irq, cfg);
+
+		return;
+	}
 
 	apic_printk(APIC_VERBOSE,KERN_DEBUG
 		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@@ -1366,7 +1375,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
 		    cfg->vector, irq, attr->trigger, attr->polarity, dest);
 
 	if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) {
-		pr_warn("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
+		pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
 			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
 		__clear_irq_vector(irq, cfg);
 
@@ -1469,9 +1478,10 @@ void setup_IO_APIC_irq_extra(u32 gsi)
  * Set up the timer pin, possibly with the 8259A-master behind.
  */
 static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
-					 unsigned int pin, int vector)
+					unsigned int pin, int vector)
 {
 	struct IO_APIC_route_entry entry;
+	unsigned int dest;
 
 	if (irq_remapping_enabled)
 		return;
@@ -1482,9 +1492,13 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
 	 * We use logical delivery to get the timer IRQ
 	 * to the first CPU.
 	 */
+	if (unlikely(apic->cpu_mask_to_apicid_and(apic->target_cpus(),
+						  apic->target_cpus(), &dest)))
+		dest = BAD_APICID;
+
 	entry.dest_mode = apic->irq_dest_mode;
 	entry.mask = 0;	/* don't mask IRQ for edge */
-	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
+	entry.dest = dest;
 	entry.delivery_mode = apic->irq_delivery_mode;
 	entry.polarity = 0;
 	entry.trigger = 0;
@@ -2210,71 +2224,6 @@ void send_cleanup_vector(struct irq_cfg *cfg)
 	cfg->move_in_progress = 0;
 }
 
-static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
-{
-	int apic, pin;
-	struct irq_pin_list *entry;
-	u8 vector = cfg->vector;
-
-	for_each_irq_pin(entry, cfg->irq_2_pin) {
-		unsigned int reg;
-
-		apic = entry->apic;
-		pin = entry->pin;
-		/*
-		 * With interrupt-remapping, destination information comes
-		 * from interrupt-remapping table entry.
-		 */
-		if (!irq_remapped(cfg))
-			io_apic_write(apic, 0x11 + pin*2, dest);
-		reg = io_apic_read(apic, 0x10 + pin*2);
-		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
-		reg |= vector;
-		io_apic_modify(apic, 0x10 + pin*2, reg);
-	}
-}
-
-/*
- * Either sets data->affinity to a valid value, and returns
- * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
- * leaves data->affinity untouched.
- */
-int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-			  unsigned int *dest_id)
-{
-	struct irq_cfg *cfg = data->chip_data;
-
-	if (!cpumask_intersects(mask, cpu_online_mask))
-		return -1;
-
-	if (assign_irq_vector(data->irq, data->chip_data, mask))
-		return -1;
-
-	cpumask_copy(data->affinity, mask);
-
-	*dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain);
-	return 0;
-}
-
-static int
-ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-		    bool force)
-{
-	unsigned int dest, irq = data->irq;
-	unsigned long flags;
-	int ret;
-
-	raw_spin_lock_irqsave(&ioapic_lock, flags);
-	ret = __ioapic_set_affinity(data, mask, &dest);
-	if (!ret) {
-		/* Only the high 8 bits are valid. */
-		dest = SET_APIC_LOGICAL_ID(dest);
-		__target_IO_APIC_irq(irq, dest, data->chip_data);
-	}
-	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
-	return ret;
-}
-
 asmlinkage void smp_irq_move_cleanup_interrupt(void)
 {
 	unsigned vector, me;
@@ -2362,6 +2311,87 @@ void irq_force_complete_move(int irq)
 static inline void irq_complete_move(struct irq_cfg *cfg) { }
 #endif
 
+static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
+{
+	int apic, pin;
+	struct irq_pin_list *entry;
+	u8 vector = cfg->vector;
+
+	for_each_irq_pin(entry, cfg->irq_2_pin) {
+		unsigned int reg;
+
+		apic = entry->apic;
+		pin = entry->pin;
+		/*
+		 * With interrupt-remapping, destination information comes
+		 * from interrupt-remapping table entry.
+		 */
+		if (!irq_remapped(cfg))
+			io_apic_write(apic, 0x11 + pin*2, dest);
+		reg = io_apic_read(apic, 0x10 + pin*2);
+		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
+		reg |= vector;
+		io_apic_modify(apic, 0x10 + pin*2, reg);
+	}
+}
+
+/*
+ * Either sets data->affinity to a valid value, and returns
+ * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
+ * leaves data->affinity untouched.
+ */
+int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+			  unsigned int *dest_id)
+{
+	struct irq_cfg *cfg = data->chip_data;
+	unsigned int irq = data->irq;
+	int err;
+
+	if (!config_enabled(CONFIG_SMP))
+		return -1;
+
+	if (!cpumask_intersects(mask, cpu_online_mask))
+		return -EINVAL;
+
+	err = assign_irq_vector(irq, cfg, mask);
+	if (err)
+		return err;
+
+	err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
+	if (err) {
+		if (assign_irq_vector(irq, cfg, data->affinity))
+			pr_err("Failed to recover vector for irq %d\n", irq);
+		return err;
+	}
+
+	cpumask_copy(data->affinity, mask);
+
+	return 0;
+}
+
+static int
+ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+		    bool force)
+{
+	unsigned int dest, irq = data->irq;
+	unsigned long flags;
+	int ret;
+
+	if (!config_enabled(CONFIG_SMP))
+		return -1;
+
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
+	ret = __ioapic_set_affinity(data, mask, &dest);
+	if (!ret) {
+		/* Only the high 8 bits are valid. */
+		dest = SET_APIC_LOGICAL_ID(dest);
+		__target_IO_APIC_irq(irq, dest, data->chip_data);
+		ret = IRQ_SET_MASK_OK_NOCOPY;
+	}
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+	return ret;
+}
+
 static void ack_apic_edge(struct irq_data *data)
 {
 	irq_complete_move(data->chip_data);
@@ -2541,9 +2571,7 @@ static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
 	chip->irq_ack = ir_ack_apic_edge;
 	chip->irq_eoi = ir_ack_apic_level;
 
-#ifdef CONFIG_SMP
 	chip->irq_set_affinity = set_remapped_irq_affinity;
-#endif
 }
 #endif /* CONFIG_IRQ_REMAP */
 
@@ -2554,9 +2582,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
 	.irq_unmask = unmask_ioapic_irq,
 	.irq_ack = ack_apic_edge,
 	.irq_eoi = ack_apic_level,
-#ifdef CONFIG_SMP
 	.irq_set_affinity = ioapic_set_affinity,
-#endif
 	.irq_retrigger = ioapic_retrigger_irq,
 };
 
@@ -3038,7 +3064,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
 	if (err)
 		return err;
 
-	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+	err = apic->cpu_mask_to_apicid_and(cfg->domain,
+					   apic->target_cpus(), &dest);
+	if (err)
+		return err;
 
 	if (irq_remapped(cfg)) {
 		compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id);
@@ -3072,7 +3101,6 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
 	return err;
 }
 
-#ifdef CONFIG_SMP
 static int
 msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
 {
@@ -3092,9 +3120,8 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
 
 	__write_msi_msg(data->msi_desc, &msg);
 
-	return 0;
+	return IRQ_SET_MASK_OK_NOCOPY;
 }
-#endif /* CONFIG_SMP */
 
 /*
  * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
@@ -3105,9 +3132,7 @@ static struct irq_chip msi_chip = {
 	.irq_unmask = unmask_msi_irq,
 	.irq_mask = mask_msi_irq,
 	.irq_ack = ack_apic_edge,
-#ifdef CONFIG_SMP
 	.irq_set_affinity = msi_set_affinity,
-#endif
 	.irq_retrigger = ioapic_retrigger_irq,
 };
 
@@ -3192,7 +3217,6 @@ void native_teardown_msi_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_DMAR_TABLE
-#ifdef CONFIG_SMP
 static int
 dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
 		      bool force)
@@ -3214,19 +3238,15 @@ dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
 
 	dmar_msi_write(irq, &msg);
 
-	return 0;
+	return IRQ_SET_MASK_OK_NOCOPY;
 }
 
-#endif /* CONFIG_SMP */
-
 static struct irq_chip dmar_msi_type = {
 	.name = "DMAR_MSI",
 	.irq_unmask = dmar_msi_unmask,
 	.irq_mask = dmar_msi_mask,
 	.irq_ack = ack_apic_edge,
-#ifdef CONFIG_SMP
 	.irq_set_affinity = dmar_msi_set_affinity,
-#endif
 	.irq_retrigger = ioapic_retrigger_irq,
 };
 
@@ -3247,7 +3267,6 @@ int arch_setup_dmar_msi(unsigned int irq)
 
 #ifdef CONFIG_HPET_TIMER
 
-#ifdef CONFIG_SMP
 static int hpet_msi_set_affinity(struct irq_data *data,
 				 const struct cpumask *mask, bool force)
 {
@@ -3267,19 +3286,15 @@ static int hpet_msi_set_affinity(struct irq_data *data,
 
 	hpet_msi_write(data->handler_data, &msg);
 
-	return 0;
+	return IRQ_SET_MASK_OK_NOCOPY;
 }
 
-#endif /* CONFIG_SMP */
-
 static struct irq_chip hpet_msi_type = {
 	.name = "HPET_MSI",
 	.irq_unmask = hpet_msi_unmask,
 	.irq_mask = hpet_msi_mask,
 	.irq_ack = ack_apic_edge,
-#ifdef CONFIG_SMP
 	.irq_set_affinity = hpet_msi_set_affinity,
-#endif
 	.irq_retrigger = ioapic_retrigger_irq,
 };
 
@@ -3314,8 +3329,6 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
  */
 #ifdef CONFIG_HT_IRQ
 
-#ifdef CONFIG_SMP
-
 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
 {
 	struct ht_irq_msg msg;
@@ -3340,25 +3353,23 @@ ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
 		return -1;
 
 	target_ht_irq(data->irq, dest, cfg->vector);
-	return 0;
+	return IRQ_SET_MASK_OK_NOCOPY;
 }
 
-#endif
-
 static struct irq_chip ht_irq_chip = {
 	.name = "PCI-HT",
 	.irq_mask = mask_ht_irq,
 	.irq_unmask = unmask_ht_irq,
 	.irq_ack = ack_apic_edge,
-#ifdef CONFIG_SMP
 	.irq_set_affinity = ht_set_affinity,
-#endif
 	.irq_retrigger = ioapic_retrigger_irq,
 };
 
 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 {
 	struct irq_cfg *cfg;
+	struct ht_irq_msg msg;
+	unsigned dest;
 	int err;
 
 	if (disable_apic)
@@ -3366,36 +3377,37 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 
 	cfg = irq_cfg(irq);
 	err = assign_irq_vector(irq, cfg, apic->target_cpus());
-	if (!err) {
-		struct ht_irq_msg msg;
-		unsigned dest;
+	if (err)
+		return err;
+
+	err = apic->cpu_mask_to_apicid_and(cfg->domain,
+					   apic->target_cpus(), &dest);
+	if (err)
+		return err;
 
-		dest = apic->cpu_mask_to_apicid_and(cfg->domain,
-						    apic->target_cpus());
+	msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
-		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
+	msg.address_lo =
+		HT_IRQ_LOW_BASE |
+		HT_IRQ_LOW_DEST_ID(dest) |
+		HT_IRQ_LOW_VECTOR(cfg->vector) |
+		((apic->irq_dest_mode == 0) ?
+			HT_IRQ_LOW_DM_PHYSICAL :
+			HT_IRQ_LOW_DM_LOGICAL) |
+		HT_IRQ_LOW_RQEOI_EDGE |
+		((apic->irq_delivery_mode != dest_LowestPrio) ?
+			HT_IRQ_LOW_MT_FIXED :
+			HT_IRQ_LOW_MT_ARBITRATED) |
+		HT_IRQ_LOW_IRQ_MASKED;
 
-		msg.address_lo =
-			HT_IRQ_LOW_BASE |
-			HT_IRQ_LOW_DEST_ID(dest) |
-			HT_IRQ_LOW_VECTOR(cfg->vector) |
-			((apic->irq_dest_mode == 0) ?
-				HT_IRQ_LOW_DM_PHYSICAL :
-				HT_IRQ_LOW_DM_LOGICAL) |
-			HT_IRQ_LOW_RQEOI_EDGE |
-			((apic->irq_delivery_mode != dest_LowestPrio) ?
-				HT_IRQ_LOW_MT_FIXED :
-				HT_IRQ_LOW_MT_ARBITRATED) |
-			HT_IRQ_LOW_IRQ_MASKED;
+	write_ht_irq_msg(irq, &msg);
 
-		write_ht_irq_msg(irq, &msg);
+	irq_set_chip_and_handler_name(irq, &ht_irq_chip,
+				      handle_edge_irq, "edge");
 
-		irq_set_chip_and_handler_name(irq, &ht_irq_chip,
-					      handle_edge_irq, "edge");
+	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
 
-		dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
-	}
-	return err;
+	return 0;
 }
 #endif /* CONFIG_HT_IRQ */
 
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index f00a68cca37a..d661ee95cabf 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -406,16 +406,13 @@ static inline int numaq_check_phys_apicid_present(int phys_apicid)
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static unsigned int numaq_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	return 0x0F;
-}
-
-static inline unsigned int
+static int
 numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			     const struct cpumask *andmask)
+			     const struct cpumask *andmask,
+			     unsigned int *apicid)
 {
-	return 0x0F;
+	*apicid = 0x0F;
+	return 0;
 }
 
 /* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
@@ -441,20 +438,6 @@ static int probe_numaq(void)
 	return found_numaq;
 }
 
-static void numaq_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/* Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt desitination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
 static void numaq_setup_portio_remap(void)
 {
 	int num_quads = num_online_nodes();
@@ -491,7 +474,7 @@ static struct apic __refdata apic_numaq = {
 	.check_apicid_used = numaq_check_apicid_used,
 	.check_apicid_present = numaq_check_apicid_present,
 
-	.vector_allocation_domain = numaq_vector_allocation_domain,
+	.vector_allocation_domain = flat_vector_allocation_domain,
 	.init_apic_ldr = numaq_init_apic_ldr,
 
 	.ioapic_phys_id_map = numaq_ioapic_phys_id_map,
@@ -509,7 +492,6 @@ static struct apic __refdata apic_numaq = {
 	.set_apic_id = NULL,
 	.apic_id_mask = 0x0F << 24,
 
-	.cpu_mask_to_apicid = numaq_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask = numaq_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 8616d5198e16..eb35ef9ee63f 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -66,21 +66,6 @@ static void setup_apic_flat_routing(void)
 #endif
 }
 
-static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/*
-	 * Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt desitination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
 /* should be called last. */
 static int probe_default(void)
 {
@@ -105,7 +90,7 @@ static struct apic apic_default = {
 	.check_apicid_used = default_check_apicid_used,
 	.check_apicid_present = default_check_apicid_present,
 
-	.vector_allocation_domain = default_vector_allocation_domain,
+	.vector_allocation_domain = flat_vector_allocation_domain,
 	.init_apic_ldr = default_init_apic_ldr,
 
 	.ioapic_phys_id_map = default_ioapic_phys_id_map,
@@ -123,8 +108,7 @@ static struct apic apic_default = {
 	.set_apic_id = NULL,
 	.apic_id_mask = 0x0F << 24,
 
-	.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask = default_send_IPI_mask_logical,
 	.send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical,
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index 659897c00755..b6e61857c29f 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -263,43 +263,48 @@ static int summit_check_phys_apicid_present(int physical_apicid)
 	return 1;
 }
 
-static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int
+summit_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
 {
 	unsigned int round = 0;
-	int cpu, apicid = 0;
+	unsigned int cpu, apicid = 0;
 
 	/*
 	 * The cpus in the mask must all be on the apic cluster.
 	 */
-	for_each_cpu(cpu, cpumask) {
+	for_each_cpu_and(cpu, cpumask, cpu_online_mask) {
 		int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
 
 		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
 			printk("%s: Not a valid mask!\n", __func__);
-			return BAD_APICID;
+			return -EINVAL;
 		}
 		apicid |= new_apicid;
 		round++;
 	}
-	return apicid;
+	if (!round)
+		return -EINVAL;
+	*dest_id = apicid;
+	return 0;
 }
 
-static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
-			      const struct cpumask *andmask)
+static int
+summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+			      const struct cpumask *andmask,
+			      unsigned int *apicid)
 {
-	int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 	cpumask_var_t cpumask;
+	*apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 
 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return apicid;
+		return 0;
 
 	cpumask_and(cpumask, inmask, andmask);
-	cpumask_and(cpumask, cpumask, cpu_online_mask);
-	apicid = summit_cpu_mask_to_apicid(cpumask);
+	summit_cpu_mask_to_apicid(cpumask, apicid);
 
 	free_cpumask_var(cpumask);
 
-	return apicid;
+	return 0;
 }
 
 /*
@@ -320,20 +325,6 @@ static int probe_summit(void)
320 return 0; 325 return 0;
321} 326}
322 327
323static void summit_vector_allocation_domain(int cpu, struct cpumask *retmask)
324{
325 /* Careful. Some cpus do not strictly honor the set of cpus
326 * specified in the interrupt destination when using lowest
327 * priority interrupt delivery mode.
328 *
329 * In particular there was a hyperthreading cpu observed to
330 * deliver interrupts to the wrong hyperthread when only one
331 * hyperthread was specified in the interrupt destination.
332 */
333 cpumask_clear(retmask);
334 cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
335}
336
337#ifdef CONFIG_X86_SUMMIT_NUMA 328#ifdef CONFIG_X86_SUMMIT_NUMA
338static struct rio_table_hdr *rio_table_hdr; 329static struct rio_table_hdr *rio_table_hdr;
339static struct scal_detail *scal_devs[MAX_NUMNODES]; 330static struct scal_detail *scal_devs[MAX_NUMNODES];
@@ -509,7 +500,7 @@ static struct apic apic_summit = {
509 .check_apicid_used = summit_check_apicid_used, 500 .check_apicid_used = summit_check_apicid_used,
510 .check_apicid_present = summit_check_apicid_present, 501 .check_apicid_present = summit_check_apicid_present,
511 502
512 .vector_allocation_domain = summit_vector_allocation_domain, 503 .vector_allocation_domain = flat_vector_allocation_domain,
513 .init_apic_ldr = summit_init_apic_ldr, 504 .init_apic_ldr = summit_init_apic_ldr,
514 505
515 .ioapic_phys_id_map = summit_ioapic_phys_id_map, 506 .ioapic_phys_id_map = summit_ioapic_phys_id_map,
@@ -527,7 +518,6 @@ static struct apic apic_summit = {
527 .set_apic_id = NULL, 518 .set_apic_id = NULL,
528 .apic_id_mask = 0xFF << 24, 519 .apic_id_mask = 0xFF << 24,
529 520
530 .cpu_mask_to_apicid = summit_cpu_mask_to_apicid,
531 .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and, 521 .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and,
532 522
533 .send_IPI_mask = summit_send_IPI_mask, 523 .send_IPI_mask = summit_send_IPI_mask,
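
The summit conversion above illustrates the API change driving this whole series: ->cpu_mask_to_apicid_and() now returns 0 or a negative errno and hands the destination back through an out-parameter, so callers no longer compare against the in-band BAD_APICID sentinel. A minimal userspace sketch of that calling convention; the function name and mask encoding here are made up for illustration:

#include <errno.h>
#include <stdio.h>

/* Toy stand-in for summit_cpu_mask_to_apicid(): 0 on success,
 * -EINVAL on an empty mask, result delivered via *apicid. */
static int mask_to_apicid(unsigned long mask, unsigned int *apicid)
{
	if (!mask)
		return -EINVAL;
	*apicid = (unsigned int)(mask & 0xffu);	/* fake encoding */
	return 0;
}

int main(void)
{
	unsigned int dest;

	if (mask_to_apicid(0x0, &dest))
		printf("empty mask rejected\n");	/* error, not BAD_APICID */
	if (!mask_to_apicid(0x3, &dest))
		printf("dest=%u\n", dest);
	return 0;
}
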
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index ff35cff0e1a7..943d03fc6fc4 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -81,7 +81,7 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
81} 81}
82 82
83static void 83static void
84 x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) 84x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
85{ 85{
86 __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT); 86 __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
87} 87}
@@ -96,36 +96,37 @@ static void x2apic_send_IPI_all(int vector)
96 __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC); 96 __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
97} 97}
98 98
99static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) 99static int
100x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
101 const struct cpumask *andmask,
102 unsigned int *apicid)
100{ 103{
101 /* 104 u32 dest = 0;
102 * We're using fixed IRQ delivery, can only return one logical APIC ID. 105 u16 cluster;
103 * May as well be the first. 106 int i;
104 */
105 int cpu = cpumask_first(cpumask);
106 107
107 if ((unsigned)cpu < nr_cpu_ids) 108 for_each_cpu_and(i, cpumask, andmask) {
108 return per_cpu(x86_cpu_to_logical_apicid, cpu); 109 if (!cpumask_test_cpu(i, cpu_online_mask))
109 else 110 continue;
110 return BAD_APICID; 111 dest = per_cpu(x86_cpu_to_logical_apicid, i);
111} 112 cluster = x2apic_cluster(i);
113 break;
114 }
112 115
113static unsigned int 116 if (!dest)
114x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 117 return -EINVAL;
115 const struct cpumask *andmask)
116{
117 int cpu;
118 118
119 /* 119 for_each_cpu_and(i, cpumask, andmask) {
120 * We're using fixed IRQ delivery, can only return one logical APIC ID. 120 if (!cpumask_test_cpu(i, cpu_online_mask))
121 * May as well be the first. 121 continue;
122 */ 122 if (cluster != x2apic_cluster(i))
123 for_each_cpu_and(cpu, cpumask, andmask) { 123 continue;
124 if (cpumask_test_cpu(cpu, cpu_online_mask)) 124 dest |= per_cpu(x86_cpu_to_logical_apicid, i);
125 break;
126 } 125 }
127 126
128 return per_cpu(x86_cpu_to_logical_apicid, cpu); 127 *apicid = dest;
128
129 return 0;
129} 130}
130 131
131static void init_x2apic_ldr(void) 132static void init_x2apic_ldr(void)
@@ -208,6 +209,16 @@ static int x2apic_cluster_probe(void)
208 return 0; 209 return 0;
209} 210}
210 211
212/*
213 * Each x2apic cluster is an allocation domain.
214 */
215static bool cluster_vector_allocation_domain(int cpu, struct cpumask *retmask)
216{
217 cpumask_clear(retmask);
218 cpumask_copy(retmask, per_cpu(cpus_in_cluster, cpu));
219 return true;
220}
221
211static struct apic apic_x2apic_cluster = { 222static struct apic apic_x2apic_cluster = {
212 223
213 .name = "cluster x2apic", 224 .name = "cluster x2apic",
@@ -219,13 +230,13 @@ static struct apic apic_x2apic_cluster = {
219 .irq_delivery_mode = dest_LowestPrio, 230 .irq_delivery_mode = dest_LowestPrio,
220 .irq_dest_mode = 1, /* logical */ 231 .irq_dest_mode = 1, /* logical */
221 232
222 .target_cpus = x2apic_target_cpus, 233 .target_cpus = online_target_cpus,
223 .disable_esr = 0, 234 .disable_esr = 0,
224 .dest_logical = APIC_DEST_LOGICAL, 235 .dest_logical = APIC_DEST_LOGICAL,
225 .check_apicid_used = NULL, 236 .check_apicid_used = NULL,
226 .check_apicid_present = NULL, 237 .check_apicid_present = NULL,
227 238
228 .vector_allocation_domain = x2apic_vector_allocation_domain, 239 .vector_allocation_domain = cluster_vector_allocation_domain,
229 .init_apic_ldr = init_x2apic_ldr, 240 .init_apic_ldr = init_x2apic_ldr,
230 241
231 .ioapic_phys_id_map = NULL, 242 .ioapic_phys_id_map = NULL,
@@ -243,7 +254,6 @@ static struct apic apic_x2apic_cluster = {
243 .set_apic_id = x2apic_set_apic_id, 254 .set_apic_id = x2apic_set_apic_id,
244 .apic_id_mask = 0xFFFFFFFFu, 255 .apic_id_mask = 0xFFFFFFFFu,
245 256
246 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
247 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, 257 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
248 258
249 .send_IPI_mask = x2apic_send_IPI_mask, 259 .send_IPI_mask = x2apic_send_IPI_mask,
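
Two things change in the cluster driver above: cluster_vector_allocation_domain() makes each x2apic cluster its own vector allocation domain, and the rewritten x2apic_cpu_mask_to_apicid_and() builds a multi-CPU logical destination in two passes, first latching the cluster of the first eligible online CPU, then OR-ing in the logical IDs of every other eligible CPU in that same cluster. A plain-C model of that reduction, with a made-up CPU table standing in for the per-cpu arrays:

#include <stdio.h>

struct cpu { int online; unsigned cluster; unsigned logical_id; };

static struct cpu cpus[] = {
	{ 1, 0, 0x1 }, { 1, 0, 0x2 }, { 1, 1, 0x1 }, { 0, 0, 0x4 },
};

int main(void)
{
	unsigned dest = 0, cluster = 0;
	int i, n = sizeof(cpus) / sizeof(cpus[0]);

	for (i = 0; i < n; i++) {	/* pass 1: first eligible online CPU */
		if (!cpus[i].online)
			continue;
		dest = cpus[i].logical_id;
		cluster = cpus[i].cluster;
		break;
	}
	if (!dest)
		return 1;		/* the kernel returns -EINVAL here */

	for (i = 0; i < n; i++)		/* pass 2: same cluster only */
		if (cpus[i].online && cpus[i].cluster == cluster)
			dest |= cpus[i].logical_id;

	printf("dest=0x%x (cluster %u)\n", dest, cluster);
	return 0;
}
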
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index c17e982db275..e03a1e180e81 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -76,38 +76,6 @@ static void x2apic_send_IPI_all(int vector)
76 __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC); 76 __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
77} 77}
78 78
79static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
80{
81 /*
82 * We're using fixed IRQ delivery, can only return one phys APIC ID.
83 * May as well be the first.
84 */
85 int cpu = cpumask_first(cpumask);
86
87 if ((unsigned)cpu < nr_cpu_ids)
88 return per_cpu(x86_cpu_to_apicid, cpu);
89 else
90 return BAD_APICID;
91}
92
93static unsigned int
94x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
95 const struct cpumask *andmask)
96{
97 int cpu;
98
99 /*
100 * We're using fixed IRQ delivery, can only return one phys APIC ID.
101 * May as well be the first.
102 */
103 for_each_cpu_and(cpu, cpumask, andmask) {
104 if (cpumask_test_cpu(cpu, cpu_online_mask))
105 break;
106 }
107
108 return per_cpu(x86_cpu_to_apicid, cpu);
109}
110
111static void init_x2apic_ldr(void) 79static void init_x2apic_ldr(void)
112{ 80{
113} 81}
@@ -131,13 +99,13 @@ static struct apic apic_x2apic_phys = {
131 .irq_delivery_mode = dest_Fixed, 99 .irq_delivery_mode = dest_Fixed,
132 .irq_dest_mode = 0, /* physical */ 100 .irq_dest_mode = 0, /* physical */
133 101
134 .target_cpus = x2apic_target_cpus, 102 .target_cpus = online_target_cpus,
135 .disable_esr = 0, 103 .disable_esr = 0,
136 .dest_logical = 0, 104 .dest_logical = 0,
137 .check_apicid_used = NULL, 105 .check_apicid_used = NULL,
138 .check_apicid_present = NULL, 106 .check_apicid_present = NULL,
139 107
140 .vector_allocation_domain = x2apic_vector_allocation_domain, 108 .vector_allocation_domain = default_vector_allocation_domain,
141 .init_apic_ldr = init_x2apic_ldr, 109 .init_apic_ldr = init_x2apic_ldr,
142 110
143 .ioapic_phys_id_map = NULL, 111 .ioapic_phys_id_map = NULL,
@@ -155,8 +123,7 @@ static struct apic apic_x2apic_phys = {
155 .set_apic_id = x2apic_set_apic_id, 123 .set_apic_id = x2apic_set_apic_id,
156 .apic_id_mask = 0xFFFFFFFFu, 124 .apic_id_mask = 0xFFFFFFFFu,
157 125
158 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, 126 .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
159 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
160 127
161 .send_IPI_mask = x2apic_send_IPI_mask, 128 .send_IPI_mask = x2apic_send_IPI_mask,
162 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, 129 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index c6d03f7a4401..8cfade9510a4 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -185,17 +185,6 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
185unsigned long sn_rtc_cycles_per_second; 185unsigned long sn_rtc_cycles_per_second;
186EXPORT_SYMBOL(sn_rtc_cycles_per_second); 186EXPORT_SYMBOL(sn_rtc_cycles_per_second);
187 187
188static const struct cpumask *uv_target_cpus(void)
189{
190 return cpu_online_mask;
191}
192
193static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
194{
195 cpumask_clear(retmask);
196 cpumask_set_cpu(cpu, retmask);
197}
198
199static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) 188static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
200{ 189{
201#ifdef CONFIG_SMP 190#ifdef CONFIG_SMP
@@ -280,25 +269,12 @@ static void uv_init_apic_ldr(void)
280{ 269{
281} 270}
282 271
283static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask) 272static int
284{
285 /*
286 * We're using fixed IRQ delivery, can only return one phys APIC ID.
287 * May as well be the first.
288 */
289 int cpu = cpumask_first(cpumask);
290
291 if ((unsigned)cpu < nr_cpu_ids)
292 return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
293 else
294 return BAD_APICID;
295}
296
297static unsigned int
298uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, 273uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
299 const struct cpumask *andmask) 274 const struct cpumask *andmask,
275 unsigned int *apicid)
300{ 276{
301 int cpu; 277 unsigned int cpu;
302 278
303 /* 279 /*
304 * We're using fixed IRQ delivery, can only return one phys APIC ID. 280 * We're using fixed IRQ delivery, can only return one phys APIC ID.
@@ -308,7 +284,13 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
308 if (cpumask_test_cpu(cpu, cpu_online_mask)) 284 if (cpumask_test_cpu(cpu, cpu_online_mask))
309 break; 285 break;
310 } 286 }
311 return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits; 287
288 if (likely(cpu < nr_cpu_ids)) {
289 *apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
290 return 0;
291 }
292
293 return -EINVAL;
312} 294}
313 295
314static unsigned int x2apic_get_apic_id(unsigned long x) 296static unsigned int x2apic_get_apic_id(unsigned long x)
@@ -362,13 +344,13 @@ static struct apic __refdata apic_x2apic_uv_x = {
362 .irq_delivery_mode = dest_Fixed, 344 .irq_delivery_mode = dest_Fixed,
363 .irq_dest_mode = 0, /* physical */ 345 .irq_dest_mode = 0, /* physical */
364 346
365 .target_cpus = uv_target_cpus, 347 .target_cpus = online_target_cpus,
366 .disable_esr = 0, 348 .disable_esr = 0,
367 .dest_logical = APIC_DEST_LOGICAL, 349 .dest_logical = APIC_DEST_LOGICAL,
368 .check_apicid_used = NULL, 350 .check_apicid_used = NULL,
369 .check_apicid_present = NULL, 351 .check_apicid_present = NULL,
370 352
371 .vector_allocation_domain = uv_vector_allocation_domain, 353 .vector_allocation_domain = default_vector_allocation_domain,
372 .init_apic_ldr = uv_init_apic_ldr, 354 .init_apic_ldr = uv_init_apic_ldr,
373 355
374 .ioapic_phys_id_map = NULL, 356 .ioapic_phys_id_map = NULL,
@@ -386,7 +368,6 @@ static struct apic __refdata apic_x2apic_uv_x = {
386 .set_apic_id = set_apic_id, 368 .set_apic_id = set_apic_id,
387 .apic_id_mask = 0xFFFFFFFFu, 369 .apic_id_mask = 0xFFFFFFFFu,
388 370
389 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
390 .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and, 371 .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
391 372
392 .send_IPI_mask = uv_send_IPI_mask, 373 .send_IPI_mask = uv_send_IPI_mask,
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 9b9f18b49918..5e4771266f1a 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -119,7 +119,7 @@ static __init void early_serial_init(char *s)
119 unsigned char c; 119 unsigned char c;
120 unsigned divisor; 120 unsigned divisor;
121 unsigned baud = DEFAULT_BAUD; 121 unsigned baud = DEFAULT_BAUD;
122 char *e; 122 ssize_t ret;
123 123
124 if (*s == ',') 124 if (*s == ',')
125 ++s; 125 ++s;
@@ -127,14 +127,14 @@ static __init void early_serial_init(char *s)
127 if (*s) { 127 if (*s) {
128 unsigned port; 128 unsigned port;
129 if (!strncmp(s, "0x", 2)) { 129 if (!strncmp(s, "0x", 2)) {
130 early_serial_base = simple_strtoul(s, &e, 16); 130 ret = kstrtoint(s, 16, &early_serial_base);
131 } else { 131 } else {
132 static const int __initconst bases[] = { 0x3f8, 0x2f8 }; 132 static const int __initconst bases[] = { 0x3f8, 0x2f8 };
133 133
134 if (!strncmp(s, "ttyS", 4)) 134 if (!strncmp(s, "ttyS", 4))
135 s += 4; 135 s += 4;
136 port = simple_strtoul(s, &e, 10); 136 ret = kstrtouint(s, 10, &port);
137 if (port > 1 || s == e) 137 if (ret || port > 1)
138 port = 0; 138 port = 0;
139 early_serial_base = bases[port]; 139 early_serial_base = bases[port];
140 } 140 }
@@ -149,8 +149,8 @@ static __init void early_serial_init(char *s)
149 outb(0x3, early_serial_base + MCR); /* DTR + RTS */ 149 outb(0x3, early_serial_base + MCR); /* DTR + RTS */
150 150
151 if (*s) { 151 if (*s) {
152 baud = simple_strtoul(s, &e, 0); 152 ret = kstrtouint(s, 0, &baud);
153 if (baud == 0 || s == e) 153 if (ret || baud == 0)
154 baud = DEFAULT_BAUD; 154 baud = DEFAULT_BAUD;
155 } 155 }
156 156
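
The early_printk hunk swaps simple_strtoul() for the checked kstrto*() helpers, which return 0 or a negative errno and reject malformed input outright; that is why the old "s == e" end-pointer checks collapse into a simple "if (ret)". A rough userspace approximation of the semantics (the real kstrtouint() also tolerates a trailing newline, which this strict sketch does not):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int kstrtouint_like(const char *s, unsigned base, unsigned *res)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(s, &end, base);
	if (errno || end == s || *end != '\0')
		return -EINVAL;		/* bad digits or trailing junk */
	*res = (unsigned)v;
	return 0;
}

int main(void)
{
	unsigned baud;

	if (kstrtouint_like("115200", 0, &baud))
		baud = 38400;		/* DEFAULT_BAUD fallback, as above */
	printf("baud=%u\n", baud);
	return 0;
}
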
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 16be6dc14db1..f4b9b80e1b95 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1031,8 +1031,6 @@ void __init setup_arch(char **cmdline_p)
1031 1031
1032 x86_init.timers.wallclock_init(); 1032 x86_init.timers.wallclock_init();
1033 1033
1034 x86_platform.wallclock_init();
1035
1036 mcheck_init(); 1034 mcheck_init();
1037 1035
1038 arch_init_ideal_nops(); 1036 arch_init_ideal_nops();
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 35c5e543f550..9f3167e891ef 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -29,7 +29,6 @@ void __init x86_init_uint_noop(unsigned int unused) { }
29void __init x86_init_pgd_noop(pgd_t *unused) { } 29void __init x86_init_pgd_noop(pgd_t *unused) { }
30int __init iommu_init_noop(void) { return 0; } 30int __init iommu_init_noop(void) { return 0; }
31void iommu_shutdown_noop(void) { } 31void iommu_shutdown_noop(void) { }
32void wallclock_init_noop(void) { }
33 32
34/* 33/*
35 * The platform setup functions are preset with the default functions 34 * The platform setup functions are preset with the default functions
@@ -101,7 +100,6 @@ static int default_i8042_detect(void) { return 1; };
101 100
102struct x86_platform_ops x86_platform = { 101struct x86_platform_ops x86_platform = {
103 .calibrate_tsc = native_calibrate_tsc, 102 .calibrate_tsc = native_calibrate_tsc,
104 .wallclock_init = wallclock_init_noop,
105 .get_wallclock = mach_get_cmos_time, 103 .get_wallclock = mach_get_cmos_time,
106 .set_wallclock = mach_set_rtc_mmss, 104 .set_wallclock = mach_set_rtc_mmss,
107 .iommu_shutdown = iommu_shutdown_noop, 105 .iommu_shutdown = iommu_shutdown_noop,
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index f25c2765a5c9..acf7752da952 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -135,6 +135,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
135 unsigned long mmr_value; 135 unsigned long mmr_value;
136 struct uv_IO_APIC_route_entry *entry; 136 struct uv_IO_APIC_route_entry *entry;
137 int mmr_pnode, err; 137 int mmr_pnode, err;
138 unsigned int dest;
138 139
139 BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != 140 BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
140 sizeof(unsigned long)); 141 sizeof(unsigned long));
@@ -143,6 +144,10 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
143 if (err != 0) 144 if (err != 0)
144 return err; 145 return err;
145 146
147 err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest);
148 if (err != 0)
149 return err;
150
146 if (limit == UV_AFFINITY_CPU) 151 if (limit == UV_AFFINITY_CPU)
147 irq_set_status_flags(irq, IRQ_NO_BALANCING); 152 irq_set_status_flags(irq, IRQ_NO_BALANCING);
148 else 153 else
@@ -159,7 +164,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
159 entry->polarity = 0; 164 entry->polarity = 0;
160 entry->trigger = 0; 165 entry->trigger = 0;
161 entry->mask = 0; 166 entry->mask = 0;
162 entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); 167 entry->dest = dest;
163 168
164 mmr_pnode = uv_blade_to_pnode(mmr_blade); 169 mmr_pnode = uv_blade_to_pnode(mmr_blade);
165 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); 170 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -222,7 +227,7 @@ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
222 if (cfg->move_in_progress) 227 if (cfg->move_in_progress)
223 send_cleanup_vector(cfg); 228 send_cleanup_vector(cfg);
224 229
225 return 0; 230 return IRQ_SET_MASK_OK_NOCOPY;
226} 231}
227 232
228/* 233/*
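
uv_set_irq_affinity() now returns IRQ_SET_MASK_OK_NOCOPY instead of 0, telling the generic irq core that the callback has already stored the (possibly adjusted) mask in irq_data->affinity, so the core must skip its own cpumask_copy(). A simplified model of that contract (the enum values match include/linux/irq.h of this era; the types and dispatcher are illustrative only):

#include <stdio.h>

enum { IRQ_SET_MASK_OK = 0, IRQ_SET_MASK_OK_NOCOPY = 1 };

struct toy_irq_data { unsigned long affinity; };

/* Stand-in for a chip callback that adjusts and stores the mask
 * itself, then asks the core not to overwrite it. */
static int chip_set_affinity(struct toy_irq_data *d, unsigned long mask)
{
	d->affinity = mask & 0xfUL;	/* chip keeps its adjusted copy */
	return IRQ_SET_MASK_OK_NOCOPY;
}

int main(void)
{
	struct toy_irq_data d = { 0 };
	unsigned long mask = 0xffUL;

	if (chip_set_affinity(&d, mask) == IRQ_SET_MASK_OK)
		d.affinity = mask;	/* the core's copy, skipped here */

	printf("affinity=0x%lx\n", d.affinity);	/* prints 0xf */
	return 0;
}
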
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 6d347064b8b0..e0b18f3ae9a8 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -902,7 +902,6 @@ static int intel_setup_ioapic_entry(int irq,
902 return 0; 902 return 0;
903} 903}
904 904
905#ifdef CONFIG_SMP
906/* 905/*
907 * Migrate the IO-APIC irq in the presence of intr-remapping. 906 * Migrate the IO-APIC irq in the presence of intr-remapping.
908 * 907 *
@@ -924,6 +923,10 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
924 struct irq_cfg *cfg = data->chip_data; 923 struct irq_cfg *cfg = data->chip_data;
925 unsigned int dest, irq = data->irq; 924 unsigned int dest, irq = data->irq;
926 struct irte irte; 925 struct irte irte;
926 int err;
927
928 if (!config_enabled(CONFIG_SMP))
929 return -EINVAL;
927 930
928 if (!cpumask_intersects(mask, cpu_online_mask)) 931 if (!cpumask_intersects(mask, cpu_online_mask))
929 return -EINVAL; 932 return -EINVAL;
@@ -931,10 +934,16 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
931 if (get_irte(irq, &irte)) 934 if (get_irte(irq, &irte))
932 return -EBUSY; 935 return -EBUSY;
933 936
934 if (assign_irq_vector(irq, cfg, mask)) 937 err = assign_irq_vector(irq, cfg, mask);
935 return -EBUSY; 938 if (err)
939 return err;
936 940
937 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); 941 err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
942 if (err) {
943 if (assign_irq_vector(irq, cfg, data->affinity))
944 pr_err("Failed to recover vector for irq %d\n", irq);
945 return err;
946 }
938 947
939 irte.vector = cfg->vector; 948 irte.vector = cfg->vector;
940 irte.dest_id = IRTE_DEST(dest); 949 irte.dest_id = IRTE_DEST(dest);
@@ -956,7 +965,6 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
956 cpumask_copy(data->affinity, mask); 965 cpumask_copy(data->affinity, mask);
957 return 0; 966 return 0;
958} 967}
959#endif
960 968
961static void intel_compose_msi_msg(struct pci_dev *pdev, 969static void intel_compose_msi_msg(struct pci_dev *pdev,
962 unsigned int irq, unsigned int dest, 970 unsigned int irq, unsigned int dest,
@@ -1058,9 +1066,7 @@ struct irq_remap_ops intel_irq_remap_ops = {
1058 .reenable = reenable_irq_remapping, 1066 .reenable = reenable_irq_remapping,
1059 .enable_faulting = enable_drhd_fault_handling, 1067 .enable_faulting = enable_drhd_fault_handling,
1060 .setup_ioapic_entry = intel_setup_ioapic_entry, 1068 .setup_ioapic_entry = intel_setup_ioapic_entry,
1061#ifdef CONFIG_SMP
1062 .set_affinity = intel_ioapic_set_affinity, 1069 .set_affinity = intel_ioapic_set_affinity,
1063#endif
1064 .free_irq = free_irte, 1070 .free_irq = free_irte,
1065 .compose_msi_msg = intel_compose_msi_msg, 1071 .compose_msi_msg = intel_compose_msi_msg,
1066 .msi_alloc_irq = intel_msi_alloc_irq, 1072 .msi_alloc_irq = intel_msi_alloc_irq,
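
Replacing #ifdef CONFIG_SMP with a config_enabled() test keeps the affinity path compiled, and therefore type-checked, on UP builds while letting the optimizer discard it as dead code; the include/linux/irq.h hunk further below makes irq_data->affinity unconditional so this is safe. A minimal sketch of the idiom, with CONFIG_SMP_ON standing in for the kernel's Kconfig-generated config_enabled(CONFIG_SMP):

#include <stdio.h>

#define CONFIG_SMP_ON 1		/* would come from the build system */

static int set_remapped_affinity(int irq)
{
	if (!CONFIG_SMP_ON)
		return 0;	/* UP: constant-folded away, still compiled */
	printf("irq %d: affinity forwarded to remap_ops\n", irq);
	return 0;
}

int main(void)
{
	return set_remapped_affinity(16);
}
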
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 40cda8e98d87..1d29b1c66e72 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -111,16 +111,15 @@ int setup_ioapic_remapped_entry(int irq,
111 vector, attr); 111 vector, attr);
112} 112}
113 113
114#ifdef CONFIG_SMP
115int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask, 114int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
116 bool force) 115 bool force)
117{ 116{
118 if (!remap_ops || !remap_ops->set_affinity) 117 if (!config_enabled(CONFIG_SMP) || !remap_ops ||
118 !remap_ops->set_affinity)
119 return 0; 119 return 0;
120 120
121 return remap_ops->set_affinity(data, mask, force); 121 return remap_ops->set_affinity(data, mask, force);
122} 122}
123#endif
124 123
125void free_remapped_irq(int irq) 124void free_remapped_irq(int irq)
126{ 125{
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index be9d72950c51..b12974cc1dfe 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -59,11 +59,9 @@ struct irq_remap_ops {
59 unsigned int, int, 59 unsigned int, int,
60 struct io_apic_irq_attr *); 60 struct io_apic_irq_attr *);
61 61
62#ifdef CONFIG_SMP
63 /* Set the CPU affinity of a remapped interrupt */ 62 /* Set the CPU affinity of a remapped interrupt */
64 int (*set_affinity)(struct irq_data *data, const struct cpumask *mask, 63 int (*set_affinity)(struct irq_data *data, const struct cpumask *mask,
65 bool force); 64 bool force);
66#endif
67 65
68 /* Free an IRQ */ 66 /* Free an IRQ */
69 int (*free_irq)(int); 67 int (*free_irq)(int);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 61f5cec031e0..47a937cd84af 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -150,9 +150,7 @@ struct irq_data {
150 void *handler_data; 150 void *handler_data;
151 void *chip_data; 151 void *chip_data;
152 struct msi_desc *msi_desc; 152 struct msi_desc *msi_desc;
153#ifdef CONFIG_SMP
154 cpumask_var_t affinity; 153 cpumask_var_t affinity;
155#endif
156}; 154};
157 155
158/* 156/*