Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                              |  13
-rw-r--r--  arch/x86/include/asm/bigsmp/apic.h            |  32
-rw-r--r--  arch/x86/include/asm/bigsmp/ipi.h             |  13
-rw-r--r--  arch/x86/include/asm/desc.h                   |  10
-rw-r--r--  arch/x86/include/asm/es7000/apic.h            |  82
-rw-r--r--  arch/x86/include/asm/es7000/ipi.h             |  12
-rw-r--r--  arch/x86/include/asm/genapic_32.h             |  13
-rw-r--r--  arch/x86/include/asm/genapic_64.h             |  14
-rw-r--r--  arch/x86/include/asm/ipi.h                    |  23
-rw-r--r--  arch/x86/include/asm/irq.h                    |   3
-rw-r--r--  arch/x86/include/asm/mach-default/mach_apic.h |  28
-rw-r--r--  arch/x86/include/asm/mach-default/mach_ipi.h  |  18
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_apic.h |   1
-rw-r--r--  arch/x86/include/asm/numaq/apic.h             |  12
-rw-r--r--  arch/x86/include/asm/numaq/ipi.h              |  13
-rw-r--r--  arch/x86/include/asm/smp.h                    |   6
-rw-r--r--  arch/x86/include/asm/summit/apic.h            |  55
-rw-r--r--  arch/x86/include/asm/summit/ipi.h             |   9
-rw-r--r--  arch/x86/include/asm/topology.h               |   2
-rw-r--r--  arch/x86/kernel/apic.c                        |  34
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c         |  41
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c       | 108
-rw-r--r--  arch/x86/kernel/genapic_flat_64.c             | 107
-rw-r--r--  arch/x86/kernel/genx2apic_cluster.c           |  81
-rw-r--r--  arch/x86/kernel/genx2apic_phys.c              |  74
-rw-r--r--  arch/x86/kernel/genx2apic_uv_x.c              |  61
-rw-r--r--  arch/x86/kernel/io_apic.c                     | 358
-rw-r--r--  arch/x86/kernel/ipi.c                         |  28
-rw-r--r--  arch/x86/kernel/irq.c                         |   3
-rw-r--r--  arch/x86/kernel/irq_32.c                      |  13
-rw-r--r--  arch/x86/kernel/irq_64.c                      |  15
-rw-r--r--  arch/x86/kernel/irqinit_32.c                  |  16
-rw-r--r--  arch/x86/kernel/irqinit_64.c                  |  13
-rw-r--r--  arch/x86/kernel/reboot.c                      |   5
-rw-r--r--  arch/x86/kernel/setup_percpu.c                |  17
-rw-r--r--  arch/x86/kernel/smp.c                         |   8
-rw-r--r--  arch/x86/kernel/smpboot.c                     |  27
-rw-r--r--  arch/x86/kernel/tlb_32.c                      |   2
-rw-r--r--  arch/x86/kernel/tlb_64.c                      |   2
-rw-r--r--  arch/x86/kernel/traps.c                       |  12
-rw-r--r--  arch/x86/mach-generic/bigsmp.c                |   5
-rw-r--r--  arch/x86/mach-generic/es7000.c                |   5
-rw-r--r--  arch/x86/mach-generic/numaq.c                 |   5
-rw-r--r--  arch/x86/mach-generic/summit.c                |   5
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c           |   2
-rw-r--r--  arch/x86/mm/numa_64.c                         |   4
-rw-r--r--  arch/x86/mm/srat_64.c                         |   2
-rw-r--r--  arch/x86/xen/mmu.c                            |  20
-rw-r--r--  arch/x86/xen/smp.c                            |  27
-rw-r--r--  arch/x86/xen/suspend.c                        |   3
-rw-r--r--  arch/x86/xen/xen-ops.h                        |   2
51 files changed, 920 insertions(+), 544 deletions(-)
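
Note: the common thread in the hunks below is converting the 32-bit x86 APIC/IPI paths from passing cpumask_t by value to passing const struct cpumask pointers. With NR_CPUS=4096 a cpumask_t is 512 bytes, so each by-value call copied half a kilobyte across the stack. A rough before/after sketch of the signature change (illustrative only, not quoted verbatim from the patch):

	/* old: the whole 512-byte bitmap is copied at every call site */
	unsigned int cpu_mask_to_apicid(cpumask_t cpumask);

	/* new: only a pointer to a const bitmap crosses the call boundary */
	unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask);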
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0f44add3e0b7..249d1e0824b5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -601,19 +601,20 @@ config IOMMU_HELPER
 
 config MAXSMP
 	bool "Configure Maximum number of SMP Processors and NUMA Nodes"
-	depends on X86_64 && SMP && BROKEN
+	depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
+	select CPUMASK_OFFSTACK
 	default n
 	help
 	  Configure maximum number of CPUS and NUMA Nodes for this architecture.
 	  If unsure, say N.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-512)" if !MAXSMP
-	range 2 512
-	depends on SMP
+	int "Maximum number of CPUs" if SMP && !MAXSMP
+	range 2 512 if SMP && !MAXSMP
+	default "1" if !SMP
 	default "4096" if MAXSMP
-	default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
-	default "8"
+	default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000)
+	default "8" if SMP
 	help
 	  This allows you to specify the maximum number of CPUs which this
 	  kernel will support. The maximum supported value is 512 and the
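
Note: MAXSMP now selects CPUMASK_OFFSTACK, which makes cpumask_var_t a pointer whose bitmap must be allocated and freed explicitly (with CPUMASK_OFFSTACK disabled, the mask lives on the stack and the alloc/free calls are no-ops). A minimal sketch of that pattern, assuming a kernel context (the function name is invented for illustration):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	static int example(void)
	{
		cpumask_var_t mask;

		/* kmallocs the bitmap when CONFIG_CPUMASK_OFFSTACK=y */
		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_and(mask, cpu_online_mask, cpu_present_mask);
		/* ... use mask ... */
		free_cpumask_var(mask);
		return 0;
	}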
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
index ce547f24a1cd..d8dd9f537911 100644
--- a/arch/x86/include/asm/bigsmp/apic.h
+++ b/arch/x86/include/asm/bigsmp/apic.h
@@ -9,12 +9,12 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return &cpu_online_map;
 #else
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 #endif
 }
 
@@ -79,7 +79,7 @@ static inline int apicid_to_node(int logical_apicid)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS)
+	if (mps_cpu < nr_cpu_ids)
 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 
 	return BAD_APICID;
@@ -94,7 +94,7 @@ extern u8 cpu_2_logical_apicid[];
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return cpu_physical_id(cpu);
 }
@@ -119,16 +119,34 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 	int apicid;
 
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
+{
+	int cpu;
+
+	/*
+	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
+	 * May as well be the first.
+	 */
+	for_each_cpu_and(cpu, cpumask, andmask)
+		if (cpumask_test_cpu(cpu, cpu_online_mask))
+			break;
+	if (cpu < nr_cpu_ids)
+		return cpu_to_logical_apicid(cpu);
+
+	return BAD_APICID;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
 	return cpuid_apic >> index_msb;
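
Note: the new cpu_mask_to_apicid_and() above relies on for_each_cpu_and(), which walks the intersection of two masks without materializing a temporary cpumask; if no bit matches, the loop leaves cpu == nr_cpu_ids. A small illustrative helper using the same idiom (hypothetical name, not part of this patch):

	#include <linux/cpumask.h>

	static int first_online_cpu_in(const struct cpumask *mask,
				       const struct cpumask *andmask)
	{
		int cpu;

		for_each_cpu_and(cpu, mask, andmask)
			if (cpumask_test_cpu(cpu, cpu_online_mask))
				break;
		return cpu < nr_cpu_ids ? cpu : -1;
	}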
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
index 9404c535b7ec..27fcd01b3ae6 100644
--- a/arch/x86/include/asm/bigsmp/ipi.h
+++ b/arch/x86/include/asm/bigsmp/ipi.h
@@ -1,25 +1,22 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_MACH_IPI_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index e6b82b17b072..dc27705f5443 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -320,16 +320,14 @@ static inline void set_intr_gate(unsigned int n, void *addr)
 	_set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
 }
 
-#define SYS_VECTOR_FREE		0
-#define SYS_VECTOR_ALLOCED	1
-
 extern int first_system_vector;
-extern char system_vectors[];
+/* used_vectors is BITMAP for irq is not managed by percpu vector_irq */
+extern unsigned long used_vectors[];
 
 static inline void alloc_system_vector(int vector)
 {
-	if (system_vectors[vector] == SYS_VECTOR_FREE) {
-		system_vectors[vector] = SYS_VECTOR_ALLOCED;
+	if (!test_bit(vector, used_vectors)) {
+		set_bit(vector, used_vectors);
 		if (first_system_vector > vector)
 			first_system_vector = vector;
 	} else
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
index e24ef876915f..51ac1230294e 100644
--- a/arch/x86/include/asm/es7000/apic.h
+++ b/arch/x86/include/asm/es7000/apic.h
@@ -9,14 +9,14 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-static inline cpumask_t target_cpus_cluster(void)
+static inline const cpumask_t *target_cpus_cluster(void)
 {
-	return CPU_MASK_ALL;
+	return &CPU_MASK_ALL;
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-	return cpumask_of_cpu(smp_processor_id());
+	return &cpumask_of_cpu(smp_processor_id());
 }
 
 #define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
@@ -80,9 +80,10 @@ extern int apic_version [MAX_APICS];
 static inline void setup_apic_routing(void)
 {
 	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
 	printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
-		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
+		"Physical Cluster" : "Logical Cluster",
+		nr_ioapics, cpus_addr(*target_cpus())[0]);
 }
 
 static inline int multi_timer_check(int apic, int irq)
@@ -100,7 +101,7 @@ static inline int cpu_present_to_apicid(int mps_cpu)
 {
 	if (!mps_cpu)
 		return boot_cpu_physical_apicid;
-	else if (mps_cpu < NR_CPUS)
+	else if (mps_cpu < nr_cpu_ids)
 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
@@ -120,9 +121,9 @@ extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 #else
 	return logical_smp_processor_id();
 #endif
@@ -146,14 +147,15 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid)
 	return (1);
 }
 
-static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
+static inline unsigned int
+cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpumask_weight(cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return 0xFF;
@@ -161,10 +163,10 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster. If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = cpumask_first(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -179,14 +181,14 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 	return apicid;
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return cpu_to_logical_apicid(0);
@@ -194,10 +196,52 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster. If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
+	apicid = cpu_to_logical_apicid(cpu);
+	while (cpus_found < num_bits_set) {
+		if (cpu_isset(cpu, *cpumask)) {
+			int new_apicid = cpu_to_logical_apicid(cpu);
+			if (apicid_cluster(apicid) !=
+					apicid_cluster(new_apicid)){
+				printk ("%s: Not a valid mask!\n", __func__);
+				return cpu_to_logical_apicid(0);
+			}
+			apicid = new_apicid;
+			cpus_found++;
+		}
+		cpu++;
+	}
+	return apicid;
+}
+
+
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
+						  const struct cpumask *andmask)
+{
+	int num_bits_set;
+	int cpus_found = 0;
+	int cpu;
+	int apicid = cpu_to_logical_apicid(0);
+	cpumask_var_t cpumask;
+
+	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+		return apicid;
+
+	cpumask_and(cpumask, inmask, andmask);
+	cpumask_and(cpumask, cpumask, cpu_online_mask);
+
+	num_bits_set = cpumask_weight(cpumask);
+	/* Return id to all */
+	if (num_bits_set == NR_CPUS)
+		goto exit;
+	/*
+	 * The cpus in the mask must all be on the apic cluster. If are not
+	 * on the same apicid cluster return default value of TARGET_CPUS.
+	 */
+	cpu = cpumask_first(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -209,6 +253,8 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 		}
 		cpu++;
 	}
+exit:
+	free_cpumask_var(cpumask);
 	return apicid;
 }
 
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
index 632a955fcc0a..7e8ed24d4b8a 100644
--- a/arch/x86/include/asm/es7000/ipi.h
+++ b/arch/x86/include/asm/es7000/ipi.h
@@ -1,24 +1,22 @@
 #ifndef __ASM_ES7000_IPI_H
 #define __ASM_ES7000_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
index 0ac17d33a8c7..746f37a7963a 100644
--- a/arch/x86/include/asm/genapic_32.h
+++ b/arch/x86/include/asm/genapic_32.h
@@ -24,7 +24,7 @@ struct genapic {
 	int (*probe)(void);
 
 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
+	const struct cpumask *(*target_cpus)(void);
 	int int_delivery_mode;
 	int int_dest_mode;
 	int ESR_DISABLE;
@@ -57,12 +57,16 @@ struct genapic {
 
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
-	cpumask_t (*vector_allocation_domain)(int cpu);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+					       const struct cpumask *andmask);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 
 #ifdef CONFIG_SMP
 	/* ipi */
-	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 #endif
@@ -114,6 +118,7 @@ struct genapic {
 	APICFUNC(get_apic_id)				\
 	.apic_id_mask = APIC_ID_MASK,			\
 	APICFUNC(cpu_mask_to_apicid)			\
+	APICFUNC(cpu_mask_to_apicid_and)		\
 	APICFUNC(vector_allocation_domain)		\
 	APICFUNC(acpi_madt_oem_check)			\
 	IPIFUNC(send_IPI_mask)				\
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
index 2cae011668b7..adf32fb56aa6 100644
--- a/arch/x86/include/asm/genapic_64.h
+++ b/arch/x86/include/asm/genapic_64.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_GENAPIC_64_H
 #define _ASM_X86_GENAPIC_64_H
 
+#include <linux/cpumask.h>
+
 /*
  * Copyright 2004 James Cleverdon, IBM.
  * Subject to the GNU Public License, v.2
@@ -18,16 +20,20 @@ struct genapic {
 	u32 int_delivery_mode;
 	u32 int_dest_mode;
 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
-	cpumask_t (*vector_allocation_domain)(int cpu);
+	const struct cpumask *(*target_cpus)(void);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);
 	/* ipi */
-	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 	void (*send_IPI_self)(int vector);
 	/* */
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+					       const struct cpumask *andmask);
 	unsigned int (*phys_pkg_id)(int index_msb);
 	unsigned int (*get_apic_id)(unsigned long x);
 	unsigned long (*set_apic_id)(unsigned int id);
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index f89dffb28aa9..c745a306f7d3 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -117,7 +117,8 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
 	native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
+static inline void send_IPI_mask_sequence(const struct cpumask *mask,
+					  int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
@@ -128,11 +129,29 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 	 * - mbligh
 	 */
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, mask) {
+	for_each_cpu(query_cpu, mask) {
 		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
 				      vector, APIC_DEST_PHYSICAL);
 	}
 	local_irq_restore(flags);
 }
 
+static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
+					    int vector)
+{
+	unsigned long flags;
+	unsigned int query_cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	/* See Hack comment above */
+
+	local_irq_save(flags);
+	for_each_cpu(query_cpu, mask)
+		if (query_cpu != this_cpu)
+			__send_IPI_dest_field(
+				per_cpu(x86_cpu_to_apicid, query_cpu),
+				vector, APIC_DEST_PHYSICAL);
+	local_irq_restore(flags);
+}
+
 #endif /* _ASM_X86_IPI_H */
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 28e409fc73f3..592688ed04d3 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -33,7 +33,7 @@ static inline int irq_canonicalize(int irq)
 
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
-extern void fixup_irqs(cpumask_t map);
+extern void fixup_irqs(void);
 #endif
 
 extern unsigned int do_IRQ(struct pt_regs *regs);
@@ -42,5 +42,6 @@ extern void native_init_IRQ(void);
 
 /* Interrupt vector management */
 extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
+extern int vector_used_by_percpu_irq(unsigned int vector);
 
 #endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
index 6cb3a467e067..cc09cbbee27e 100644
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ b/arch/x86/include/asm/mach-default/mach_apic.h
@@ -8,12 +8,12 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_FLAT)
 
-static inline cpumask_t target_cpus(void)
+static inline const struct cpumask *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return cpu_online_mask;
 #else
-	return cpumask_of_cpu(0);
+	return cpumask_of(0);
 #endif
 }
 
@@ -28,6 +28,7 @@ static inline cpumask_t target_cpus(void)
 #define apic_id_registered (genapic->apic_id_registered)
 #define init_apic_ldr (genapic->init_apic_ldr)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
 #define phys_pkg_id (genapic->phys_pkg_id)
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
@@ -61,9 +62,19 @@ static inline int apic_id_registered(void)
 	return physid_isset(read_apic_id(), phys_cpu_present_map);
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return cpus_addr(cpumask)[0];
+	return cpumask_bits(cpumask)[0];
+}
+
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
+{
+	unsigned long mask1 = cpumask_bits(cpumask)[0];
+	unsigned long mask2 = cpumask_bits(andmask)[0];
+	unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+
+	return (unsigned int)(mask1 & mask2 & mask3);
 }
 
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
@@ -88,7 +99,7 @@ static inline int apicid_to_node(int logical_apicid)
 #endif
 }
 
-static inline cpumask_t vector_allocation_domain(int cpu)
+static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -98,8 +109,7 @@ static inline cpumask_t vector_allocation_domain(int cpu)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
 }
 #endif
 
@@ -131,7 +141,7 @@ static inline int cpu_to_logical_apicid(int cpu)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS && cpu_present(mps_cpu))
+	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
 		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
index fabca01ebacf..191312d155da 100644
--- a/arch/x86/include/asm/mach-default/mach_ipi.h
+++ b/arch/x86/include/asm/mach-default/mach_ipi.h
@@ -4,7 +4,8 @@
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
 
-void send_IPI_mask_bitmask(cpumask_t mask, int vector);
+void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 void __send_IPI_shortcut(unsigned int shortcut, int vector);
 
 extern int no_broadcast;
@@ -12,28 +13,27 @@ extern int no_broadcast;
 #ifdef CONFIG_X86_64
 #include <asm/genapic.h>
 #define send_IPI_mask (genapic->send_IPI_mask)
+#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
 #else
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_bitmask(mask, vector);
 }
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 #endif
 
 static inline void __local_send_IPI_allbutself(int vector)
 {
-	if (no_broadcast || vector == NMI_VECTOR) {
-		cpumask_t mask = cpu_online_map;
-
-		cpu_clear(smp_processor_id(), mask);
-		send_IPI_mask(mask, vector);
-	} else
+	if (no_broadcast || vector == NMI_VECTOR)
+		send_IPI_mask_allbutself(cpu_online_mask, vector);
+	else
 		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
 }
 
 static inline void __local_send_IPI_all(int vector)
 {
 	if (no_broadcast || vector == NMI_VECTOR)
-		send_IPI_mask(cpu_online_map, vector);
+		send_IPI_mask(cpu_online_mask, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
 }
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h
index e430f47df667..48553e958ad5 100644
--- a/arch/x86/include/asm/mach-generic/mach_apic.h
+++ b/arch/x86/include/asm/mach-generic/mach_apic.h
@@ -24,6 +24,7 @@
 #define check_phys_apicid_present (genapic->check_phys_apicid_present)
 #define check_apicid_used (genapic->check_apicid_used)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define enable_apic_mode (genapic->enable_apic_mode)
 #define phys_pkg_id (genapic->phys_pkg_id)
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
index 0bf2a06b7a4e..c80f00d29965 100644
--- a/arch/x86/include/asm/numaq/apic.h
+++ b/arch/x86/include/asm/numaq/apic.h
@@ -7,9 +7,9 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-	return CPU_MASK_ALL;
+	return &CPU_MASK_ALL;
 }
 
 #define NO_BALANCE_IRQ (1)
@@ -122,7 +122,13 @@ static inline void enable_apic_mode(void)
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+	return (int) 0xF;
+}
+
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
 {
 	return (int) 0xF;
 }
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h
index 935588d286cf..a8374c652778 100644
--- a/arch/x86/include/asm/numaq/ipi.h
+++ b/arch/x86/include/asm/numaq/ipi.h
@@ -1,25 +1,22 @@
 #ifndef __ASM_NUMAQ_IPI_H
 #define __ASM_NUMAQ_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_NUMAQ_IPI_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index d12811ce51d9..830b9fcb6427 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -60,7 +60,7 @@ struct smp_ops {
 	void (*cpu_die)(unsigned int cpu);
 	void (*play_dead)(void);
 
-	void (*send_call_func_ipi)(cpumask_t mask);
+	void (*send_call_func_ipi)(const struct cpumask *mask);
 	void (*send_call_func_single_ipi)(int cpu);
 };
 
@@ -125,7 +125,7 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 
 static inline void arch_send_call_function_ipi(cpumask_t mask)
 {
-	smp_ops.send_call_func_ipi(mask);
+	smp_ops.send_call_func_ipi(&mask);
 }
 
 void cpu_disable_common(void);
@@ -138,7 +138,7 @@ void native_cpu_die(unsigned int cpu);
 void native_play_dead(void);
 void play_dead_common(void);
 
-void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
 
 extern void prefill_possible_map(void);
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
index 9b3070f1c2ac..99327d1be49f 100644
--- a/arch/x86/include/asm/summit/apic.h
+++ b/arch/x86/include/asm/summit/apic.h
@@ -14,13 +14,13 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 	/* CPU_MASK_ALL (0xff) has undefined behaviour with
 	 * dest_LowestPrio mode logical clustered apic interrupt routing
 	 * Just start on cpu 0.  IRQ balancing will spread load
 	 */
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 }
 
 #define INT_DELIVERY_MODE (dest_LowestPrio)
@@ -137,14 +137,14 @@ static inline void enable_apic_mode(void)
 {
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return (int) 0xFF;
@@ -152,10 +152,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster. If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpu_isset(cpu, *cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -170,6 +170,49 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
+						  const struct cpumask *andmask)
+{
+	int num_bits_set;
+	int cpus_found = 0;
+	int cpu;
+	int apicid = 0xFF;
+	cpumask_var_t cpumask;
+
+	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+		return (int) 0xFF;
+
+	cpumask_and(cpumask, inmask, andmask);
+	cpumask_and(cpumask, cpumask, cpu_online_mask);
+
+	num_bits_set = cpumask_weight(cpumask);
+	/* Return id to all */
+	if (num_bits_set == nr_cpu_ids)
+		goto exit;
+	/*
+	 * The cpus in the mask must all be on the apic cluster. If are not
+	 * on the same apicid cluster return default value of TARGET_CPUS.
+	 */
+	cpu = cpumask_first(cpumask);
+	apicid = cpu_to_logical_apicid(cpu);
+	while (cpus_found < num_bits_set) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
+			int new_apicid = cpu_to_logical_apicid(cpu);
+			if (apicid_cluster(apicid) !=
+					apicid_cluster(new_apicid)){
+				printk ("%s: Not a valid mask!\n", __func__);
+				return 0xFF;
+			}
+			apicid = apicid | new_apicid;
+			cpus_found++;
+		}
+		cpu++;
+	}
+exit:
+	free_cpumask_var(cpumask);
+	return apicid;
+}
+
 /* cpuid returns the value latched in the HW at reset, not the APIC ID
  * register's value. For any box whose BIOS changes APIC IDs, like
  * clustered APIC systems, we must use hard_smp_processor_id.
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h
index 53bd1e7bd7b4..a8a2c24f50cc 100644
--- a/arch/x86/include/asm/summit/ipi.h
+++ b/arch/x86/include/asm/summit/ipi.h
@@ -1,9 +1,10 @@
 #ifndef __ASM_SUMMIT_IPI_H
 #define __ASM_SUMMIT_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
@@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector)
 	cpu_clear(smp_processor_id(), mask);
 
 	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+		send_IPI_mask(&mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(&cpu_online_map, vector);
 }
 
 #endif /* __ASM_SUMMIT_IPI_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 168203c0c316..4e2f2e0aab27 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -238,6 +238,8 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
 #define topology_core_siblings(cpu)		(per_cpu(cpu_core_map, cpu))
 #define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)		(&per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
 
 /* indicates that pointers to the topology cpumask_t maps are valid */
 #define arch_provides_topology_pointers		yes
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 6107b41da9a5..6b7f824db160 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -119,8 +119,6 @@ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
 
 int first_system_vector = 0xfe;
 
-char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
-
 /*
  * Debug level, exported for io_apic.c
  */
@@ -142,7 +140,7 @@ static int lapic_next_event(unsigned long delta,
 		struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
 			      struct clock_event_device *evt);
-static void lapic_timer_broadcast(const struct cpumask *mask);
+static void lapic_timer_broadcast(const cpumask_t *mask);
 static void apic_pm_activate(void);
 
 /*
@@ -455,10 +453,10 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(const struct cpumask *mask)
+static void lapic_timer_broadcast(const cpumask_t *mask)
 {
 #ifdef CONFIG_SMP
-	send_IPI_mask(*mask, LOCAL_TIMER_VECTOR);
+	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 #endif
 }
 
@@ -1807,28 +1805,32 @@ void disconnect_bsp_APIC(int virt_wire_setup)
 void __cpuinit generic_processor_info(int apicid, int version)
 {
 	int cpu;
-	cpumask_t tmp_map;
 
 	/*
 	 * Validate version
 	 */
 	if (version == 0x0) {
 		pr_warning("BIOS bug, APIC version is 0 for CPU#%d! "
 			   "fixing up to 0x10. (tell your hw vendor)\n",
 				version);
 		version = 0x10;
 	}
 	apic_version[apicid] = version;
 
-	if (num_processors >= NR_CPUS) {
-		pr_warning("WARNING: NR_CPUS limit of %i reached."
-			" Processor ignored.\n", NR_CPUS);
+	if (num_processors >= nr_cpu_ids) {
+		int max = nr_cpu_ids;
+		int thiscpu = max + disabled_cpus;
+
+		pr_warning(
+			"ACPI: NR_CPUS/possible_cpus limit of %i reached."
+			" Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
+
+		disabled_cpus++;
 		return;
 	}
 
 	num_processors++;
-	cpus_complement(tmp_map, cpu_present_map);
-	cpu = first_cpu(tmp_map);
+	cpu = cpumask_next_zero(-1, cpu_present_mask);
 
 	physid_set(apicid, phys_cpu_present_map);
 	if (apicid == boot_cpu_physical_apicid) {
@@ -1878,8 +1880,8 @@ void __cpuinit generic_processor_info(int apicid, int version)
 	}
 #endif
 
-	cpu_set(cpu, cpu_possible_map);
-	cpu_set(cpu, cpu_present_map);
+	set_cpu_possible(cpu, true);
+	set_cpu_present(cpu, true);
 }
 
 #ifdef CONFIG_X86_64
@@ -2081,7 +2083,7 @@ __cpuinit int apic_is_clustered_box(void)
 	bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
 	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		/* are we being called early in kernel startup? */
 		if (bios_cpu_apicid) {
 			id = bios_cpu_apicid[i];
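
Note: the cpumask_next_zero(-1, ...) call introduced above returns the first clear bit of a mask, which is what the removed cpus_complement()/first_cpu() pair computed via a 512-byte temporary. A trivial sketch of the idiom (hypothetical wrapper name):

	#include <linux/cpumask.h>

	static int first_absent_cpu(const struct cpumask *mask)
	{
		/* first CPU number not set in @mask, without a temp copy */
		return cpumask_next_zero(-1, mask);
	}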
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 15cf14e9bf26..48533d77be78 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -534,31 +534,16 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	per_cpu(cpuid4_info, cpu) = NULL;
 }
 
-static int __cpuinit detect_cache_attributes(unsigned int cpu)
+static void __cpuinit get_cpu_leaves(void *_retval)
 {
-	struct _cpuid4_info *this_leaf;
-	unsigned long j;
-	int retval;
-	cpumask_t oldmask;
-
-	if (num_cache_leaves == 0)
-		return -ENOENT;
-
-	per_cpu(cpuid4_info, cpu) = kzalloc(
-		sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-	if (per_cpu(cpuid4_info, cpu) == NULL)
-		return -ENOMEM;
-
-	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-	if (retval)
-		goto out;
+	int j, *retval = _retval, cpu = smp_processor_id();
 
 	/* Do cpuid and store the results */
 	for (j = 0; j < num_cache_leaves; j++) {
+		struct _cpuid4_info *this_leaf;
 		this_leaf = CPUID4_INFO_IDX(cpu, j);
-		retval = cpuid4_cache_lookup(j, this_leaf);
-		if (unlikely(retval < 0)) {
+		*retval = cpuid4_cache_lookup(j, this_leaf);
+		if (unlikely(*retval < 0)) {
 			int i;
 
 			for (i = 0; i < j; i++)
@@ -567,9 +552,21 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 		}
 		cache_shared_cpu_map_setup(cpu, j);
 	}
-	set_cpus_allowed_ptr(current, &oldmask);
+}
+
+static int __cpuinit detect_cache_attributes(unsigned int cpu)
+{
+	int retval;
+
+	if (num_cache_leaves == 0)
+		return -ENOENT;
+
+	per_cpu(cpuid4_info, cpu) = kzalloc(
+		sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
+	if (per_cpu(cpuid4_info, cpu) == NULL)
+		return -ENOMEM;
 
-out:
+	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
 	if (retval) {
 		kfree(per_cpu(cpuid4_info, cpu));
 		per_cpu(cpuid4_info, cpu) = NULL;
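
Note: instead of temporarily rewriting current's affinity with set_cpus_allowed_ptr(), the cacheinfo code now asks the target CPU to run the CPUID probe via smp_call_function_single(). A minimal sketch of that pattern (names invented for illustration; the callback runs in interrupt context on the remote CPU and must not sleep):

	#include <linux/smp.h>

	struct probe_result {		/* invented type for illustration */
		int retval;
	};

	/* runs on the target CPU in IPI context */
	static void probe_on_cpu(void *_res)
	{
		struct probe_result *res = _res;

		res->retval = smp_processor_id();	/* stand-in for real work */
	}

	static int probe_cpu(int cpu)
	{
		struct probe_result res = { .retval = -1 };

		/* wait == true blocks until the remote call has completed */
		smp_call_function_single(cpu, probe_on_cpu, &res, true);
		return res.retval;
	}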
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 748c8f9e7a05..a5a5e0530370 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -83,34 +83,41 @@ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
  * CPU Initialization
  */
 
+struct thresh_restart {
+	struct threshold_block *b;
+	int reset;
+	u16 old_limit;
+};
+
 /* must be called with correct cpu affinity */
-static void threshold_restart_bank(struct threshold_block *b,
-				   int reset, u16 old_limit)
+static long threshold_restart_bank(void *_tr)
 {
+	struct thresh_restart *tr = _tr;
 	u32 mci_misc_hi, mci_misc_lo;
 
-	rdmsr(b->address, mci_misc_lo, mci_misc_hi);
+	rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
 
-	if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
-		reset = 1;	/* limit cannot be lower than err count */
+	if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
+		tr->reset = 1;	/* limit cannot be lower than err count */
 
-	if (reset) {		/* reset err count and overflow bit */
+	if (tr->reset) {	/* reset err count and overflow bit */
 		mci_misc_hi =
 		    (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
-		    (THRESHOLD_MAX - b->threshold_limit);
-	} else if (old_limit) {	/* change limit w/o reset */
+		    (THRESHOLD_MAX - tr->b->threshold_limit);
+	} else if (tr->old_limit) {	/* change limit w/o reset */
 		int new_count = (mci_misc_hi & THRESHOLD_MAX) +
-		    (old_limit - b->threshold_limit);
+		    (tr->old_limit - tr->b->threshold_limit);
 		mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
 		    (new_count & THRESHOLD_MAX);
 	}
 
-	b->interrupt_enable ?
+	tr->b->interrupt_enable ?
 	    (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
 	    (mci_misc_hi &= ~MASK_INT_TYPE_HI);
 
 	mci_misc_hi |= MASK_COUNT_EN_HI;
-	wrmsr(b->address, mci_misc_lo, mci_misc_hi);
+	wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
+	return 0;
 }
 
 /* cpu init entry point, called from mce.c with preempt off */
@@ -120,6 +127,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
 	unsigned int cpu = smp_processor_id();
 	u8 lvt_off;
 	u32 low = 0, high = 0, address = 0;
+	struct thresh_restart tr;
 
 	for (bank = 0; bank < NR_BANKS; ++bank) {
 		for (block = 0; block < NR_BLOCKS; ++block) {
@@ -162,7 +170,10 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
 			wrmsr(address, low, high);
 
 			threshold_defaults.address = address;
-			threshold_restart_bank(&threshold_defaults, 0, 0);
+			tr.b = &threshold_defaults;
+			tr.reset = 0;
+			tr.old_limit = 0;
+			threshold_restart_bank(&tr);
 		}
 	}
 }
@@ -251,20 +262,6 @@ struct threshold_attr {
 	ssize_t(*store) (struct threshold_block *, const char *, size_t count);
 };
 
-static void affinity_set(unsigned int cpu, cpumask_t *oldmask,
-					   cpumask_t *newmask)
-{
-	*oldmask = current->cpus_allowed;
-	cpus_clear(*newmask);
-	cpu_set(cpu, *newmask);
-	set_cpus_allowed_ptr(current, newmask);
-}
-
-static void affinity_restore(const cpumask_t *oldmask)
-{
-	set_cpus_allowed_ptr(current, oldmask);
-}
-
 #define SHOW_FIELDS(name) \
 static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
 { \
@@ -277,15 +274,16 @@ static ssize_t store_interrupt_enable(struct threshold_block *b,
 				      const char *buf, size_t count)
 {
 	char *end;
-	cpumask_t oldmask, newmask;
+	struct thresh_restart tr;
 	unsigned long new = simple_strtoul(buf, &end, 0);
 	if (end == buf)
 		return -EINVAL;
 	b->interrupt_enable = !!new;
 
-	affinity_set(b->cpu, &oldmask, &newmask);
-	threshold_restart_bank(b, 0, 0);
-	affinity_restore(&oldmask);
+	tr.b = b;
+	tr.reset = 0;
+	tr.old_limit = 0;
+	work_on_cpu(b->cpu, threshold_restart_bank, &tr);
 
 	return end - buf;
 }
@@ -294,8 +292,7 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
 				     const char *buf, size_t count)
 {
 	char *end;
-	cpumask_t oldmask, newmask;
-	u16 old;
+	struct thresh_restart tr;
 	unsigned long new = simple_strtoul(buf, &end, 0);
 	if (end == buf)
 		return -EINVAL;
@@ -303,34 +300,36 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
 		new = THRESHOLD_MAX;
 	if (new < 1)
 		new = 1;
-	old = b->threshold_limit;
+	tr.old_limit = b->threshold_limit;
 	b->threshold_limit = new;
+	tr.b = b;
+	tr.reset = 0;
 
-	affinity_set(b->cpu, &oldmask, &newmask);
-	threshold_restart_bank(b, 0, old);
-	affinity_restore(&oldmask);
+	work_on_cpu(b->cpu, threshold_restart_bank, &tr);
 
 	return end - buf;
 }
 
-static ssize_t show_error_count(struct threshold_block *b, char *buf)
+static long local_error_count(void *_b)
 {
-	u32 high, low;
-	cpumask_t oldmask, newmask;
-	affinity_set(b->cpu, &oldmask, &newmask);
+	struct threshold_block *b = _b;
+	u32 low, high;
+
 	rdmsr(b->address, low, high);
-	affinity_restore(&oldmask);
-	return sprintf(buf, "%x\n",
-		       (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
+	return (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
+}
+
+static ssize_t show_error_count(struct threshold_block *b, char *buf)
+{
+	return sprintf(buf, "%lx\n", work_on_cpu(b->cpu, local_error_count, b));
 }
 
 static ssize_t store_error_count(struct threshold_block *b,
 				 const char *buf, size_t count)
 {
-	cpumask_t oldmask, newmask;
-	affinity_set(b->cpu, &oldmask, &newmask);
-	threshold_restart_bank(b, 1, 0);
-	affinity_restore(&oldmask);
+	struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };
+
+	work_on_cpu(b->cpu, threshold_restart_bank, &tr);
 	return 1;
 }
 
@@ -463,12 +462,19 @@ out_free:
 	return err;
 }
 
+static long local_allocate_threshold_blocks(void *_bank)
+{
+	unsigned int *bank = _bank;
+
+	return allocate_threshold_blocks(smp_processor_id(), *bank, 0,
+					 MSR_IA32_MC0_MISC + *bank * 4);
+}
+
 /* symlinks sibling shared banks to first core.  first core owns dir/files. */
 static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 {
 	int i, err = 0;
 	struct threshold_bank *b = NULL;
-	cpumask_t oldmask, newmask;
 	char name[32];
 
 	sprintf(name, "threshold_bank%i", bank);
@@ -519,11 +525,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
519 525
520 per_cpu(threshold_banks, cpu)[bank] = b; 526 per_cpu(threshold_banks, cpu)[bank] = b;
521 527
522 affinity_set(cpu, &oldmask, &newmask); 528 err = work_on_cpu(cpu, local_allocate_threshold_blocks, &bank);
523 err = allocate_threshold_blocks(cpu, bank, 0,
524 MSR_IA32_MC0_MISC + bank * 4);
525 affinity_restore(&oldmask);
526
527 if (err) 529 if (err)
528 goto out_free; 530 goto out_free;
529 531
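The mce_amd_64.c conversion above replaces the old affinity_set()/affinity_restore() trick, which temporarily rebound the current task to the target CPU, with work_on_cpu(), which queues a callback on that CPU's workqueue and sleeps until it completes. Because work_on_cpu() passes a single void *, the former extra arguments travel together in struct thresh_restart. A minimal sketch of the pattern, assuming work_on_cpu()'s signature long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg); restart_bank_on() is a hypothetical wrapper, not part of the patch:

struct thresh_restart {
	struct threshold_block	*b;		/* bank/block to reprogram */
	int			reset;		/* non-zero: reset error counter */
	u16			old_limit;	/* previous threshold, 0 if unused */
};

static long threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;

	/* runs on tr->b->cpu; rdmsr/wrmsr of the bank's MISC MSR goes here */
	return 0;
}

static void restart_bank_on(struct threshold_block *b)	/* hypothetical caller */
{
	struct thresh_restart tr = { .b = b, .reset = 0, .old_limit = 0 };

	work_on_cpu(b->cpu, threshold_restart_bank, &tr);	/* may sleep */
}

Since work_on_cpu() can sleep, this only works because the sysfs show/store handlers above already run in process context.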
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index c0262791bda4..34185488e4fb 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
30 return 1; 30 return 1;
31} 31}
32 32
33static cpumask_t flat_target_cpus(void) 33static const struct cpumask *flat_target_cpus(void)
34{ 34{
35 return cpu_online_map; 35 return cpu_online_mask;
36} 36}
37 37
38static cpumask_t flat_vector_allocation_domain(int cpu) 38static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
39{ 39{
40 /* Careful. Some cpus do not strictly honor the set of cpus 40 /* Careful. Some cpus do not strictly honor the set of cpus
41 * specified in the interrupt destination when using lowest 41 * specified in the interrupt destination when using lowest
@@ -45,8 +45,8 @@ static cpumask_t flat_vector_allocation_domain(int cpu)
45 * deliver interrupts to the wrong hyperthread when only one 45 * deliver interrupts to the wrong hyperthread when only one
46 * hyperthread was specified in the interrupt destination. 46 * hyperthread was specified in the interrupt destination.
47 */ 47 */
48 cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; 48 cpumask_clear(retmask);
49 return domain; 49 cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
50} 50}
51 51
52/* 52/*
@@ -69,9 +69,8 @@ static void flat_init_apic_ldr(void)
69 apic_write(APIC_LDR, val); 69 apic_write(APIC_LDR, val);
70} 70}
71 71
72static void flat_send_IPI_mask(cpumask_t cpumask, int vector) 72static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
73{ 73{
74 unsigned long mask = cpus_addr(cpumask)[0];
75 unsigned long flags; 74 unsigned long flags;
76 75
77 local_irq_save(flags); 76 local_irq_save(flags);
@@ -79,20 +78,41 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
79 local_irq_restore(flags); 78 local_irq_restore(flags);
80} 79}
81 80
81static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
82{
83 unsigned long mask = cpumask_bits(cpumask)[0];
84
85 _flat_send_IPI_mask(mask, vector);
86}
87
88static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
89 int vector)
90{
91 unsigned long mask = cpumask_bits(cpumask)[0];
92 int cpu = smp_processor_id();
93
94 if (cpu < BITS_PER_LONG)
95 clear_bit(cpu, &mask);
96 _flat_send_IPI_mask(mask, vector);
97}
98
82static void flat_send_IPI_allbutself(int vector) 99static void flat_send_IPI_allbutself(int vector)
83{ 100{
101 int cpu = smp_processor_id();
84#ifdef CONFIG_HOTPLUG_CPU 102#ifdef CONFIG_HOTPLUG_CPU
85 int hotplug = 1; 103 int hotplug = 1;
86#else 104#else
87 int hotplug = 0; 105 int hotplug = 0;
88#endif 106#endif
89 if (hotplug || vector == NMI_VECTOR) { 107 if (hotplug || vector == NMI_VECTOR) {
90 cpumask_t allbutme = cpu_online_map; 108 if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
109 unsigned long mask = cpumask_bits(cpu_online_mask)[0];
91 110
92 cpu_clear(smp_processor_id(), allbutme); 111 if (cpu < BITS_PER_LONG)
112 clear_bit(cpu, &mask);
93 113
94 if (!cpus_empty(allbutme)) 114 _flat_send_IPI_mask(mask, vector);
95 flat_send_IPI_mask(allbutme, vector); 115 }
96 } else if (num_online_cpus() > 1) { 116 } else if (num_online_cpus() > 1) {
97 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL); 117 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
98 } 118 }
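In logical flat mode every destination fits in the low word of the cpumask (APIC_ALL_CPUS covers at most 8 logical IDs), which is why _flat_send_IPI_mask() can take a plain unsigned long. The cpu < BITS_PER_LONG guard before clear_bit() keeps the bit index in bounds for that single word; a small model of the "all but self" computation, with flat_allbutself_word() being a hypothetical name:

static unsigned long flat_allbutself_word(const struct cpumask *cpumask, int cpu)
{
	unsigned long mask = cpumask_bits(cpumask)[0];	/* flat: word 0 only */

	if (cpu < BITS_PER_LONG)	/* our own bit can only live in word 0 */
		clear_bit(cpu, &mask);
	return mask;
}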
@@ -101,7 +121,7 @@ static void flat_send_IPI_allbutself(int vector)
101static void flat_send_IPI_all(int vector) 121static void flat_send_IPI_all(int vector)
102{ 122{
103 if (vector == NMI_VECTOR) 123 if (vector == NMI_VECTOR)
104 flat_send_IPI_mask(cpu_online_map, vector); 124 flat_send_IPI_mask(cpu_online_mask, vector);
105 else 125 else
106 __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); 126 __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
107} 127}
@@ -135,9 +155,18 @@ static int flat_apic_id_registered(void)
135 return physid_isset(read_xapic_id(), phys_cpu_present_map); 155 return physid_isset(read_xapic_id(), phys_cpu_present_map);
136} 156}
137 157
138static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) 158static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
159{
160 return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
161}
162
163static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
164 const struct cpumask *andmask)
139{ 165{
140 return cpus_addr(cpumask)[0] & APIC_ALL_CPUS; 166 unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
167 unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS;
168
169 return mask1 & mask2;
141} 170}
142 171
143static unsigned int phys_pkg_id(int index_msb) 172static unsigned int phys_pkg_id(int index_msb)
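The new cpu_mask_to_apicid_and() hook lets callers compute the APIC destination for the intersection of two masks without materializing that intersection in a temporary cpumask_t, which is 512 bytes of stack at NR_CPUS=4096. A hypothetical caller, with the pre-patch form kept in the comment (this is exactly the pattern the io_apic.c hunks below switch to):

static unsigned int dest_for(struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * pre-patch equivalent, needing a stack temporary:
	 *	cpumask_t tmp;
	 *	cpus_and(tmp, cfg->domain, mask);
	 *	return cpu_mask_to_apicid(tmp);
	 */
	return cpu_mask_to_apicid_and(cfg->domain, mask);
}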
@@ -157,8 +186,10 @@ struct genapic apic_flat = {
157 .send_IPI_all = flat_send_IPI_all, 186 .send_IPI_all = flat_send_IPI_all,
158 .send_IPI_allbutself = flat_send_IPI_allbutself, 187 .send_IPI_allbutself = flat_send_IPI_allbutself,
159 .send_IPI_mask = flat_send_IPI_mask, 188 .send_IPI_mask = flat_send_IPI_mask,
189 .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
160 .send_IPI_self = apic_send_IPI_self, 190 .send_IPI_self = apic_send_IPI_self,
161 .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, 191 .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
192 .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
162 .phys_pkg_id = phys_pkg_id, 193 .phys_pkg_id = phys_pkg_id,
163 .get_apic_id = get_apic_id, 194 .get_apic_id = get_apic_id,
164 .set_apic_id = set_apic_id, 195 .set_apic_id = set_apic_id,
@@ -188,35 +219,39 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
188 return 0; 219 return 0;
189} 220}
190 221
191static cpumask_t physflat_target_cpus(void) 222static const struct cpumask *physflat_target_cpus(void)
192{ 223{
193 return cpu_online_map; 224 return cpu_online_mask;
194} 225}
195 226
196static cpumask_t physflat_vector_allocation_domain(int cpu) 227static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
197{ 228{
198 return cpumask_of_cpu(cpu); 229 cpumask_clear(retmask);
230 cpumask_set_cpu(cpu, retmask);
199} 231}
200 232
201static void physflat_send_IPI_mask(cpumask_t cpumask, int vector) 233static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
202{ 234{
203 send_IPI_mask_sequence(cpumask, vector); 235 send_IPI_mask_sequence(cpumask, vector);
204} 236}
205 237
206static void physflat_send_IPI_allbutself(int vector) 238static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
239 int vector)
207{ 240{
208 cpumask_t allbutme = cpu_online_map; 241 send_IPI_mask_allbutself(cpumask, vector);
242}
209 243
210 cpu_clear(smp_processor_id(), allbutme); 244static void physflat_send_IPI_allbutself(int vector)
211 physflat_send_IPI_mask(allbutme, vector); 245{
246 send_IPI_mask_allbutself(cpu_online_mask, vector);
212} 247}
213 248
214static void physflat_send_IPI_all(int vector) 249static void physflat_send_IPI_all(int vector)
215{ 250{
216 physflat_send_IPI_mask(cpu_online_map, vector); 251 physflat_send_IPI_mask(cpu_online_mask, vector);
217} 252}
218 253
219static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) 254static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
220{ 255{
221 int cpu; 256 int cpu;
222 257
@@ -224,13 +259,31 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
224 * We're using fixed IRQ delivery, can only return one phys APIC ID. 259 * We're using fixed IRQ delivery, can only return one phys APIC ID.
225 * May as well be the first. 260 * May as well be the first.
226 */ 261 */
227 cpu = first_cpu(cpumask); 262 cpu = cpumask_first(cpumask);
228 if ((unsigned)cpu < nr_cpu_ids) 263 if ((unsigned)cpu < nr_cpu_ids)
229 return per_cpu(x86_cpu_to_apicid, cpu); 264 return per_cpu(x86_cpu_to_apicid, cpu);
230 else 265 else
231 return BAD_APICID; 266 return BAD_APICID;
232} 267}
233 268
269static unsigned int
270physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
271 const struct cpumask *andmask)
272{
273 int cpu;
274
275 /*
276 * We're using fixed IRQ delivery, can only return one phys APIC ID.
277 * May as well be the first.
278 */
279 for_each_cpu_and(cpu, cpumask, andmask)
280 if (cpumask_test_cpu(cpu, cpu_online_mask))
281 break;
282 if (cpu < nr_cpu_ids)
283 return per_cpu(x86_cpu_to_apicid, cpu);
284 return BAD_APICID;
285}
286
234struct genapic apic_physflat = { 287struct genapic apic_physflat = {
235 .name = "physical flat", 288 .name = "physical flat",
236 .acpi_madt_oem_check = physflat_acpi_madt_oem_check, 289 .acpi_madt_oem_check = physflat_acpi_madt_oem_check,
@@ -243,8 +296,10 @@ struct genapic apic_physflat = {
243 .send_IPI_all = physflat_send_IPI_all, 296 .send_IPI_all = physflat_send_IPI_all,
244 .send_IPI_allbutself = physflat_send_IPI_allbutself, 297 .send_IPI_allbutself = physflat_send_IPI_allbutself,
245 .send_IPI_mask = physflat_send_IPI_mask, 298 .send_IPI_mask = physflat_send_IPI_mask,
299 .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
246 .send_IPI_self = apic_send_IPI_self, 300 .send_IPI_self = apic_send_IPI_self,
247 .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, 301 .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
302 .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
248 .phys_pkg_id = phys_pkg_id, 303 .phys_pkg_id = phys_pkg_id,
249 .get_apic_id = get_apic_id, 304 .get_apic_id = get_apic_id,
250 .set_apic_id = set_apic_id, 305 .set_apic_id = set_apic_id,
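One subtlety shared by all the new *_cpu_mask_to_apicid_and() helpers: for_each_cpu_and() leaves its iterator equal to nr_cpu_ids when the loop runs off the end, so the cpu < nr_cpu_ids test afterwards cleanly separates "found an online CPU in the intersection" from "found none". Annotated excerpt of the search:

	int cpu;

	for_each_cpu_and(cpu, cpumask, andmask)		/* bits set in both masks */
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;				/* first online hit wins */
	/* if nothing matched, the iterator ends up as nr_cpu_ids */
	if (cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	return BAD_APICID;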
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index f6a2c8eb48a6..6ce497cc372d 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -22,19 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
22 22
23/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 23/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
24 24
25static cpumask_t x2apic_target_cpus(void) 25static const struct cpumask *x2apic_target_cpus(void)
26{ 26{
27 return cpumask_of_cpu(0); 27 return cpumask_of(0);
28} 28}
29 29
30/* 30/*
31 * for now each logical cpu is in its own vector allocation domain. 31 * for now each logical cpu is in its own vector allocation domain.
32 */ 32 */
33static cpumask_t x2apic_vector_allocation_domain(int cpu) 33static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
34{ 34{
35 cpumask_t domain = CPU_MASK_NONE; 35 cpumask_clear(retmask);
36 cpu_set(cpu, domain); 36 cpumask_set_cpu(cpu, retmask);
37 return domain;
38} 37}
39 38
40static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, 39static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -56,32 +55,53 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
56 * at once. We have 16 CPUs in a cluster. This will minimize IPI register 55 * at once. We have 16 CPUs in a cluster. This will minimize IPI register
57 * writes. 56 * writes.
58 */ 57 */
59static void x2apic_send_IPI_mask(cpumask_t mask, int vector) 58static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
60{ 59{
61 unsigned long flags; 60 unsigned long flags;
62 unsigned long query_cpu; 61 unsigned long query_cpu;
63 62
64 local_irq_save(flags); 63 local_irq_save(flags);
65 for_each_cpu_mask(query_cpu, mask) { 64 for_each_cpu(query_cpu, mask)
66 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu), 65 __x2apic_send_IPI_dest(
67 vector, APIC_DEST_LOGICAL); 66 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
68 } 67 vector, APIC_DEST_LOGICAL);
69 local_irq_restore(flags); 68 local_irq_restore(flags);
70} 69}
71 70
72static void x2apic_send_IPI_allbutself(int vector) 71static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
72 int vector)
73{ 73{
74 cpumask_t mask = cpu_online_map; 74 unsigned long flags;
75 unsigned long query_cpu;
76 unsigned long this_cpu = smp_processor_id();
75 77
76 cpu_clear(smp_processor_id(), mask); 78 local_irq_save(flags);
79 for_each_cpu(query_cpu, mask)
80 if (query_cpu != this_cpu)
81 __x2apic_send_IPI_dest(
82 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
83 vector, APIC_DEST_LOGICAL);
84 local_irq_restore(flags);
85}
86
87static void x2apic_send_IPI_allbutself(int vector)
88{
89 unsigned long flags;
90 unsigned long query_cpu;
91 unsigned long this_cpu = smp_processor_id();
77 92
78 if (!cpus_empty(mask)) 93 local_irq_save(flags);
79 x2apic_send_IPI_mask(mask, vector); 94 for_each_online_cpu(query_cpu)
95 if (query_cpu != this_cpu)
96 __x2apic_send_IPI_dest(
97 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
98 vector, APIC_DEST_LOGICAL);
99 local_irq_restore(flags);
80} 100}
81 101
82static void x2apic_send_IPI_all(int vector) 102static void x2apic_send_IPI_all(int vector)
83{ 103{
84 x2apic_send_IPI_mask(cpu_online_map, vector); 104 x2apic_send_IPI_mask(cpu_online_mask, vector);
85} 105}
86 106
87static int x2apic_apic_id_registered(void) 107static int x2apic_apic_id_registered(void)
@@ -89,21 +109,38 @@ static int x2apic_apic_id_registered(void)
89 return 1; 109 return 1;
90} 110}
91 111
92static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) 112static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
93{ 113{
94 int cpu; 114 int cpu;
95 115
96 /* 116 /*
97 * We're using fixed IRQ delivery, can only return one phys APIC ID. 117 * We're using fixed IRQ delivery, can only return one logical APIC ID.
98 * May as well be the first. 118 * May as well be the first.
99 */ 119 */
100 cpu = first_cpu(cpumask); 120 cpu = cpumask_first(cpumask);
101 if ((unsigned)cpu < NR_CPUS) 121 if ((unsigned)cpu < nr_cpu_ids)
102 return per_cpu(x86_cpu_to_logical_apicid, cpu); 122 return per_cpu(x86_cpu_to_logical_apicid, cpu);
103 else 123 else
104 return BAD_APICID; 124 return BAD_APICID;
105} 125}
106 126
127static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
128 const struct cpumask *andmask)
129{
130 int cpu;
131
132 /*
133 * We're using fixed IRQ delivery, can only return one logical APIC ID.
134 * May as well be the first.
135 */
136 for_each_cpu_and(cpu, cpumask, andmask)
137 if (cpumask_test_cpu(cpu, cpu_online_mask))
138 break;
139 if (cpu < nr_cpu_ids)
140 return per_cpu(x86_cpu_to_logical_apicid, cpu);
141 return BAD_APICID;
142}
143
107static unsigned int get_apic_id(unsigned long x) 144static unsigned int get_apic_id(unsigned long x)
108{ 145{
109 unsigned int id; 146 unsigned int id;
@@ -150,8 +187,10 @@ struct genapic apic_x2apic_cluster = {
150 .send_IPI_all = x2apic_send_IPI_all, 187 .send_IPI_all = x2apic_send_IPI_all,
151 .send_IPI_allbutself = x2apic_send_IPI_allbutself, 188 .send_IPI_allbutself = x2apic_send_IPI_allbutself,
152 .send_IPI_mask = x2apic_send_IPI_mask, 189 .send_IPI_mask = x2apic_send_IPI_mask,
190 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
153 .send_IPI_self = x2apic_send_IPI_self, 191 .send_IPI_self = x2apic_send_IPI_self,
154 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, 192 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
193 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
155 .phys_pkg_id = phys_pkg_id, 194 .phys_pkg_id = phys_pkg_id,
156 .get_apic_id = get_apic_id, 195 .get_apic_id = get_apic_id,
157 .set_apic_id = set_apic_id, 196 .set_apic_id = set_apic_id,
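Both x2apic flavors (and the uv code below) convert their "all but self" senders the same way: rather than copying cpu_online_map into a stack cpumask_t and clearing their own bit, they walk the online CPUs and skip themselves. The common shape, with send_one() as a hypothetical stand-in for the per-CPU ICR write:

static void send_IPI_allbutself_sketch(int vector)
{
	unsigned long flags;
	unsigned int cpu, this_cpu = smp_processor_id();

	local_irq_save(flags);		/* serialize the ICR writes vs. irqs */
	for_each_online_cpu(cpu)
		if (cpu != this_cpu)
			send_one(cpu, vector);	/* hypothetical per-CPU send */
	local_irq_restore(flags);
}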
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c
index d042211768b7..62895cf315ff 100644
--- a/arch/x86/kernel/genx2apic_phys.c
+++ b/arch/x86/kernel/genx2apic_phys.c
@@ -29,16 +29,15 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
29 29
30/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 30/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
31 31
32static cpumask_t x2apic_target_cpus(void) 32static const struct cpumask *x2apic_target_cpus(void)
33{ 33{
34 return cpumask_of_cpu(0); 34 return cpumask_of(0);
35} 35}
36 36
37static cpumask_t x2apic_vector_allocation_domain(int cpu) 37static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
38{ 38{
39 cpumask_t domain = CPU_MASK_NONE; 39 cpumask_clear(retmask);
40 cpu_set(cpu, domain); 40 cpumask_set_cpu(cpu, retmask);
41 return domain;
42} 41}
43 42
44static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, 43static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -54,32 +53,54 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
54 x2apic_icr_write(cfg, apicid); 53 x2apic_icr_write(cfg, apicid);
55} 54}
56 55
57static void x2apic_send_IPI_mask(cpumask_t mask, int vector) 56static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
58{ 57{
59 unsigned long flags; 58 unsigned long flags;
60 unsigned long query_cpu; 59 unsigned long query_cpu;
61 60
62 local_irq_save(flags); 61 local_irq_save(flags);
63 for_each_cpu_mask(query_cpu, mask) { 62 for_each_cpu(query_cpu, mask) {
64 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), 63 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
65 vector, APIC_DEST_PHYSICAL); 64 vector, APIC_DEST_PHYSICAL);
66 } 65 }
67 local_irq_restore(flags); 66 local_irq_restore(flags);
68} 67}
69 68
70static void x2apic_send_IPI_allbutself(int vector) 69static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
70 int vector)
71{ 71{
72 cpumask_t mask = cpu_online_map; 72 unsigned long flags;
73 unsigned long query_cpu;
74 unsigned long this_cpu = smp_processor_id();
75
76 local_irq_save(flags);
77 for_each_cpu(query_cpu, mask) {
78 if (query_cpu != this_cpu)
79 __x2apic_send_IPI_dest(
80 per_cpu(x86_cpu_to_apicid, query_cpu),
81 vector, APIC_DEST_PHYSICAL);
82 }
83 local_irq_restore(flags);
84}
73 85
74 cpu_clear(smp_processor_id(), mask); 86static void x2apic_send_IPI_allbutself(int vector)
87{
88 unsigned long flags;
89 unsigned long query_cpu;
90 unsigned long this_cpu = smp_processor_id();
75 91
76 if (!cpus_empty(mask)) 92 local_irq_save(flags);
77 x2apic_send_IPI_mask(mask, vector); 93 for_each_online_cpu(query_cpu)
94 if (query_cpu != this_cpu)
95 __x2apic_send_IPI_dest(
96 per_cpu(x86_cpu_to_apicid, query_cpu),
97 vector, APIC_DEST_PHYSICAL);
98 local_irq_restore(flags);
78} 99}
79 100
80static void x2apic_send_IPI_all(int vector) 101static void x2apic_send_IPI_all(int vector)
81{ 102{
82 x2apic_send_IPI_mask(cpu_online_map, vector); 103 x2apic_send_IPI_mask(cpu_online_mask, vector);
83} 104}
84 105
85static int x2apic_apic_id_registered(void) 106static int x2apic_apic_id_registered(void)
@@ -87,7 +108,7 @@ static int x2apic_apic_id_registered(void)
87 return 1; 108 return 1;
88} 109}
89 110
90static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) 111static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
91{ 112{
92 int cpu; 113 int cpu;
93 114
@@ -95,13 +116,30 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
95 * We're using fixed IRQ delivery, can only return one phys APIC ID. 116 * We're using fixed IRQ delivery, can only return one phys APIC ID.
96 * May as well be the first. 117 * May as well be the first.
97 */ 118 */
98 cpu = first_cpu(cpumask); 119 cpu = cpumask_first(cpumask);
99 if ((unsigned)cpu < NR_CPUS) 120 if ((unsigned)cpu < nr_cpu_ids)
100 return per_cpu(x86_cpu_to_apicid, cpu); 121 return per_cpu(x86_cpu_to_apicid, cpu);
101 else 122 else
102 return BAD_APICID; 123 return BAD_APICID;
103} 124}
104 125
126static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
127 const struct cpumask *andmask)
128{
129 int cpu;
130
131 /*
132 * We're using fixed IRQ delivery, can only return one phys APIC ID.
133 * May as well be the first.
134 */
135 for_each_cpu_and(cpu, cpumask, andmask)
136 if (cpumask_test_cpu(cpu, cpu_online_mask))
137 break;
138 if (cpu < nr_cpu_ids)
139 return per_cpu(x86_cpu_to_apicid, cpu);
140 return BAD_APICID;
141}
142
105static unsigned int get_apic_id(unsigned long x) 143static unsigned int get_apic_id(unsigned long x)
106{ 144{
107 unsigned int id; 145 unsigned int id;
@@ -145,8 +183,10 @@ struct genapic apic_x2apic_phys = {
145 .send_IPI_all = x2apic_send_IPI_all, 183 .send_IPI_all = x2apic_send_IPI_all,
146 .send_IPI_allbutself = x2apic_send_IPI_allbutself, 184 .send_IPI_allbutself = x2apic_send_IPI_allbutself,
147 .send_IPI_mask = x2apic_send_IPI_mask, 185 .send_IPI_mask = x2apic_send_IPI_mask,
186 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
148 .send_IPI_self = x2apic_send_IPI_self, 187 .send_IPI_self = x2apic_send_IPI_self,
149 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, 188 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
189 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
150 .phys_pkg_id = phys_pkg_id, 190 .phys_pkg_id = phys_pkg_id,
151 .get_apic_id = get_apic_id, 191 .get_apic_id = get_apic_id,
152 .set_apic_id = set_apic_id, 192 .set_apic_id = set_apic_id,
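Note also the bound change in the cpu_mask_to_apicid() conversions: cpumask_first() returns nr_cpu_ids, not NR_CPUS, on an empty mask. Since nr_cpu_ids can be well below NR_CPUS at runtime, keeping the old "(unsigned)cpu < NR_CPUS" test would accept the empty-mask sentinel and index per-CPU data out of range. Annotated excerpt:

	cpu = cpumask_first(cpumask);	/* nr_cpu_ids if the mask is empty */
	if ((unsigned)cpu < nr_cpu_ids)	/* "< NR_CPUS" would pass the sentinel */
		return per_cpu(x86_cpu_to_apicid, cpu);
	return BAD_APICID;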
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index dece17289731..b193e082f6ce 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -79,16 +79,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
79 79
80/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 80/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
81 81
82static cpumask_t uv_target_cpus(void) 82static const struct cpumask *uv_target_cpus(void)
83{ 83{
84 return cpumask_of_cpu(0); 84 return cpumask_of(0);
85} 85}
86 86
87static cpumask_t uv_vector_allocation_domain(int cpu) 87static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
88{ 88{
89 cpumask_t domain = CPU_MASK_NONE; 89 cpumask_clear(retmask);
90 cpu_set(cpu, domain); 90 cpumask_set_cpu(cpu, retmask);
91 return domain;
92} 91}
93 92
94int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) 93int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
@@ -127,28 +126,37 @@ static void uv_send_IPI_one(int cpu, int vector)
127 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 126 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
128} 127}
129 128
130static void uv_send_IPI_mask(cpumask_t mask, int vector) 129static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
131{ 130{
132 unsigned int cpu; 131 unsigned int cpu;
133 132
134 for_each_possible_cpu(cpu) 133 for_each_cpu(cpu, mask)
135 if (cpu_isset(cpu, mask)) 134 uv_send_IPI_one(cpu, vector);
135}
136
137static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
138{
139 unsigned int cpu;
140 unsigned int this_cpu = smp_processor_id();
141
142 for_each_cpu(cpu, mask)
143 if (cpu != this_cpu)
136 uv_send_IPI_one(cpu, vector); 144 uv_send_IPI_one(cpu, vector);
137} 145}
138 146
139static void uv_send_IPI_allbutself(int vector) 147static void uv_send_IPI_allbutself(int vector)
140{ 148{
141 cpumask_t mask = cpu_online_map; 149 unsigned int cpu;
142 150 unsigned int this_cpu = smp_processor_id();
143 cpu_clear(smp_processor_id(), mask);
144 151
145 if (!cpus_empty(mask)) 152 for_each_online_cpu(cpu)
146 uv_send_IPI_mask(mask, vector); 153 if (cpu != this_cpu)
154 uv_send_IPI_one(cpu, vector);
147} 155}
148 156
149static void uv_send_IPI_all(int vector) 157static void uv_send_IPI_all(int vector)
150{ 158{
151 uv_send_IPI_mask(cpu_online_map, vector); 159 uv_send_IPI_mask(cpu_online_mask, vector);
152} 160}
153 161
154static int uv_apic_id_registered(void) 162static int uv_apic_id_registered(void)
@@ -160,7 +168,7 @@ static void uv_init_apic_ldr(void)
160{ 168{
161} 169}
162 170
163static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) 171static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
164{ 172{
165 int cpu; 173 int cpu;
166 174
@@ -168,13 +176,30 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
168 * We're using fixed IRQ delivery, can only return one phys APIC ID. 176 * We're using fixed IRQ delivery, can only return one phys APIC ID.
169 * May as well be the first. 177 * May as well be the first.
170 */ 178 */
171 cpu = first_cpu(cpumask); 179 cpu = cpumask_first(cpumask);
172 if ((unsigned)cpu < nr_cpu_ids) 180 if ((unsigned)cpu < nr_cpu_ids)
173 return per_cpu(x86_cpu_to_apicid, cpu); 181 return per_cpu(x86_cpu_to_apicid, cpu);
174 else 182 else
175 return BAD_APICID; 183 return BAD_APICID;
176} 184}
177 185
186static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
187 const struct cpumask *andmask)
188{
189 int cpu;
190
191 /*
192 * We're using fixed IRQ delivery, can only return one phys APIC ID.
193 * May as well be the first.
194 */
195 for_each_cpu_and(cpu, cpumask, andmask)
196 if (cpumask_test_cpu(cpu, cpu_online_mask))
197 break;
198 if (cpu < nr_cpu_ids)
199 return per_cpu(x86_cpu_to_apicid, cpu);
200 return BAD_APICID;
201}
202
178static unsigned int get_apic_id(unsigned long x) 203static unsigned int get_apic_id(unsigned long x)
179{ 204{
180 unsigned int id; 205 unsigned int id;
@@ -222,8 +247,10 @@ struct genapic apic_x2apic_uv_x = {
222 .send_IPI_all = uv_send_IPI_all, 247 .send_IPI_all = uv_send_IPI_all,
223 .send_IPI_allbutself = uv_send_IPI_allbutself, 248 .send_IPI_allbutself = uv_send_IPI_allbutself,
224 .send_IPI_mask = uv_send_IPI_mask, 249 .send_IPI_mask = uv_send_IPI_mask,
250 .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
225 .send_IPI_self = uv_send_IPI_self, 251 .send_IPI_self = uv_send_IPI_self,
226 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, 252 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
253 .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
227 .phys_pkg_id = phys_pkg_id, 254 .phys_pkg_id = phys_pkg_id,
228 .get_apic_id = get_apic_id, 255 .get_apic_id = get_apic_id,
229 .set_apic_id = set_apic_id, 256 .set_apic_id = set_apic_id,
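uv_send_IPI_mask() also gets cheaper: the old loop visited every possible CPU and tested membership, the new one walks only the bits actually set. A sketch with the pre-patch form kept as a comment:

static void uv_send_to_mask_sketch(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	/*
	 * pre-patch form, O(number of possible CPUs) even for a 1-bit mask:
	 *	for_each_possible_cpu(cpu)
	 *		if (cpu_isset(cpu, mask))
	 *			uv_send_IPI_one(cpu, vector);
	 */
	for_each_cpu(cpu, mask)		/* visits only set bits */
		uv_send_IPI_one(cpu, vector);
}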
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index e7745961ed31..3e070bb961d7 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -136,8 +136,8 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
136 136
137struct irq_cfg { 137struct irq_cfg {
138 struct irq_pin_list *irq_2_pin; 138 struct irq_pin_list *irq_2_pin;
139 cpumask_t domain; 139 cpumask_var_t domain;
140 cpumask_t old_domain; 140 cpumask_var_t old_domain;
141 unsigned move_cleanup_count; 141 unsigned move_cleanup_count;
142 u8 vector; 142 u8 vector;
143 u8 move_in_progress : 1; 143 u8 move_in_progress : 1;
@@ -152,22 +152,22 @@ static struct irq_cfg irq_cfgx[] = {
152#else 152#else
153static struct irq_cfg irq_cfgx[NR_IRQS] = { 153static struct irq_cfg irq_cfgx[NR_IRQS] = {
154#endif 154#endif
155 [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, 155 [0] = { .vector = IRQ0_VECTOR, },
156 [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, 156 [1] = { .vector = IRQ1_VECTOR, },
157 [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, 157 [2] = { .vector = IRQ2_VECTOR, },
158 [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, 158 [3] = { .vector = IRQ3_VECTOR, },
159 [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, 159 [4] = { .vector = IRQ4_VECTOR, },
160 [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, 160 [5] = { .vector = IRQ5_VECTOR, },
161 [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, 161 [6] = { .vector = IRQ6_VECTOR, },
162 [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, 162 [7] = { .vector = IRQ7_VECTOR, },
163 [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, 163 [8] = { .vector = IRQ8_VECTOR, },
164 [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, 164 [9] = { .vector = IRQ9_VECTOR, },
165 [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, 165 [10] = { .vector = IRQ10_VECTOR, },
166 [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, 166 [11] = { .vector = IRQ11_VECTOR, },
167 [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, 167 [12] = { .vector = IRQ12_VECTOR, },
168 [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, 168 [13] = { .vector = IRQ13_VECTOR, },
169 [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, 169 [14] = { .vector = IRQ14_VECTOR, },
170 [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, 170 [15] = { .vector = IRQ15_VECTOR, },
171}; 171};
172 172
173void __init arch_early_irq_init(void) 173void __init arch_early_irq_init(void)
@@ -183,6 +183,10 @@ void __init arch_early_irq_init(void)
183 for (i = 0; i < count; i++) { 183 for (i = 0; i < count; i++) {
184 desc = irq_to_desc(i); 184 desc = irq_to_desc(i);
185 desc->chip_data = &cfg[i]; 185 desc->chip_data = &cfg[i];
186 alloc_bootmem_cpumask_var(&cfg[i].domain);
187 alloc_bootmem_cpumask_var(&cfg[i].old_domain);
188 if (i < NR_IRQS_LEGACY)
189 cpumask_setall(cfg[i].domain);
186 } 190 }
187} 191}
188 192
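The CPU_MASK_ALL initializers removed above cannot survive the type change: under CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is a pointer, so there is nothing to initialize statically. The equivalent setup moves into arch_early_irq_init(), where (assuming the usual bootmem semantics of panicking rather than failing) the masks are allocated early and only the NR_IRQS_LEGACY entries keep the old "all CPUs" default:

	alloc_bootmem_cpumask_var(&cfg[i].domain);
	alloc_bootmem_cpumask_var(&cfg[i].old_domain);
	if (i < NR_IRQS_LEGACY)
		cpumask_setall(cfg[i].domain);	/* replaces static CPU_MASK_ALL */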
@@ -207,6 +211,20 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu)
207 node = cpu_to_node(cpu); 211 node = cpu_to_node(cpu);
208 212
209 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); 213 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
214 if (cfg) {
215 /* FIXME: needs alloc_cpumask_var_node() */
216 if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) {
217 kfree(cfg);
218 cfg = NULL;
219 } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) {
220 free_cpumask_var(cfg->domain);
221 kfree(cfg);
222 cfg = NULL;
223 } else {
224 cpumask_clear(cfg->domain);
225 cpumask_clear(cfg->old_domain);
226 }
227 }
210 printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node); 228 printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node);
211 229
212 return cfg; 230 return cfg;
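The unwind cascade above makes more sense given what cpumask_var_t expands to. With CONFIG_CPUMASK_OFFSTACK=n it is a one-element array embedded in the struct, alloc_cpumask_var() compiles to "return true", and the whole cascade folds away; with =y each mask is a real allocation that must be freed on failure. The definition, quoted from memory from include/linux/cpumask.h of this era:

#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;		/* kmalloc'd; can fail */
#else
typedef struct cpumask cpumask_var_t[1];	/* inline; alloc always succeeds */
#endif

The FIXME flags the one regression: unlike the kzalloc_node() just above, plain alloc_cpumask_var() has no node argument yet, so these two masks are not NUMA-local.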
@@ -329,13 +347,14 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
329 } 347 }
330} 348}
331 349
332static void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask) 350static void
351set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
333{ 352{
334 struct irq_cfg *cfg = desc->chip_data; 353 struct irq_cfg *cfg = desc->chip_data;
335 354
336 if (!cfg->move_in_progress) { 355 if (!cfg->move_in_progress) {
337 /* it means that the domain has not changed */ 356 /* it means that the domain has not changed */
338 if (!cpus_intersects(desc->affinity, mask)) 357 if (!cpumask_intersects(&desc->affinity, mask))
339 cfg->move_desc_pending = 1; 358 cfg->move_desc_pending = 1;
340 } 359 }
341} 360}
@@ -350,7 +369,8 @@ static struct irq_cfg *irq_cfg(unsigned int irq)
350#endif 369#endif
351 370
352#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC 371#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
353static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask) 372static inline void
373set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
354{ 374{
355} 375}
356#endif 376#endif
@@ -481,6 +501,26 @@ static void ioapic_mask_entry(int apic, int pin)
481} 501}
482 502
483#ifdef CONFIG_SMP 503#ifdef CONFIG_SMP
504static void send_cleanup_vector(struct irq_cfg *cfg)
505{
506 cpumask_var_t cleanup_mask;
507
508 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
509 unsigned int i;
510 cfg->move_cleanup_count = 0;
511 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
512 cfg->move_cleanup_count++;
513 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
514 send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
515 } else {
516 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
517 cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
518 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
519 free_cpumask_var(cleanup_mask);
520 }
521 cfg->move_in_progress = 0;
522}
523
484static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) 524static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
485{ 525{
486 int apic, pin; 526 int apic, pin;
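send_cleanup_vector() centralizes a sequence that was open-coded at three call sites (see the migrate_ioapic_irq_desc(), irq_complete_move() and ir_set_msi_irq_affinity() hunks below), including a fallback that counts and signals CPUs one at a time when the GFP_ATOMIC cpumask allocation fails. Each call site collapses to:

	/*
	 * was:
	 *	cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
	 *	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
	 *	send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
	 *	cfg->move_in_progress = 0;
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);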
@@ -516,48 +556,61 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
516 } 556 }
517} 557}
518 558
519static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask); 559static int
560assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
520 561
521static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask) 562/*
563 * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid
564 * of that, or returns BAD_APICID and leaves desc->affinity untouched.
565 */
566static unsigned int
567set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
522{ 568{
523 struct irq_cfg *cfg; 569 struct irq_cfg *cfg;
524 unsigned long flags;
525 unsigned int dest;
526 cpumask_t tmp;
527 unsigned int irq; 570 unsigned int irq;
528 571
529 cpus_and(tmp, mask, cpu_online_map); 572 if (!cpumask_intersects(mask, cpu_online_mask))
530 if (cpus_empty(tmp)) 573 return BAD_APICID;
531 return;
532 574
533 irq = desc->irq; 575 irq = desc->irq;
534 cfg = desc->chip_data; 576 cfg = desc->chip_data;
535 if (assign_irq_vector(irq, cfg, mask)) 577 if (assign_irq_vector(irq, cfg, mask))
536 return; 578 return BAD_APICID;
537 579
580 cpumask_and(&desc->affinity, cfg->domain, mask);
538 set_extra_move_desc(desc, mask); 581 set_extra_move_desc(desc, mask);
582 return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
583}
539 584
540 cpus_and(tmp, cfg->domain, mask); 585static void
541 dest = cpu_mask_to_apicid(tmp); 586set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
542 /* 587{
543 * Only the high 8 bits are valid. 588 struct irq_cfg *cfg;
544 */ 589 unsigned long flags;
545 dest = SET_APIC_LOGICAL_ID(dest); 590 unsigned int dest;
591 unsigned int irq;
592
593 irq = desc->irq;
594 cfg = desc->chip_data;
546 595
547 spin_lock_irqsave(&ioapic_lock, flags); 596 spin_lock_irqsave(&ioapic_lock, flags);
548 __target_IO_APIC_irq(irq, dest, cfg); 597 dest = set_desc_affinity(desc, mask);
549 desc->affinity = mask; 598 if (dest != BAD_APICID) {
599 /* Only the high 8 bits are valid. */
600 dest = SET_APIC_LOGICAL_ID(dest);
601 __target_IO_APIC_irq(irq, dest, cfg);
602 }
550 spin_unlock_irqrestore(&ioapic_lock, flags); 603 spin_unlock_irqrestore(&ioapic_lock, flags);
551} 604}
552 605
553static void set_ioapic_affinity_irq(unsigned int irq, 606static void
554 const struct cpumask *mask) 607set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
555{ 608{
556 struct irq_desc *desc; 609 struct irq_desc *desc;
557 610
558 desc = irq_to_desc(irq); 611 desc = irq_to_desc(irq);
559 612
560 set_ioapic_affinity_irq_desc(desc, *mask); 613 set_ioapic_affinity_irq_desc(desc, mask);
561} 614}
562#endif /* CONFIG_SMP */ 615#endif /* CONFIG_SMP */
563 616
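set_desc_affinity() is the other big factoring in this file: it validates the mask against the online CPUs, reassigns the vector, updates desc->affinity, and returns the APIC destination, using BAD_APICID as its failure sentinel. Every ->set_affinity handler converted below (MSI, IR-MSI, DMAR, HPET, HT) ends up with the same shape; a sketch with the device-specific write elided:

static void example_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int dest;

	dest = set_desc_affinity(desc, mask);	/* updates desc->affinity too */
	if (dest == BAD_APICID)
		return;

	/* program 'dest' into the device's routing register here */
}

Note the converted handlers also drop their trailing cpumask_copy(&desc->affinity, mask); that assignment now happens exactly once, inside the helper.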
@@ -1219,7 +1272,8 @@ void unlock_vector_lock(void)
1219 spin_unlock(&vector_lock); 1272 spin_unlock(&vector_lock);
1220} 1273}
1221 1274
1222static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) 1275static int
1276__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1223{ 1277{
1224 /* 1278 /*
1225 * NOTE! The local APIC isn't very good at handling 1279 * NOTE! The local APIC isn't very good at handling
@@ -1234,49 +1288,49 @@ static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
1234 */ 1288 */
1235 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; 1289 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1236 unsigned int old_vector; 1290 unsigned int old_vector;
1237 int cpu; 1291 int cpu, err;
1292 cpumask_var_t tmp_mask;
1238 1293
1239 if ((cfg->move_in_progress) || cfg->move_cleanup_count) 1294 if ((cfg->move_in_progress) || cfg->move_cleanup_count)
1240 return -EBUSY; 1295 return -EBUSY;
1241 1296
1242 /* Only try and allocate irqs on cpus that are present */ 1297 if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
1243 cpus_and(mask, mask, cpu_online_map); 1298 return -ENOMEM;
1244 1299
1245 old_vector = cfg->vector; 1300 old_vector = cfg->vector;
1246 if (old_vector) { 1301 if (old_vector) {
1247 cpumask_t tmp; 1302 cpumask_and(tmp_mask, mask, cpu_online_mask);
1248 cpus_and(tmp, cfg->domain, mask); 1303 cpumask_and(tmp_mask, cfg->domain, tmp_mask);
1249 if (!cpus_empty(tmp)) 1304 if (!cpumask_empty(tmp_mask)) {
1305 free_cpumask_var(tmp_mask);
1250 return 0; 1306 return 0;
1307 }
1251 } 1308 }
1252 1309
1253 for_each_cpu_mask_nr(cpu, mask) { 1310 /* Only try and allocate irqs on cpus that are present */
1254 cpumask_t domain, new_mask; 1311 err = -ENOSPC;
1312 for_each_cpu_and(cpu, mask, cpu_online_mask) {
1255 int new_cpu; 1313 int new_cpu;
1256 int vector, offset; 1314 int vector, offset;
1257 1315
1258 domain = vector_allocation_domain(cpu); 1316 vector_allocation_domain(cpu, tmp_mask);
1259 cpus_and(new_mask, domain, cpu_online_map);
1260 1317
1261 vector = current_vector; 1318 vector = current_vector;
1262 offset = current_offset; 1319 offset = current_offset;
1263next: 1320next:
1264 vector += 8; 1321 vector += 8;
1265 if (vector >= first_system_vector) { 1322 if (vector >= first_system_vector) {
1266 /* If we run out of vectors on large boxen, must share them. */ 1323 /* If out of vectors on large boxen, must share them. */
1267 offset = (offset + 1) % 8; 1324 offset = (offset + 1) % 8;
1268 vector = FIRST_DEVICE_VECTOR + offset; 1325 vector = FIRST_DEVICE_VECTOR + offset;
1269 } 1326 }
1270 if (unlikely(current_vector == vector)) 1327 if (unlikely(current_vector == vector))
1271 continue; 1328 continue;
1272#ifdef CONFIG_X86_64 1329
1273 if (vector == IA32_SYSCALL_VECTOR) 1330 if (test_bit(vector, used_vectors))
1274 goto next;
1275#else
1276 if (vector == SYSCALL_VECTOR)
1277 goto next; 1331 goto next;
1278#endif 1332
1279 for_each_cpu_mask_nr(new_cpu, new_mask) 1333 for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
1280 if (per_cpu(vector_irq, new_cpu)[vector] != -1) 1334 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
1281 goto next; 1335 goto next;
1282 /* Found one! */ 1336 /* Found one! */
@@ -1284,18 +1338,21 @@ next:
1284 current_offset = offset; 1338 current_offset = offset;
1285 if (old_vector) { 1339 if (old_vector) {
1286 cfg->move_in_progress = 1; 1340 cfg->move_in_progress = 1;
1287 cfg->old_domain = cfg->domain; 1341 cpumask_copy(cfg->old_domain, cfg->domain);
1288 } 1342 }
1289 for_each_cpu_mask_nr(new_cpu, new_mask) 1343 for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
1290 per_cpu(vector_irq, new_cpu)[vector] = irq; 1344 per_cpu(vector_irq, new_cpu)[vector] = irq;
1291 cfg->vector = vector; 1345 cfg->vector = vector;
1292 cfg->domain = domain; 1346 cpumask_copy(cfg->domain, tmp_mask);
1293 return 0; 1347 err = 0;
1348 break;
1294 } 1349 }
1295 return -ENOSPC; 1350 free_cpumask_var(tmp_mask);
1351 return err;
1296} 1352}
1297 1353
1298static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) 1354static int
1355assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1299{ 1356{
1300 int err; 1357 int err;
1301 unsigned long flags; 1358 unsigned long flags;
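__assign_irq_vector() shows the price of going off-stack: the old per-iteration cpumask_t temporaries become one tmp_mask allocated up front, and the success path switches from "return 0" to "err = 0; break" so that every exit passes through the single free_cpumask_var(). A skeleton of the new control flow, with the vector search elided (assign_sketch() is a hypothetical name):

static int assign_sketch(const struct cpumask *mask)
{
	cpumask_var_t tmp_mask;
	int cpu, err;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	err = -ENOSPC;
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		/* search this CPU's allocation domain for a free vector */
		err = 0;
		break;		/* a bare "return 0" here would leak tmp_mask */
	}
	free_cpumask_var(tmp_mask);
	return err;
}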
@@ -1308,23 +1365,20 @@ static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
1308 1365
1309static void __clear_irq_vector(int irq, struct irq_cfg *cfg) 1366static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1310{ 1367{
1311 cpumask_t mask;
1312 int cpu, vector; 1368 int cpu, vector;
1313 1369
1314 BUG_ON(!cfg->vector); 1370 BUG_ON(!cfg->vector);
1315 1371
1316 vector = cfg->vector; 1372 vector = cfg->vector;
1317 cpus_and(mask, cfg->domain, cpu_online_map); 1373 for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
1318 for_each_cpu_mask_nr(cpu, mask)
1319 per_cpu(vector_irq, cpu)[vector] = -1; 1374 per_cpu(vector_irq, cpu)[vector] = -1;
1320 1375
1321 cfg->vector = 0; 1376 cfg->vector = 0;
1322 cpus_clear(cfg->domain); 1377 cpumask_clear(cfg->domain);
1323 1378
1324 if (likely(!cfg->move_in_progress)) 1379 if (likely(!cfg->move_in_progress))
1325 return; 1380 return;
1326 cpus_and(mask, cfg->old_domain, cpu_online_map); 1381 for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
1327 for_each_cpu_mask_nr(cpu, mask) {
1328 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; 1382 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
1329 vector++) { 1383 vector++) {
1330 if (per_cpu(vector_irq, cpu)[vector] != irq) 1384 if (per_cpu(vector_irq, cpu)[vector] != irq)
@@ -1349,7 +1403,7 @@ void __setup_vector_irq(int cpu)
1349 if (!desc) 1403 if (!desc)
1350 continue; 1404 continue;
1351 cfg = desc->chip_data; 1405 cfg = desc->chip_data;
1352 if (!cpu_isset(cpu, cfg->domain)) 1406 if (!cpumask_test_cpu(cpu, cfg->domain))
1353 continue; 1407 continue;
1354 vector = cfg->vector; 1408 vector = cfg->vector;
1355 per_cpu(vector_irq, cpu)[vector] = irq; 1409 per_cpu(vector_irq, cpu)[vector] = irq;
@@ -1361,7 +1415,7 @@ void __setup_vector_irq(int cpu)
1361 continue; 1415 continue;
1362 1416
1363 cfg = irq_cfg(irq); 1417 cfg = irq_cfg(irq);
1364 if (!cpu_isset(cpu, cfg->domain)) 1418 if (!cpumask_test_cpu(cpu, cfg->domain))
1365 per_cpu(vector_irq, cpu)[vector] = -1; 1419 per_cpu(vector_irq, cpu)[vector] = -1;
1366 } 1420 }
1367} 1421}
@@ -1497,18 +1551,17 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
1497{ 1551{
1498 struct irq_cfg *cfg; 1552 struct irq_cfg *cfg;
1499 struct IO_APIC_route_entry entry; 1553 struct IO_APIC_route_entry entry;
1500 cpumask_t mask; 1554 unsigned int dest;
1501 1555
1502 if (!IO_APIC_IRQ(irq)) 1556 if (!IO_APIC_IRQ(irq))
1503 return; 1557 return;
1504 1558
1505 cfg = desc->chip_data; 1559 cfg = desc->chip_data;
1506 1560
1507 mask = TARGET_CPUS; 1561 if (assign_irq_vector(irq, cfg, TARGET_CPUS))
1508 if (assign_irq_vector(irq, cfg, mask))
1509 return; 1562 return;
1510 1563
1511 cpus_and(mask, cfg->domain, mask); 1564 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
1512 1565
1513 apic_printk(APIC_VERBOSE,KERN_DEBUG 1566 apic_printk(APIC_VERBOSE,KERN_DEBUG
1514 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " 1567 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@@ -1518,8 +1571,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
1518 1571
1519 1572
1520 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, 1573 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
1521 cpu_mask_to_apicid(mask), trigger, polarity, 1574 dest, trigger, polarity, cfg->vector)) {
1522 cfg->vector)) {
1523 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1575 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1524 mp_ioapics[apic].mp_apicid, pin); 1576 mp_ioapics[apic].mp_apicid, pin);
1525 __clear_irq_vector(irq, cfg); 1577 __clear_irq_vector(irq, cfg);
@@ -2241,7 +2293,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
2241 unsigned long flags; 2293 unsigned long flags;
2242 2294
2243 spin_lock_irqsave(&vector_lock, flags); 2295 spin_lock_irqsave(&vector_lock, flags);
2244 send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); 2296 send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
2245 spin_unlock_irqrestore(&vector_lock, flags); 2297 spin_unlock_irqrestore(&vector_lock, flags);
2246 2298
2247 return 1; 2299 return 1;
@@ -2290,18 +2342,17 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
2290 * as simple as edge triggered migration and we can do the irq migration 2342 * as simple as edge triggered migration and we can do the irq migration
2291 * with a simple atomic update to IO-APIC RTE. 2343 * with a simple atomic update to IO-APIC RTE.
2292 */ 2344 */
2293static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) 2345static void
2346migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2294{ 2347{
2295 struct irq_cfg *cfg; 2348 struct irq_cfg *cfg;
2296 cpumask_t tmp, cleanup_mask;
2297 struct irte irte; 2349 struct irte irte;
2298 int modify_ioapic_rte; 2350 int modify_ioapic_rte;
2299 unsigned int dest; 2351 unsigned int dest;
2300 unsigned long flags; 2352 unsigned long flags;
2301 unsigned int irq; 2353 unsigned int irq;
2302 2354
2303 cpus_and(tmp, mask, cpu_online_map); 2355 if (!cpumask_intersects(mask, cpu_online_mask))
2304 if (cpus_empty(tmp))
2305 return; 2356 return;
2306 2357
2307 irq = desc->irq; 2358 irq = desc->irq;
@@ -2314,8 +2365,7 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
2314 2365
2315 set_extra_move_desc(desc, mask); 2366 set_extra_move_desc(desc, mask);
2316 2367
2317 cpus_and(tmp, cfg->domain, mask); 2368 dest = cpu_mask_to_apicid_and(cfg->domain, mask);
2318 dest = cpu_mask_to_apicid(tmp);
2319 2369
2320 modify_ioapic_rte = desc->status & IRQ_LEVEL; 2370 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2321 if (modify_ioapic_rte) { 2371 if (modify_ioapic_rte) {
@@ -2332,14 +2382,10 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
2332 */ 2382 */
2333 modify_irte(irq, &irte); 2383 modify_irte(irq, &irte);
2334 2384
2335 if (cfg->move_in_progress) { 2385 if (cfg->move_in_progress)
2336 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); 2386 send_cleanup_vector(cfg);
2337 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2338 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2339 cfg->move_in_progress = 0;
2340 }
2341 2387
2342 desc->affinity = mask; 2388 cpumask_copy(&desc->affinity, mask);
2343} 2389}
2344 2390
2345static int migrate_irq_remapped_level_desc(struct irq_desc *desc) 2391static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2361,11 +2407,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
2361 } 2407 }
2362 2408
2363 /* everything is clear. we have the right of way */ 2409 /* everything is clear. we have the right of way */
2364 migrate_ioapic_irq_desc(desc, desc->pending_mask); 2410 migrate_ioapic_irq_desc(desc, &desc->pending_mask);
2365 2411
2366 ret = 0; 2412 ret = 0;
2367 desc->status &= ~IRQ_MOVE_PENDING; 2413 desc->status &= ~IRQ_MOVE_PENDING;
2368 cpus_clear(desc->pending_mask); 2414 cpumask_clear(&desc->pending_mask);
2369 2415
2370unmask: 2416unmask:
2371 unmask_IO_APIC_irq_desc(desc); 2417 unmask_IO_APIC_irq_desc(desc);
@@ -2402,11 +2448,12 @@ static void ir_irq_migration(struct work_struct *work)
2402/* 2448/*
2403 * Migrates the IRQ destination in the process context. 2449 * Migrates the IRQ destination in the process context.
2404 */ 2450 */
2405static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask) 2451static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
2452 const struct cpumask *mask)
2406{ 2453{
2407 if (desc->status & IRQ_LEVEL) { 2454 if (desc->status & IRQ_LEVEL) {
2408 desc->status |= IRQ_MOVE_PENDING; 2455 desc->status |= IRQ_MOVE_PENDING;
2409 desc->pending_mask = mask; 2456 cpumask_copy(&desc->pending_mask, mask);
2410 migrate_irq_remapped_level_desc(desc); 2457 migrate_irq_remapped_level_desc(desc);
2411 return; 2458 return;
2412 } 2459 }
@@ -2418,7 +2465,7 @@ static void set_ir_ioapic_affinity_irq(unsigned int irq,
2418{ 2465{
2419 struct irq_desc *desc = irq_to_desc(irq); 2466 struct irq_desc *desc = irq_to_desc(irq);
2420 2467
2421 set_ir_ioapic_affinity_irq_desc(desc, *mask); 2468 set_ir_ioapic_affinity_irq_desc(desc, mask);
2422} 2469}
2423#endif 2470#endif
2424 2471
@@ -2449,7 +2496,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
2449 if (!cfg->move_cleanup_count) 2496 if (!cfg->move_cleanup_count)
2450 goto unlock; 2497 goto unlock;
2451 2498
2452 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) 2499 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2453 goto unlock; 2500 goto unlock;
2454 2501
2455 __get_cpu_var(vector_irq)[vector] = -1; 2502 __get_cpu_var(vector_irq)[vector] = -1;
@@ -2486,20 +2533,14 @@ static void irq_complete_move(struct irq_desc **descp)
2486 2533
2487 vector = ~get_irq_regs()->orig_ax; 2534 vector = ~get_irq_regs()->orig_ax;
2488 me = smp_processor_id(); 2535 me = smp_processor_id();
2489 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
2490 cpumask_t cleanup_mask;
2491
2492#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC 2536#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
2493 *descp = desc = move_irq_desc(desc, me); 2537 *descp = desc = move_irq_desc(desc, me);
2494 /* get the new one */ 2538 /* get the new one */
2495 cfg = desc->chip_data; 2539 cfg = desc->chip_data;
2496#endif 2540#endif
2497 2541
2498 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); 2542 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2499 cfg->move_cleanup_count = cpus_weight(cleanup_mask); 2543 send_cleanup_vector(cfg);
2500 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2501 cfg->move_in_progress = 0;
2502 }
2503} 2544}
2504#else 2545#else
2505static inline void irq_complete_move(struct irq_desc **descp) {} 2546static inline void irq_complete_move(struct irq_desc **descp) {}
@@ -3224,16 +3265,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3224 struct irq_cfg *cfg; 3265 struct irq_cfg *cfg;
3225 int err; 3266 int err;
3226 unsigned dest; 3267 unsigned dest;
3227 cpumask_t tmp;
3228 3268
3229 cfg = irq_cfg(irq); 3269 cfg = irq_cfg(irq);
3230 tmp = TARGET_CPUS; 3270 err = assign_irq_vector(irq, cfg, TARGET_CPUS);
3231 err = assign_irq_vector(irq, cfg, tmp);
3232 if (err) 3271 if (err)
3233 return err; 3272 return err;
3234 3273
3235 cpus_and(tmp, cfg->domain, tmp); 3274 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
3236 dest = cpu_mask_to_apicid(tmp);
3237 3275
3238#ifdef CONFIG_INTR_REMAP 3276#ifdef CONFIG_INTR_REMAP
3239 if (irq_remapped(irq)) { 3277 if (irq_remapped(irq)) {
@@ -3293,19 +3331,12 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3293 struct irq_cfg *cfg; 3331 struct irq_cfg *cfg;
3294 struct msi_msg msg; 3332 struct msi_msg msg;
3295 unsigned int dest; 3333 unsigned int dest;
3296 cpumask_t tmp;
3297 3334
3298 if (!cpumask_intersects(mask, cpu_online_mask)) 3335 dest = set_desc_affinity(desc, mask);
3336 if (dest == BAD_APICID)
3299 return; 3337 return;
3300 3338
3301 cfg = desc->chip_data; 3339 cfg = desc->chip_data;
3302 if (assign_irq_vector(irq, cfg, *mask))
3303 return;
3304
3305 set_extra_move_desc(desc, *mask);
3306
3307 cpumask_and(&tmp, &cfg->domain, mask);
3308 dest = cpu_mask_to_apicid(tmp);
3309 3340
3310 read_msi_msg_desc(desc, &msg); 3341 read_msi_msg_desc(desc, &msg);
3311 3342
@@ -3315,37 +3346,27 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3315 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3346 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3316 3347
3317 write_msi_msg_desc(desc, &msg); 3348 write_msi_msg_desc(desc, &msg);
3318 cpumask_copy(&desc->affinity, mask);
3319} 3349}
3320#ifdef CONFIG_INTR_REMAP 3350#ifdef CONFIG_INTR_REMAP
3321/* 3351/*
3322 * Migrate the MSI irq to another cpumask. This migration is 3352 * Migrate the MSI irq to another cpumask. This migration is
3323 * done in the process context using interrupt-remapping hardware. 3353 * done in the process context using interrupt-remapping hardware.
3324 */ 3354 */
3325static void ir_set_msi_irq_affinity(unsigned int irq, 3355static void
3326 const struct cpumask *mask) 3356ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3327{ 3357{
3328 struct irq_desc *desc = irq_to_desc(irq); 3358 struct irq_desc *desc = irq_to_desc(irq);
3329 struct irq_cfg *cfg; 3359 struct irq_cfg *cfg = desc->chip_data;
3330 unsigned int dest; 3360 unsigned int dest;
3331 cpumask_t tmp, cleanup_mask;
3332 struct irte irte; 3361 struct irte irte;
3333 3362
3334 if (!cpumask_intersects(mask, cpu_online_mask))
3335 return;
3336
3337 if (get_irte(irq, &irte)) 3363 if (get_irte(irq, &irte))
3338 return; 3364 return;
3339 3365
3340 cfg = desc->chip_data; 3366 dest = set_desc_affinity(desc, mask);
3341 if (assign_irq_vector(irq, cfg, *mask)) 3367 if (dest == BAD_APICID)
3342 return; 3368 return;
3343 3369
3344 set_extra_move_desc(desc, *mask);
3345
3346 cpumask_and(&tmp, &cfg->domain, mask);
3347 dest = cpu_mask_to_apicid(tmp);
3348
3349 irte.vector = cfg->vector; 3370 irte.vector = cfg->vector;
3350 irte.dest_id = IRTE_DEST(dest); 3371 irte.dest_id = IRTE_DEST(dest);
3351 3372
@@ -3359,14 +3380,8 @@ static void ir_set_msi_irq_affinity(unsigned int irq,
3359 * at the new destination. So, time to cleanup the previous 3380 * at the new destination. So, time to cleanup the previous
3360 * vector allocation. 3381 * vector allocation.
3361 */ 3382 */
3362 if (cfg->move_in_progress) { 3383 if (cfg->move_in_progress)
3363 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); 3384 send_cleanup_vector(cfg);
3364 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
3365 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
3366 cfg->move_in_progress = 0;
3367 }
3368
3369 cpumask_copy(&desc->affinity, mask);
3370} 3385}
3371 3386
3372#endif 3387#endif
@@ -3563,19 +3578,12 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3563 struct irq_cfg *cfg; 3578 struct irq_cfg *cfg;
3564 struct msi_msg msg; 3579 struct msi_msg msg;
3565 unsigned int dest; 3580 unsigned int dest;
3566 cpumask_t tmp;
3567 3581
3568 if (!cpumask_intersects(mask, cpu_online_mask)) 3582 dest = set_desc_affinity(desc, mask);
3583 if (dest == BAD_APICID)
3569 return; 3584 return;
3570 3585
3571 cfg = desc->chip_data; 3586 cfg = desc->chip_data;
3572 if (assign_irq_vector(irq, cfg, *mask))
3573 return;
3574
3575 set_extra_move_desc(desc, *mask);
3576
3577 cpumask_and(&tmp, &cfg->domain, mask);
3578 dest = cpu_mask_to_apicid(tmp);
3579 3587
3580 dmar_msi_read(irq, &msg); 3588 dmar_msi_read(irq, &msg);
3581 3589
@@ -3585,7 +3593,6 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3585 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3593 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3586 3594
3587 dmar_msi_write(irq, &msg); 3595 dmar_msi_write(irq, &msg);
3588 cpumask_copy(&desc->affinity, mask);
3589} 3596}
3590 3597
3591#endif /* CONFIG_SMP */ 3598#endif /* CONFIG_SMP */
@@ -3625,19 +3632,12 @@ static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3625 struct irq_cfg *cfg; 3632 struct irq_cfg *cfg;
3626 struct msi_msg msg; 3633 struct msi_msg msg;
3627 unsigned int dest; 3634 unsigned int dest;
3628 cpumask_t tmp;
3629 3635
3630 if (!cpumask_intersects(mask, cpu_online_mask)) 3636 dest = set_desc_affinity(desc, mask);
3637 if (dest == BAD_APICID)
3631 return; 3638 return;
3632 3639
3633 cfg = desc->chip_data; 3640 cfg = desc->chip_data;
3634 if (assign_irq_vector(irq, cfg, *mask))
3635 return;
3636
3637 set_extra_move_desc(desc, *mask);
3638
3639 cpumask_and(&tmp, &cfg->domain, mask);
3640 dest = cpu_mask_to_apicid(tmp);
3641 3641
3642 hpet_msi_read(irq, &msg); 3642 hpet_msi_read(irq, &msg);
3643 3643
@@ -3647,7 +3647,6 @@ static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3647 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3647 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3648 3648
3649 hpet_msi_write(irq, &msg); 3649 hpet_msi_write(irq, &msg);
3650 cpumask_copy(&desc->affinity, mask);
3651} 3650}
3652 3651
3653#endif /* CONFIG_SMP */ 3652#endif /* CONFIG_SMP */
@@ -3707,22 +3706,14 @@ static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
3707 struct irq_desc *desc = irq_to_desc(irq); 3706 struct irq_desc *desc = irq_to_desc(irq);
3708 struct irq_cfg *cfg; 3707 struct irq_cfg *cfg;
3709 unsigned int dest; 3708 unsigned int dest;
3710 cpumask_t tmp;
3711 3709
3712 if (!cpumask_intersects(mask, cpu_online_mask)) 3710 dest = set_desc_affinity(desc, mask);
3711 if (dest == BAD_APICID)
3713 return; 3712 return;
3714 3713
3715 cfg = desc->chip_data; 3714 cfg = desc->chip_data;
3716 if (assign_irq_vector(irq, cfg, *mask))
3717 return;
3718
3719 set_extra_move_desc(desc, *mask);
3720
3721 cpumask_and(&tmp, &cfg->domain, mask);
3722 dest = cpu_mask_to_apicid(tmp);
3723 3715
3724 target_ht_irq(irq, dest, cfg->vector); 3716 target_ht_irq(irq, dest, cfg->vector);
3725 cpumask_copy(&desc->affinity, mask);
3726} 3717}
3727 3718
3728#endif 3719#endif
@@ -3742,17 +3733,14 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3742{ 3733{
3743 struct irq_cfg *cfg; 3734 struct irq_cfg *cfg;
3744 int err; 3735 int err;
3745 cpumask_t tmp;
3746 3736
3747 cfg = irq_cfg(irq); 3737 cfg = irq_cfg(irq);
3748 tmp = TARGET_CPUS; 3738 err = assign_irq_vector(irq, cfg, TARGET_CPUS);
3749 err = assign_irq_vector(irq, cfg, tmp);
3750 if (!err) { 3739 if (!err) {
3751 struct ht_irq_msg msg; 3740 struct ht_irq_msg msg;
3752 unsigned dest; 3741 unsigned dest;
3753 3742
3754 cpus_and(tmp, cfg->domain, tmp); 3743 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
3755 dest = cpu_mask_to_apicid(tmp);
3756 3744
3757 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3745 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3758 3746
@@ -3788,7 +3776,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3788int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, 3776int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3789 unsigned long mmr_offset) 3777 unsigned long mmr_offset)
3790{ 3778{
3791 const cpumask_t *eligible_cpu = get_cpu_mask(cpu); 3779 const struct cpumask *eligible_cpu = cpumask_of(cpu);
3792 struct irq_cfg *cfg; 3780 struct irq_cfg *cfg;
3793 int mmr_pnode; 3781 int mmr_pnode;
3794 unsigned long mmr_value; 3782 unsigned long mmr_value;
@@ -3798,7 +3786,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3798 3786
3799 cfg = irq_cfg(irq); 3787 cfg = irq_cfg(irq);
3800 3788
3801 err = assign_irq_vector(irq, cfg, *eligible_cpu); 3789 err = assign_irq_vector(irq, cfg, eligible_cpu);
3802 if (err != 0) 3790 if (err != 0)
3803 return err; 3791 return err;
3804 3792
@@ -3817,7 +3805,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3817 entry->polarity = 0; 3805 entry->polarity = 0;
3818 entry->trigger = 0; 3806 entry->trigger = 0;
3819 entry->mask = 0; 3807 entry->mask = 0;
3820 entry->dest = cpu_mask_to_apicid(*eligible_cpu); 3808 entry->dest = cpu_mask_to_apicid(eligible_cpu);
3821 3809
3822 mmr_pnode = uv_blade_to_pnode(mmr_blade); 3810 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3823 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); 3811 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -4028,7 +4016,7 @@ void __init setup_ioapic_dest(void)
4028 int pin, ioapic, irq, irq_entry; 4016 int pin, ioapic, irq, irq_entry;
4029 struct irq_desc *desc; 4017 struct irq_desc *desc;
4030 struct irq_cfg *cfg; 4018 struct irq_cfg *cfg;
4031 cpumask_t mask; 4019 const struct cpumask *mask;
4032 4020
4033 if (skip_ioapic_setup == 1) 4021 if (skip_ioapic_setup == 1)
4034 return; 4022 return;
@@ -4059,7 +4047,7 @@ void __init setup_ioapic_dest(void)
4059 */ 4047 */
4060 if (desc->status & 4048 if (desc->status &
4061 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) 4049 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
4062 mask = desc->affinity; 4050 mask = &desc->affinity;
4063 else 4051 else
4064 mask = TARGET_CPUS; 4052 mask = TARGET_CPUS;
4065 4053
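
Each ->set_affinity hunk above deletes the same open-coded sequence (online-mask intersection check, assign_irq_vector(), set_extra_move_desc(), the cpu_mask_to_apicid() destination lookup, and the desc->affinity copy) in favour of a single set_desc_affinity() helper. A plausible reconstruction of that helper from the removed lines; this is a sketch only, and the in-tree version may differ in detail:

static unsigned int
set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
	struct irq_cfg *cfg;

	/* refuse masks that name no online CPU */
	if (!cpumask_intersects(mask, cpu_online_mask))
		return BAD_APICID;

	cfg = desc->chip_data;
	if (assign_irq_vector(desc->irq, cfg, mask))
		return BAD_APICID;

	set_extra_move_desc(desc, mask);

	/* record the new affinity, then derive the APIC destination */
	cpumask_copy(&desc->affinity, mask);
	return cpu_mask_to_apicid_and(cfg->domain, mask);
}

Callers then only need to test for BAD_APICID, as the dmar/hpet/ht hunks above show.
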
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index f1c688e46f35..285bbf8831fa 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
@@ -116,18 +116,18 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector)
116/* 116/*
117 * This is only used on smaller machines. 117 * This is only used on smaller machines.
118 */ 118 */
119void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) 119void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
120{ 120{
121 unsigned long mask = cpus_addr(cpumask)[0]; 121 unsigned long mask = cpumask_bits(cpumask)[0];
122 unsigned long flags; 122 unsigned long flags;
123 123
124 local_irq_save(flags); 124 local_irq_save(flags);
125 WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); 125 WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
126 __send_IPI_dest_field(mask, vector); 126 __send_IPI_dest_field(mask, vector);
127 local_irq_restore(flags); 127 local_irq_restore(flags);
128} 128}
129 129
130void send_IPI_mask_sequence(cpumask_t mask, int vector) 130void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
131{ 131{
132 unsigned long flags; 132 unsigned long flags;
133 unsigned int query_cpu; 133 unsigned int query_cpu;
@@ -139,12 +139,24 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector)
139 */ 139 */
140 140
141 local_irq_save(flags); 141 local_irq_save(flags);
142 for_each_possible_cpu(query_cpu) { 142 for_each_cpu(query_cpu, mask)
143 if (cpu_isset(query_cpu, mask)) { 143 __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
144 local_irq_restore(flags);
145}
146
147void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
148{
149 unsigned long flags;
150 unsigned int query_cpu;
151 unsigned int this_cpu = smp_processor_id();
152
153 /* See Hack comment above */
154
155 local_irq_save(flags);
156 for_each_cpu(query_cpu, mask)
157 if (query_cpu != this_cpu)
144 __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), 158 __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
145 vector); 159 vector);
146 }
147 }
148 local_irq_restore(flags); 160 local_irq_restore(flags);
149} 161}
150 162
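
The switch from a by-value cpumask_t to a const struct cpumask * also changes the loop shape: rather than visiting every possible CPU and testing membership, for_each_cpu() walks only the set bits. A minimal before/after sketch, where do_send() is a stand-in for the __send_IPI_dest_field(cpu_to_logical_apicid(...), vector) call above:

unsigned int cpu;

/* old: one membership test per possible CPU */
for_each_possible_cpu(cpu)
	if (cpu_isset(cpu, *mask))
		do_send(cpu);

/* new: visits only the CPUs actually set in *mask */
for_each_cpu(cpu, mask)
	do_send(cpu);

The new send_IPI_mask_allbutself() follows the same pattern with one extra smp_processor_id() comparison, so callers such as smp_send_nmi_allbutself() (below) no longer have to build a temporary mask just to clear their own bit.
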
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3f1d9d18df67..bce53e1352a0 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -9,6 +9,7 @@
9#include <asm/apic.h> 9#include <asm/apic.h>
10#include <asm/io_apic.h> 10#include <asm/io_apic.h>
11#include <asm/smp.h> 11#include <asm/smp.h>
12#include <asm/irq.h>
12 13
13atomic_t irq_err_count; 14atomic_t irq_err_count;
14 15
@@ -190,3 +191,5 @@ u64 arch_irq_stat(void)
190#endif 191#endif
191 return sum; 192 return sum;
192} 193}
194
195EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 9cf9cbbf7a02..9dc5588f336a 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -233,27 +233,28 @@ unsigned int do_IRQ(struct pt_regs *regs)
233#ifdef CONFIG_HOTPLUG_CPU 233#ifdef CONFIG_HOTPLUG_CPU
234#include <mach_apic.h> 234#include <mach_apic.h>
235 235
236void fixup_irqs(cpumask_t map) 236/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
237void fixup_irqs(void)
237{ 238{
238 unsigned int irq; 239 unsigned int irq;
239 static int warned; 240 static int warned;
240 struct irq_desc *desc; 241 struct irq_desc *desc;
241 242
242 for_each_irq_desc(irq, desc) { 243 for_each_irq_desc(irq, desc) {
243 cpumask_t mask; 244 const struct cpumask *affinity;
244 245
245 if (!desc) 246 if (!desc)
246 continue; 247 continue;
247 if (irq == 2) 248 if (irq == 2)
248 continue; 249 continue;
249 250
250 cpus_and(mask, desc->affinity, map); 251 affinity = &desc->affinity;
251 if (any_online_cpu(mask) == NR_CPUS) { 252 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
252 printk("Breaking affinity for irq %i\n", irq); 253 printk("Breaking affinity for irq %i\n", irq);
253 mask = map; 254 affinity = cpu_all_mask;
254 } 255 }
255 if (desc->chip->set_affinity) 256 if (desc->chip->set_affinity)
256 desc->chip->set_affinity(irq, &mask); 257 desc->chip->set_affinity(irq, affinity);
257 else if (desc->action && !(warned++)) 258 else if (desc->action && !(warned++))
258 printk("Cannot set affinity for irq %i\n", irq); 259 printk("Cannot set affinity for irq %i\n", irq);
259 } 260 }
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 54c69d47a771..6383d50f82ea 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -80,16 +80,17 @@ asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
80} 80}
81 81
82#ifdef CONFIG_HOTPLUG_CPU 82#ifdef CONFIG_HOTPLUG_CPU
83void fixup_irqs(cpumask_t map) 83/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
84void fixup_irqs(void)
84{ 85{
85 unsigned int irq; 86 unsigned int irq;
86 static int warned; 87 static int warned;
87 struct irq_desc *desc; 88 struct irq_desc *desc;
88 89
89 for_each_irq_desc(irq, desc) { 90 for_each_irq_desc(irq, desc) {
90 cpumask_t mask;
91 int break_affinity = 0; 91 int break_affinity = 0;
92 int set_affinity = 1; 92 int set_affinity = 1;
93 const struct cpumask *affinity;
93 94
94 if (!desc) 95 if (!desc)
95 continue; 96 continue;
@@ -99,23 +100,23 @@ void fixup_irqs(cpumask_t map)
 99 /* interrupts are disabled at this point */ 100 /* interrupts are disabled at this point */
100 spin_lock(&desc->lock); 101 spin_lock(&desc->lock);
101 102
103 affinity = &desc->affinity;
102 if (!irq_has_action(irq) || 104 if (!irq_has_action(irq) ||
103 cpus_equal(desc->affinity, map)) { 105 cpumask_equal(affinity, cpu_online_mask)) {
104 spin_unlock(&desc->lock); 106 spin_unlock(&desc->lock);
105 continue; 107 continue;
106 } 108 }
107 109
108 cpus_and(mask, desc->affinity, map); 110 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
109 if (cpus_empty(mask)) {
110 break_affinity = 1; 111 break_affinity = 1;
111 mask = map; 112 affinity = cpu_all_mask;
112 } 113 }
113 114
114 if (desc->chip->mask) 115 if (desc->chip->mask)
115 desc->chip->mask(irq); 116 desc->chip->mask(irq);
116 117
117 if (desc->chip->set_affinity) 118 if (desc->chip->set_affinity)
118 desc->chip->set_affinity(irq, &mask); 119 desc->chip->set_affinity(irq, affinity);
119 else if (!(warned++)) 120 else if (!(warned++))
120 set_affinity = 0; 121 set_affinity = 0;
121 122
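
Both fixup_irqs() variants now apply the same fallback rule: keep an IRQ's recorded affinity if it still contains at least one online CPU, otherwise break the affinity and retarget to all CPUs so that ->set_affinity() can pick an online one. The rule, condensed (pick_irq_target() is a hypothetical wrapper, not part of this patch):

static const struct cpumask *pick_irq_target(struct irq_desc *desc)
{
	const struct cpumask *affinity = &desc->affinity;

	/* no online CPU left in the recorded affinity? */
	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
		affinity = cpu_all_mask;	/* break affinity */

	return affinity;
}
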
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 203384ed2b5d..84723295f88a 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -110,6 +110,18 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
110 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 110 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
111}; 111};
112 112
113int vector_used_by_percpu_irq(unsigned int vector)
114{
115 int cpu;
116
117 for_each_online_cpu(cpu) {
118 if (per_cpu(vector_irq, cpu)[vector] != -1)
119 return 1;
120 }
121
122 return 0;
123}
124
113/* Overridden in paravirt.c */ 125/* Overridden in paravirt.c */
114void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); 126void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
115 127
@@ -146,10 +158,12 @@ void __init native_init_IRQ(void)
146 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); 158 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
147 159
148 /* IPI for single call function */ 160 /* IPI for single call function */
149 set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt); 161 alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
162 call_function_single_interrupt);
150 163
151 /* Low priority IPI to cleanup after moving an irq */ 164 /* Low priority IPI to cleanup after moving an irq */
152 set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); 165 set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
166 set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
153#endif 167#endif
154 168
155#ifdef CONFIG_X86_LOCAL_APIC 169#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 6190e6ef546c..31ebfe38e96c 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -69,6 +69,18 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
69 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 69 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
70}; 70};
71 71
72int vector_used_by_percpu_irq(unsigned int vector)
73{
74 int cpu;
75
76 for_each_online_cpu(cpu) {
77 if (per_cpu(vector_irq, cpu)[vector] != -1)
78 return 1;
79 }
80
81 return 0;
82}
83
72void __init init_ISA_irqs(void) 84void __init init_ISA_irqs(void)
73{ 85{
74 int i; 86 int i;
@@ -121,6 +133,7 @@ static void __init smp_intr_init(void)
121 133
122 /* Low priority IPI to cleanup after moving an irq */ 134 /* Low priority IPI to cleanup after moving an irq */
123 set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); 135 set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
136 set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
124#endif 137#endif
125} 138}
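
vector_used_by_percpu_irq() scans every online CPU's vector_irq[] translation table, so a vector counts as busy while any CPU still maps it to an IRQ. A hedged sketch of how an allocator could combine it with the used_vectors bitmap these hunks also populate (find_free_vector() is hypothetical, not a function in this patch):

static int find_free_vector(void)
{
	int vector;

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (test_bit(vector, used_vectors))
			continue;	/* statically reserved */
		if (vector_used_by_percpu_irq(vector))
			continue;	/* still live on some CPU */
		return vector;
	}
	return -ENOSPC;
}
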
126 139
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 61f718df6eec..ba7b9a0e6063 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -592,10 +592,7 @@ static int crash_nmi_callback(struct notifier_block *self,
592 592
593static void smp_send_nmi_allbutself(void) 593static void smp_send_nmi_allbutself(void)
594{ 594{
595 cpumask_t mask = cpu_online_map; 595 send_IPI_allbutself(NMI_VECTOR);
596 cpu_clear(safe_smp_processor_id(), mask);
597 if (!cpus_empty(mask))
598 send_IPI_mask(mask, NMI_VECTOR);
599} 596}
600 597
601static struct notifier_block crash_nmi_nb = { 598static struct notifier_block crash_nmi_nb = {
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 8e8b1193add5..49f3f709ee1f 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -152,6 +152,11 @@ void __init setup_per_cpu_areas(void)
152 old_size = PERCPU_ENOUGH_ROOM; 152 old_size = PERCPU_ENOUGH_ROOM;
153 align = max_t(unsigned long, PAGE_SIZE, align); 153 align = max_t(unsigned long, PAGE_SIZE, align);
154 size = roundup(old_size, align); 154 size = roundup(old_size, align);
155
156 printk(KERN_INFO
157 "NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
158 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
159
155 printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", 160 printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
156 size); 161 size);
157 162
@@ -168,24 +173,24 @@ void __init setup_per_cpu_areas(void)
168 "cpu %d has no node %d or node-local memory\n", 173 "cpu %d has no node %d or node-local memory\n",
169 cpu, node); 174 cpu, node);
170 if (ptr) 175 if (ptr)
171 printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n", 176 printk(KERN_DEBUG
177 "per cpu data for cpu%d at %016lx\n",
172 cpu, __pa(ptr)); 178 cpu, __pa(ptr));
173 } 179 }
174 else { 180 else {
175 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, 181 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
176 __pa(MAX_DMA_ADDRESS)); 182 __pa(MAX_DMA_ADDRESS));
177 if (ptr) 183 if (ptr)
178 printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n", 184 printk(KERN_DEBUG
179 cpu, node, __pa(ptr)); 185 "per cpu data for cpu%d on node%d "
186 "at %016lx\n",
187 cpu, node, __pa(ptr));
180 } 188 }
181#endif 189#endif
182 per_cpu_offset(cpu) = ptr - __per_cpu_start; 190 per_cpu_offset(cpu) = ptr - __per_cpu_start;
183 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 191 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
184 } 192 }
185 193
186 printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
187 NR_CPUS, nr_cpu_ids, nr_node_ids);
188
189 /* Setup percpu data maps */ 194 /* Setup percpu data maps */
190 setup_per_cpu_maps(); 195 setup_per_cpu_maps();
191 196
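
The relocated printk reports three related limits: NR_CPUS is the compile-time maximum, nr_cpu_ids the runtime number of possible CPU ids, and nr_cpumask_bits the width cpumask operations actually iterate. The definition in this era's <linux/cpumask.h> is, roughly:

#ifdef CONFIG_CPUMASK_OFFSTACK
#define nr_cpumask_bits	nr_cpu_ids	/* runtime-sized masks */
#else
#define nr_cpumask_bits	NR_CPUS		/* fixed-size masks */
#endif

so a MAXSMP kernel (which now selects CPUMASK_OFFSTACK) stops walking all 4096 bits on machines with far fewer CPUs.
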
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 7e558db362c1..beea2649a240 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -118,22 +118,22 @@ static void native_smp_send_reschedule(int cpu)
118 WARN_ON(1); 118 WARN_ON(1);
119 return; 119 return;
120 } 120 }
121 send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); 121 send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
122} 122}
123 123
124void native_send_call_func_single_ipi(int cpu) 124void native_send_call_func_single_ipi(int cpu)
125{ 125{
126 send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); 126 send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
127} 127}
128 128
129void native_send_call_func_ipi(cpumask_t mask) 129void native_send_call_func_ipi(const struct cpumask *mask)
130{ 130{
131 cpumask_t allbutself; 131 cpumask_t allbutself;
132 132
133 allbutself = cpu_online_map; 133 allbutself = cpu_online_map;
134 cpu_clear(smp_processor_id(), allbutself); 134 cpu_clear(smp_processor_id(), allbutself);
135 135
136 if (cpus_equal(mask, allbutself) && 136 if (cpus_equal(*mask, allbutself) &&
137 cpus_equal(cpu_online_map, cpu_callout_map)) 137 cpus_equal(cpu_online_map, cpu_callout_map))
138 send_IPI_allbutself(CALL_FUNCTION_VECTOR); 138 send_IPI_allbutself(CALL_FUNCTION_VECTOR);
139 else 139 else
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c5392058cd07..9e177a4077ee 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1259,6 +1259,15 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
1259 check_nmi_watchdog(); 1259 check_nmi_watchdog();
1260} 1260}
1261 1261
1262static int __initdata setup_possible_cpus = -1;
1263static int __init _setup_possible_cpus(char *str)
1264{
1265 get_option(&str, &setup_possible_cpus);
1266 return 0;
1267}
1268early_param("possible_cpus", _setup_possible_cpus);
1269
1270
1262/* 1271/*
1263 * cpu_possible_map should be static, it cannot change as CPUs 1272 * cpu_possible_map should be static, it cannot change as CPUs
1264 * are onlined or offlined. The reason is per-cpu data-structures 1273 * are onlined or offlined. The reason is per-cpu data-structures
@@ -1271,7 +1280,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
1271 * 1280 *
1272 * Three ways to find out the number of additional hotplug CPUs: 1281 * Three ways to find out the number of additional hotplug CPUs:
1273 * - If the BIOS specified disabled CPUs in ACPI/mptables use that. 1282 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
1274 * - The user can overwrite it with additional_cpus=NUM 1283 * - The user can overwrite it with possible_cpus=NUM
1275 * - Otherwise don't reserve additional CPUs. 1284 * - Otherwise don't reserve additional CPUs.
1276 * We do this because additional CPUs waste a lot of memory. 1285 * We do this because additional CPUs waste a lot of memory.
1277 * -AK 1286 * -AK
@@ -1284,9 +1293,17 @@ __init void prefill_possible_map(void)
1284 if (!num_processors) 1293 if (!num_processors)
1285 num_processors = 1; 1294 num_processors = 1;
1286 1295
1287 possible = num_processors + disabled_cpus; 1296 if (setup_possible_cpus == -1)
1288 if (possible > NR_CPUS) 1297 possible = num_processors + disabled_cpus;
1289 possible = NR_CPUS; 1298 else
1299 possible = setup_possible_cpus;
1300
1301 if (possible > CONFIG_NR_CPUS) {
1302 printk(KERN_WARNING
1303 "%d Processors exceeds NR_CPUS limit of %d\n",
1304 possible, CONFIG_NR_CPUS);
1305 possible = CONFIG_NR_CPUS;
1306 }
1290 1307
1291 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", 1308 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
1292 possible, max_t(int, possible - num_processors, 0)); 1309 possible, max_t(int, possible - num_processors, 0));
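
Net policy of the hunk above: a possible_cpus= value from the kernel command line overrides the BIOS-derived count, and either source is clamped to the compile-time limit. Condensed sketch (the boot-line value, e.g. possible_cpus=16, is user-chosen):

int possible;

possible = (setup_possible_cpus == -1)
		? num_processors + disabled_cpus	/* BIOS-derived */
		: setup_possible_cpus;			/* command line */

if (possible > CONFIG_NR_CPUS)
	possible = CONFIG_NR_CPUS;			/* hard clamp */
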
@@ -1351,7 +1368,7 @@ void cpu_disable_common(void)
1351 lock_vector_lock(); 1368 lock_vector_lock();
1352 remove_cpu_from_maps(cpu); 1369 remove_cpu_from_maps(cpu);
1353 unlock_vector_lock(); 1370 unlock_vector_lock();
1354 fixup_irqs(cpu_online_map); 1371 fixup_irqs();
1355} 1372}
1356 1373
1357int native_cpu_disable(void) 1374int native_cpu_disable(void)
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index 8da059f949be..ce5054642247 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -163,7 +163,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
163 * We have to send the IPI only to 163 * We have to send the IPI only to
164 * CPUs affected. 164 * CPUs affected.
165 */ 165 */
166 send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR); 166 send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
167 167
168 while (!cpus_empty(flush_cpumask)) 168 while (!cpus_empty(flush_cpumask))
169 /* nothing. lockup detection does not belong here */ 169 /* nothing. lockup detection does not belong here */
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index 29887d7081a9..f8be6f1d2e48 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -191,7 +191,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
191 * We have to send the IPI only to 191 * We have to send the IPI only to
192 * CPUs affected. 192 * CPUs affected.
193 */ 193 */
194 send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender); 194 send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
195 195
196 while (!cpus_empty(f->flush_cpumask)) 196 while (!cpus_empty(f->flush_cpumask))
197 cpu_relax(); 197 cpu_relax();
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 141907ab6e22..2d1f4c7e4052 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -72,9 +72,6 @@
72 72
73#include "cpu/mcheck/mce.h" 73#include "cpu/mcheck/mce.h"
74 74
75DECLARE_BITMAP(used_vectors, NR_VECTORS);
76EXPORT_SYMBOL_GPL(used_vectors);
77
78asmlinkage int system_call(void); 75asmlinkage int system_call(void);
79 76
80/* Do we ignore FPU interrupts ? */ 77/* Do we ignore FPU interrupts ? */
@@ -89,6 +86,9 @@ gate_desc idt_table[256]
89 __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; 86 __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
90#endif 87#endif
91 88
89DECLARE_BITMAP(used_vectors, NR_VECTORS);
90EXPORT_SYMBOL_GPL(used_vectors);
91
92static int ignore_nmis; 92static int ignore_nmis;
93 93
94static inline void conditional_sti(struct pt_regs *regs) 94static inline void conditional_sti(struct pt_regs *regs)
@@ -941,9 +941,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
941 941
942void __init trap_init(void) 942void __init trap_init(void)
943{ 943{
944#ifdef CONFIG_X86_32
945 int i; 944 int i;
946#endif
947 945
948#ifdef CONFIG_EISA 946#ifdef CONFIG_EISA
949 void __iomem *p = early_ioremap(0x0FFFD9, 4); 947 void __iomem *p = early_ioremap(0x0FFFD9, 4);
@@ -1000,11 +998,15 @@ void __init trap_init(void)
1000 } 998 }
1001 999
1002 set_system_trap_gate(SYSCALL_VECTOR, &system_call); 1000 set_system_trap_gate(SYSCALL_VECTOR, &system_call);
1001#endif
1003 1002
1004 /* Reserve all the builtin and the syscall vector: */ 1003 /* Reserve all the builtin and the syscall vector: */
1005 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) 1004 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
1006 set_bit(i, used_vectors); 1005 set_bit(i, used_vectors);
1007 1006
1007#ifdef CONFIG_X86_64
1008 set_bit(IA32_SYSCALL_VECTOR, used_vectors);
1009#else
1008 set_bit(SYSCALL_VECTOR, used_vectors); 1010 set_bit(SYSCALL_VECTOR, used_vectors);
1009#endif 1011#endif
1010 /* 1012 /*
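
used_vectors is an ordinary fixed-size kernel bitmap; moving its definition next to idt_table is cosmetic, while the functional change is that 64-bit now reserves IA32_SYSCALL_VECTOR in it, matching the IRQ_MOVE_CLEANUP_VECTOR reservations added in irqinit_*.c above. Illustrative fragment of the reserve/query pattern the bitmap supports (use_vector() is hypothetical):

DECLARE_BITMAP(used_vectors, NR_VECTORS);	/* in BSS, starts all-clear */

set_bit(IA32_SYSCALL_VECTOR, used_vectors);	/* mark vector as taken */

if (!test_bit(vector, used_vectors))
	use_vector(vector);			/* free for assignment */
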
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
index 3624a364b7f3..bc4c7840b2a8 100644
--- a/arch/x86/mach-generic/bigsmp.c
+++ b/arch/x86/mach-generic/bigsmp.c
@@ -42,9 +42,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
42 { } 42 { }
43}; 43};
44 44
45static cpumask_t vector_allocation_domain(int cpu) 45static void vector_allocation_domain(int cpu, cpumask_t *retmask)
46{ 46{
47 return cpumask_of_cpu(cpu); 47 cpus_clear(*retmask);
48 cpu_set(cpu, *retmask);
48} 49}
49 50
50static int probe_bigsmp(void) 51static int probe_bigsmp(void)
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c
index 7b4e6d0d1690..4ba5ccaa1584 100644
--- a/arch/x86/mach-generic/es7000.c
+++ b/arch/x86/mach-generic/es7000.c
@@ -87,7 +87,7 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
87} 87}
88#endif 88#endif
89 89
90static cpumask_t vector_allocation_domain(int cpu) 90static void vector_allocation_domain(int cpu, cpumask_t *retmask)
91{ 91{
92 /* Careful. Some cpus do not strictly honor the set of cpus 92 /* Careful. Some cpus do not strictly honor the set of cpus
93 * specified in the interrupt destination when using lowest 93 * specified in the interrupt destination when using lowest
@@ -97,8 +97,7 @@ static cpumask_t vector_allocation_domain(int cpu)
97 * deliver interrupts to the wrong hyperthread when only one 97 * deliver interrupts to the wrong hyperthread when only one
 98 * hyperthread was specified in the interrupt destination. 98 * hyperthread was specified in the interrupt destination.
99 */ 99 */
100 cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; 100 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
101 return domain;
102} 101}
103 102
104struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000); 103struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c
index 71a309b122e6..511d7941364f 100644
--- a/arch/x86/mach-generic/numaq.c
+++ b/arch/x86/mach-generic/numaq.c
@@ -38,7 +38,7 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
38 return 0; 38 return 0;
39} 39}
40 40
41static cpumask_t vector_allocation_domain(int cpu) 41static void vector_allocation_domain(int cpu, cpumask_t *retmask)
42{ 42{
43 /* Careful. Some cpus do not strictly honor the set of cpus 43 /* Careful. Some cpus do not strictly honor the set of cpus
44 * specified in the interrupt destination when using lowest 44 * specified in the interrupt destination when using lowest
@@ -48,8 +48,7 @@ static cpumask_t vector_allocation_domain(int cpu)
48 * deliver interrupts to the wrong hyperthread when only one 48 * deliver interrupts to the wrong hyperthread when only one
 49 * hyperthread was specified in the interrupt destination. 49 * hyperthread was specified in the interrupt destination.
50 */ 50 */
51 cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; 51 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
52 return domain;
53} 52}
54 53
55struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq); 54struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c
index 2c6d234e0009..2821ffc188b5 100644
--- a/arch/x86/mach-generic/summit.c
+++ b/arch/x86/mach-generic/summit.c
@@ -24,7 +24,7 @@ static int probe_summit(void)
24 return 0; 24 return 0;
25} 25}
26 26
27static cpumask_t vector_allocation_domain(int cpu) 27static void vector_allocation_domain(int cpu, cpumask_t *retmask)
28{ 28{
29 /* Careful. Some cpus do not strictly honor the set of cpus 29 /* Careful. Some cpus do not strictly honor the set of cpus
30 * specified in the interrupt destination when using lowest 30 * specified in the interrupt destination when using lowest
@@ -34,8 +34,7 @@ static cpumask_t vector_allocation_domain(int cpu)
34 * deliver interrupts to the wrong hyperthread when only one 34 * deliver interrupts to the wrong hyperthread when only one
 35 * hyperthread was specified in the interrupt destination. 35 * hyperthread was specified in the interrupt destination.
36 */ 36 */
37 cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; 37 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
38 return domain;
39} 38}
40 39
41struct genapic apic_summit = APIC_INIT("summit", probe_summit); 40struct genapic apic_summit = APIC_INIT("summit", probe_summit);
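
All four subarch vector_allocation_domain() implementations switch from returning a cpumask_t by value to filling a caller-supplied mask. The motivation is copy cost: with NR_CPUS=4096 a cpumask_t is 512 bytes, too large to pass around on the stack. Sketch of the caller side under the new signature (the local variable is illustrative; in-tree the caller is the vector allocator in io_apic.c):

cpumask_t domain;

/* old: 512-byte struct copied through the return value */
domain = vector_allocation_domain(cpu);

/* new: filled in place, no copy */
vector_allocation_domain(cpu, &domain);
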
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 9c990185e9f2..a5bc05492b1e 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -672,7 +672,7 @@ void __init smp_boot_cpus(void)
672 672
673 /* loop over all the extended VIC CPUs and boot them. The 673 /* loop over all the extended VIC CPUs and boot them. The
674 * Quad CPUs must be bootstrapped by their extended VIC cpu */ 674 * Quad CPUs must be bootstrapped by their extended VIC cpu */
675 for (i = 0; i < NR_CPUS; i++) { 675 for (i = 0; i < nr_cpu_ids; i++) {
676 if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map)) 676 if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
677 continue; 677 continue;
678 do_boot_cpu(i); 678 do_boot_cpu(i);
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index cebcbf152d46..71a14f89f89e 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -278,7 +278,7 @@ void __init numa_init_array(void)
278 int rr, i; 278 int rr, i;
279 279
280 rr = first_node(node_online_map); 280 rr = first_node(node_online_map);
281 for (i = 0; i < NR_CPUS; i++) { 281 for (i = 0; i < nr_cpu_ids; i++) {
282 if (early_cpu_to_node(i) != NUMA_NO_NODE) 282 if (early_cpu_to_node(i) != NUMA_NO_NODE)
283 continue; 283 continue;
284 numa_set_node(i, rr); 284 numa_set_node(i, rr);
@@ -549,7 +549,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn)
549 memnodemap[0] = 0; 549 memnodemap[0] = 0;
550 node_set_online(0); 550 node_set_online(0);
551 node_set(0, node_possible_map); 551 node_set(0, node_possible_map);
552 for (i = 0; i < NR_CPUS; i++) 552 for (i = 0; i < nr_cpu_ids; i++)
553 numa_set_node(i, 0); 553 numa_set_node(i, 0);
554 e820_register_active_regions(0, start_pfn, last_pfn); 554 e820_register_active_regions(0, start_pfn, last_pfn);
555 setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); 555 setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 51c0a2fc14fe..09737c8af074 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -382,7 +382,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
382 if (!node_online(i)) 382 if (!node_online(i))
383 setup_node_bootmem(i, nodes[i].start, nodes[i].end); 383 setup_node_bootmem(i, nodes[i].start, nodes[i].end);
384 384
385 for (i = 0; i < NR_CPUS; i++) { 385 for (i = 0; i < nr_cpu_ids; i++) {
386 int node = early_cpu_to_node(i); 386 int node = early_cpu_to_node(i);
387 387
388 if (node == NUMA_NO_NODE) 388 if (node == NUMA_NO_NODE)
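
This loop, like the ones in numa_64.c and voyager_smp.c above and in xen/smp.c below, shrinks its bound from the compile-time NR_CPUS to nr_cpu_ids, the runtime count of possible CPU ids. The idiom (init_cpu_node() is a stand-in):

int cpu;

/* nr_cpu_ids <= NR_CPUS: it reflects what this machine can hold */
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
	init_cpu_node(cpu);
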
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 773d68d3e912..503c240e26c7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1082,7 +1082,7 @@ static void drop_other_mm_ref(void *info)
1082 1082
1083static void xen_drop_mm_ref(struct mm_struct *mm) 1083static void xen_drop_mm_ref(struct mm_struct *mm)
1084{ 1084{
1085 cpumask_t mask; 1085 cpumask_var_t mask;
1086 unsigned cpu; 1086 unsigned cpu;
1087 1087
1088 if (current->active_mm == mm) { 1088 if (current->active_mm == mm) {
@@ -1094,7 +1094,16 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
1094 } 1094 }
1095 1095
1096 /* Get the "official" set of cpus referring to our pagetable. */ 1096 /* Get the "official" set of cpus referring to our pagetable. */
1097 mask = mm->cpu_vm_mask; 1097 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1098 for_each_online_cpu(cpu) {
1099 if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
1100 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1101 continue;
1102 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1103 }
1104 return;
1105 }
1106 cpumask_copy(mask, &mm->cpu_vm_mask);
1098 1107
1099 /* It's possible that a vcpu may have a stale reference to our 1108 /* It's possible that a vcpu may have a stale reference to our
1100 cr3, because its in lazy mode, and it hasn't yet flushed 1109 cr3, because its in lazy mode, and it hasn't yet flushed
@@ -1103,11 +1112,12 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
1103 if needed. */ 1112 if needed. */
1104 for_each_online_cpu(cpu) { 1113 for_each_online_cpu(cpu) {
1105 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) 1114 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1106 cpu_set(cpu, mask); 1115 cpumask_set_cpu(cpu, mask);
1107 } 1116 }
1108 1117
1109 if (!cpus_empty(mask)) 1118 if (!cpumask_empty(mask))
1110 smp_call_function_mask(mask, drop_other_mm_ref, mm, 1); 1119 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1120 free_cpumask_var(mask);
1111} 1121}
1112#else 1122#else
1113static void xen_drop_mm_ref(struct mm_struct *mm) 1123static void xen_drop_mm_ref(struct mm_struct *mm)
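
xen_drop_mm_ref() now follows the full cpumask_var_t discipline: under CONFIG_CPUMASK_OFFSTACK the mask is heap-allocated (and allocation can fail, hence the slower per-CPU fallback added above), otherwise it degenerates to an on-stack array. The general shape, with slow_path()/do_work()/source_mask as stand-ins:

cpumask_var_t mask;

if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
	slow_path();		/* degraded path needing no temporary mask */
	return;
}

cpumask_copy(mask, source_mask);
do_work(mask);
free_cpumask_var(mask);
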
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index acd9b6705e02..c44e2069c7c7 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -33,7 +33,7 @@
33#include "xen-ops.h" 33#include "xen-ops.h"
34#include "mmu.h" 34#include "mmu.h"
35 35
36cpumask_t xen_cpu_initialized_map; 36cpumask_var_t xen_cpu_initialized_map;
37 37
38static DEFINE_PER_CPU(int, resched_irq); 38static DEFINE_PER_CPU(int, resched_irq);
39static DEFINE_PER_CPU(int, callfunc_irq); 39static DEFINE_PER_CPU(int, callfunc_irq);
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void)
158{ 158{
159 int i, rc; 159 int i, rc;
160 160
161 for (i = 0; i < NR_CPUS; i++) { 161 for (i = 0; i < nr_cpu_ids; i++) {
162 rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); 162 rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
163 if (rc >= 0) { 163 if (rc >= 0) {
164 num_processors++; 164 num_processors++;
@@ -192,11 +192,14 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
192 if (xen_smp_intr_init(0)) 192 if (xen_smp_intr_init(0))
193 BUG(); 193 BUG();
194 194
195 xen_cpu_initialized_map = cpumask_of_cpu(0); 195 if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
196 panic("could not allocate xen_cpu_initialized_map\n");
197
198 cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
196 199
197 /* Restrict the possible_map according to max_cpus. */ 200 /* Restrict the possible_map according to max_cpus. */
198 while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { 201 while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
199 for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--) 202 for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
200 continue; 203 continue;
201 cpu_clear(cpu, cpu_possible_map); 204 cpu_clear(cpu, cpu_possible_map);
202 } 205 }
@@ -221,7 +224,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
221 struct vcpu_guest_context *ctxt; 224 struct vcpu_guest_context *ctxt;
222 struct desc_struct *gdt; 225 struct desc_struct *gdt;
223 226
224 if (cpu_test_and_set(cpu, xen_cpu_initialized_map)) 227 if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
225 return 0; 228 return 0;
226 229
227 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 230 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
@@ -408,24 +411,23 @@ static void xen_smp_send_reschedule(int cpu)
408 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); 411 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
409} 412}
410 413
411static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) 414static void xen_send_IPI_mask(const struct cpumask *mask,
415 enum ipi_vector vector)
412{ 416{
413 unsigned cpu; 417 unsigned cpu;
414 418
415 cpus_and(mask, mask, cpu_online_map); 419 for_each_cpu_and(cpu, mask, cpu_online_mask)
416
417 for_each_cpu_mask_nr(cpu, mask)
418 xen_send_IPI_one(cpu, vector); 420 xen_send_IPI_one(cpu, vector);
419} 421}
420 422
421static void xen_smp_send_call_function_ipi(cpumask_t mask) 423static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
422{ 424{
423 int cpu; 425 int cpu;
424 426
425 xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); 427 xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
426 428
427 /* Make sure other vcpus get a chance to run if they need to. */ 429 /* Make sure other vcpus get a chance to run if they need to. */
428 for_each_cpu_mask_nr(cpu, mask) { 430 for_each_cpu(cpu, mask) {
429 if (xen_vcpu_stolen(cpu)) { 431 if (xen_vcpu_stolen(cpu)) {
430 HYPERVISOR_sched_op(SCHEDOP_yield, 0); 432 HYPERVISOR_sched_op(SCHEDOP_yield, 0);
431 break; 433 break;
@@ -435,7 +437,8 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask)
435 437
436static void xen_smp_send_call_function_single_ipi(int cpu) 438static void xen_smp_send_call_function_single_ipi(int cpu)
437{ 439{
438 xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR); 440 xen_send_IPI_mask(cpumask_of(cpu),
441 XEN_CALL_FUNCTION_SINGLE_VECTOR);
439} 442}
440 443
441static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) 444static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
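
With the mask now arriving as a const pointer, xen_send_IPI_mask() can no longer cpus_and() the online map into its own by-value copy; for_each_cpu_and() iterates the intersection directly, with no temporary, which is exactly what the hunk above installs:

unsigned int cpu;

/* visit each CPU present in both *mask and cpu_online_mask */
for_each_cpu_and(cpu, mask, cpu_online_mask)
	xen_send_IPI_one(cpu, vector);
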
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 2a234db5949b..212ffe012b76 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -35,7 +35,8 @@ void xen_post_suspend(int suspend_cancelled)
35 pfn_to_mfn(xen_start_info->console.domU.mfn); 35 pfn_to_mfn(xen_start_info->console.domU.mfn);
36 } else { 36 } else {
37#ifdef CONFIG_SMP 37#ifdef CONFIG_SMP
38 xen_cpu_initialized_map = cpu_online_map; 38 BUG_ON(xen_cpu_initialized_map == NULL);
39 cpumask_copy(xen_cpu_initialized_map, cpu_online_mask);
39#endif 40#endif
40 xen_vcpu_restore(); 41 xen_vcpu_restore();
41 } 42 }
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 9e1afae8461f..c1f8faf0a2c5 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -58,7 +58,7 @@ void __init xen_init_spinlocks(void);
58__cpuinit void xen_init_lock_cpu(int cpu); 58__cpuinit void xen_init_lock_cpu(int cpu);
59void xen_uninit_lock_cpu(int cpu); 59void xen_uninit_lock_cpu(int cpu);
60 60
61extern cpumask_t xen_cpu_initialized_map; 61extern cpumask_var_t xen_cpu_initialized_map;
62#else 62#else
63static inline void xen_smp_init(void) {} 63static inline void xen_smp_init(void) {}
64#endif 64#endif