-rw-r--r--  arch/x86/Kconfig                                |  13
-rw-r--r--  arch/x86/include/asm/bigsmp/apic.h              |  30
-rw-r--r--  arch/x86/include/asm/bigsmp/ipi.h               |  13
-rw-r--r--  arch/x86/include/asm/es7000/apic.h              |  86
-rw-r--r--  arch/x86/include/asm/es7000/ipi.h               |  12
-rw-r--r--  arch/x86/include/asm/genapic_32.h               |  13
-rw-r--r--  arch/x86/include/asm/genapic_64.h               |  14
-rw-r--r--  arch/x86/include/asm/ipi.h                      |  23
-rw-r--r--  arch/x86/include/asm/irq.h                      |   2
-rw-r--r--  arch/x86/include/asm/mach-default/mach_apic.h   |  27
-rw-r--r--  arch/x86/include/asm/mach-default/mach_ipi.h    |  18
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_apic.h   |   1
-rw-r--r--  arch/x86/include/asm/numaq/apic.h               |  12
-rw-r--r--  arch/x86/include/asm/numaq/ipi.h                |  13
-rw-r--r--  arch/x86/include/asm/smp.h                      |   6
-rw-r--r--  arch/x86/include/asm/summit/apic.h              |  51
-rw-r--r--  arch/x86/include/asm/summit/ipi.h               |   9
-rw-r--r--  arch/x86/include/asm/topology.h                 |   2
-rw-r--r--  arch/x86/kernel/apic.c                          |  12
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c           |  41
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c         | 108
-rw-r--r--  arch/x86/kernel/crash.c                         |   5
-rw-r--r--  arch/x86/kernel/genapic_flat_64.c               | 105
-rw-r--r--  arch/x86/kernel/genx2apic_cluster.c             |  77
-rw-r--r--  arch/x86/kernel/genx2apic_phys.c                |  72
-rw-r--r--  arch/x86/kernel/genx2apic_uv_x.c                |  59
-rw-r--r--  arch/x86/kernel/io_apic.c                       | 341
-rw-r--r--  arch/x86/kernel/ipi.c                           |  28
-rw-r--r--  arch/x86/kernel/irq_32.c                        |  13
-rw-r--r--  arch/x86/kernel/irq_64.c                        |  15
-rw-r--r--  arch/x86/kernel/setup_percpu.c                  |  17
-rw-r--r--  arch/x86/kernel/smp.c                           |   8
-rw-r--r--  arch/x86/kernel/smpboot.c                       |   2
-rw-r--r--  arch/x86/kernel/tlb_32.c                        |   2
-rw-r--r--  arch/x86/kernel/tlb_64.c                        |   2
-rw-r--r--  arch/x86/mach-generic/bigsmp.c                  |   5
-rw-r--r--  arch/x86/mach-generic/es7000.c                  |   5
-rw-r--r--  arch/x86/mach-generic/numaq.c                   |   5
-rw-r--r--  arch/x86/mach-generic/summit.c                  |   5
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c             |   2
-rw-r--r--  arch/x86/mm/numa_64.c                           |   4
-rw-r--r--  arch/x86/mm/srat_64.c                           |   2
-rw-r--r--  arch/x86/xen/mmu.c                              |  20
-rw-r--r--  arch/x86/xen/smp.c                              |  27
-rw-r--r--  arch/x86/xen/suspend.c                          |   3
-rw-r--r--  arch/x86/xen/xen-ops.h                          |   2
46 files changed, 824 insertions(+), 508 deletions(-)
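
The hunks below convert the x86 APIC/IPI paths from passing cpumask_t by value to passing const struct cpumask pointers; the Kconfig hunk shows why, since MAXSMP now selects CPUMASK_OFFSTACK and raises NR_CPUS to 4096, at which point a cpumask_t is 512 bytes and every by-value call copies half a kilobyte of stack. A minimal user-space sketch of the difference (the struct below only models the kernel's bitmap layout; it is not kernel API):

#include <stdio.h>
#include <string.h>

#define NR_CPUS 4096
#define BITS_PER_LONG (8 * sizeof(unsigned long))

struct cpumask {
        unsigned long bits[NR_CPUS / BITS_PER_LONG];   /* 512 bytes at 4096 CPUs */
};

/* Old style: the whole 512-byte mask is copied for every call. */
static int first_cpu_byval(struct cpumask mask)
{
        for (unsigned int i = 0; i < NR_CPUS / BITS_PER_LONG; i++)
                if (mask.bits[i])
                        return i * BITS_PER_LONG + __builtin_ctzl(mask.bits[i]);
        return NR_CPUS;
}

/* New style: only a pointer travels; const documents it is read-only. */
static int first_cpu_byref(const struct cpumask *mask)
{
        for (unsigned int i = 0; i < NR_CPUS / BITS_PER_LONG; i++)
                if (mask->bits[i])
                        return i * BITS_PER_LONG + __builtin_ctzl(mask->bits[i]);
        return NR_CPUS;
}

int main(void)
{
        struct cpumask online;

        memset(&online, 0, sizeof(online));
        online.bits[1] = 0x10;  /* pretend CPU 68 is online */
        printf("sizeof(struct cpumask) = %zu bytes\n", sizeof(struct cpumask));
        printf("first cpu: %d / %d\n", first_cpu_byval(online),
               first_cpu_byref(&online));
        return 0;
}
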
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d99eeb7915c6..4a3f5851ec64 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -591,19 +591,20 @@ config IOMMU_HELPER
 
 config MAXSMP
         bool "Configure Maximum number of SMP Processors and NUMA Nodes"
-        depends on X86_64 && SMP && BROKEN
+        depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
+        select CPUMASK_OFFSTACK
         default n
         help
           Configure maximum number of CPUS and NUMA Nodes for this architecture.
           If unsure, say N.
 
 config NR_CPUS
-        int "Maximum number of CPUs (2-512)" if !MAXSMP
-        range 2 512
-        depends on SMP
+        int "Maximum number of CPUs" if SMP && !MAXSMP
+        range 2 512 if SMP && !MAXSMP
+        default "1" if !SMP
         default "4096" if MAXSMP
-        default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
-        default "8"
+        default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000)
+        default "8" if SMP
         help
           This allows you to specify the maximum number of CPUs which this
           kernel will support.  The maximum supported value is 512 and the
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
index ce547f24a1cd..976399debb3f 100644
--- a/arch/x86/include/asm/bigsmp/apic.h
+++ b/arch/x86/include/asm/bigsmp/apic.h
@@ -9,12 +9,12 @@ static inline int apic_id_registered(void)
         return (1);
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-        return cpu_online_map;
+        return &cpu_online_map;
 #else
-        return cpumask_of_cpu(0);
+        return &cpumask_of_cpu(0);
 #endif
 }
 
@@ -79,7 +79,7 @@ static inline int apicid_to_node(int logical_apicid)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-        if (mps_cpu < NR_CPUS)
+        if (mps_cpu < nr_cpu_ids)
                 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 
         return BAD_APICID;
@@ -94,7 +94,7 @@ extern u8 cpu_2_logical_apicid[];
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
-        if (cpu >= NR_CPUS)
+        if (cpu >= nr_cpu_ids)
                 return BAD_APICID;
         return cpu_physical_id(cpu);
 }
@@ -119,16 +119,32 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
         int cpu;
         int apicid;
 
-        cpu = first_cpu(cpumask);
+        cpu = first_cpu(*cpumask);
         apicid = cpu_to_logical_apicid(cpu);
         return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                  const struct cpumask *andmask)
+{
+        int cpu;
+
+        /*
+         * We're using fixed IRQ delivery, can only return one phys APIC ID.
+         * May as well be the first.
+         */
+        cpu = cpumask_any_and(cpumask, andmask);
+        if (cpu < nr_cpu_ids)
+                return cpu_to_logical_apicid(cpu);
+
+        return BAD_APICID;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
         return cpuid_apic >> index_msb;
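
The new cpu_mask_to_apicid_and() picks one CPU out of the intersection of two masks via cpumask_any_and(). On a single-word mask that reduces to the first set bit of the two words ANDed together; a stand-alone sketch of the same logic (helper names here are illustrative, not the kernel's implementation):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define NR_CPU_IDS    BITS_PER_LONG     /* single-word mask for the sketch */

/* First CPU present in both masks, or NR_CPU_IDS if the AND is empty. */
static unsigned int any_and(unsigned long a, unsigned long b)
{
        unsigned long and = a & b;

        return and ? (unsigned int)__builtin_ctzl(and) : NR_CPU_IDS;
}

int main(void)
{
        unsigned long affinity = 0xf0;  /* IRQ may go to CPUs 4-7 */
        unsigned long domain   = 0x30;  /* vector domain covers CPUs 4-5 */
        unsigned int cpu = any_and(affinity, domain);

        if (cpu < NR_CPU_IDS)
                printf("deliver to cpu %u\n", cpu);     /* prints 4 */
        else
                printf("no cpu in both masks (BAD_APICID case)\n");
        return 0;
}
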
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
index 9404c535b7ec..27fcd01b3ae6 100644
--- a/arch/x86/include/asm/bigsmp/ipi.h
+++ b/arch/x86/include/asm/bigsmp/ipi.h
@@ -1,25 +1,22 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef __ASM_MACH_IPI_H
2#define __ASM_MACH_IPI_H 2#define __ASM_MACH_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t mask, int vector); 4void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
5void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
5 6
6static inline void send_IPI_mask(cpumask_t mask, int vector) 7static inline void send_IPI_mask(const struct cpumask *mask, int vector)
7{ 8{
8 send_IPI_mask_sequence(mask, vector); 9 send_IPI_mask_sequence(mask, vector);
9} 10}
10 11
11static inline void send_IPI_allbutself(int vector) 12static inline void send_IPI_allbutself(int vector)
12{ 13{
13 cpumask_t mask = cpu_online_map; 14 send_IPI_mask_allbutself(cpu_online_mask, vector);
14 cpu_clear(smp_processor_id(), mask);
15
16 if (!cpus_empty(mask))
17 send_IPI_mask(mask, vector);
18} 15}
19 16
20static inline void send_IPI_all(int vector) 17static inline void send_IPI_all(int vector)
21{ 18{
22 send_IPI_mask(cpu_online_map, vector); 19 send_IPI_mask(cpu_online_mask, vector);
23} 20}
24 21
25#endif /* __ASM_MACH_IPI_H */ 22#endif /* __ASM_MACH_IPI_H */
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
index e24ef876915f..ba8423c5363f 100644
--- a/arch/x86/include/asm/es7000/apic.h
+++ b/arch/x86/include/asm/es7000/apic.h
@@ -9,14 +9,14 @@ static inline int apic_id_registered(void)
         return (1);
 }
 
-static inline cpumask_t target_cpus_cluster(void)
+static inline const cpumask_t *target_cpus_cluster(void)
 {
-        return CPU_MASK_ALL;
+        return &CPU_MASK_ALL;
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-        return cpumask_of_cpu(smp_processor_id());
+        return &cpumask_of_cpu(smp_processor_id());
 }
 
 #define APIC_DFR_VALUE_CLUSTER          (APIC_DFR_CLUSTER)
@@ -80,9 +80,10 @@ extern int apic_version [MAX_APICS];
 static inline void setup_apic_routing(void)
 {
         int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
         printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
                 (apic_version[apic] == 0x14) ?
-                "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
+                "Physical Cluster" : "Logical Cluster",
+                nr_ioapics, cpus_addr(*target_cpus())[0]);
 }
 
 static inline int multi_timer_check(int apic, int irq)
@@ -100,7 +101,7 @@ static inline int cpu_present_to_apicid(int mps_cpu)
 {
         if (!mps_cpu)
                 return boot_cpu_physical_apicid;
-        else if (mps_cpu < NR_CPUS)
+        else if (mps_cpu < nr_cpu_ids)
                 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
         else
                 return BAD_APICID;
@@ -120,9 +121,9 @@ extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-        if (cpu >= NR_CPUS)
+        if (cpu >= nr_cpu_ids)
                 return BAD_APICID;
         return (int)cpu_2_logical_apicid[cpu];
 #else
         return logical_smp_processor_id();
 #endif
@@ -146,14 +147,15 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid)
         return (1);
 }
 
-static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
+static inline unsigned int
+cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 {
         int num_bits_set;
         int cpus_found = 0;
         int cpu;
         int apicid;
 
-        num_bits_set = cpus_weight(cpumask);
+        num_bits_set = cpumask_weight(cpumask);
         /* Return id to all */
         if (num_bits_set == NR_CPUS)
                 return 0xFF;
@@ -161,10 +163,10 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
          * The cpus in the mask must all be on the apic cluster.  If are not
          * on the same apicid cluster return default value of TARGET_CPUS.
          */
-        cpu = first_cpu(cpumask);
+        cpu = cpumask_first(cpumask);
         apicid = cpu_to_logical_apicid(cpu);
         while (cpus_found < num_bits_set) {
-                if (cpu_isset(cpu, cpumask)) {
+                if (cpumask_test_cpu(cpu, cpumask)) {
                         int new_apicid = cpu_to_logical_apicid(cpu);
                         if (apicid_cluster(apicid) !=
                                         apicid_cluster(new_apicid)){
@@ -179,14 +181,14 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
         return apicid;
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
         int num_bits_set;
         int cpus_found = 0;
         int cpu;
         int apicid;
 
-        num_bits_set = cpus_weight(cpumask);
+        num_bits_set = cpus_weight(*cpumask);
         /* Return id to all */
         if (num_bits_set == NR_CPUS)
                 return cpu_to_logical_apicid(0);
@@ -194,10 +196,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
          * The cpus in the mask must all be on the apic cluster.  If are not
          * on the same apicid cluster return default value of TARGET_CPUS.
          */
-        cpu = first_cpu(cpumask);
+        cpu = first_cpu(*cpumask);
         apicid = cpu_to_logical_apicid(cpu);
         while (cpus_found < num_bits_set) {
-                if (cpu_isset(cpu, cpumask)) {
+                if (cpu_isset(cpu, *cpumask)) {
                         int new_apicid = cpu_to_logical_apicid(cpu);
                         if (apicid_cluster(apicid) !=
                                         apicid_cluster(new_apicid)){
@@ -212,6 +214,54 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
         return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                  const struct cpumask *andmask)
+{
+        int num_bits_set;
+        int num_bits_set2;
+        int cpus_found = 0;
+        int cpu;
+        int apicid = 0;
+
+        num_bits_set = cpumask_weight(cpumask);
+        num_bits_set2 = cpumask_weight(andmask);
+        num_bits_set = min(num_bits_set, num_bits_set2);
+        /* Return id to all */
+        if (num_bits_set >= nr_cpu_ids)
+#if defined CONFIG_ES7000_CLUSTERED_APIC
+                return 0xFF;
+#else
+                return cpu_to_logical_apicid(0);
+#endif
+        /*
+         * The cpus in the mask must all be on the apic cluster.  If are not
+         * on the same apicid cluster return default value of TARGET_CPUS.
+         */
+        cpu = cpumask_first_and(cpumask, andmask);
+        apicid = cpu_to_logical_apicid(cpu);
+
+        while (cpus_found < num_bits_set) {
+                if (cpumask_test_cpu(cpu, cpumask) &&
+                    cpumask_test_cpu(cpu, andmask)) {
+                        int new_apicid = cpu_to_logical_apicid(cpu);
+                        if (apicid_cluster(apicid) !=
+                                        apicid_cluster(new_apicid)) {
+                                printk(KERN_WARNING
+                                        "%s: Not a valid mask!\n", __func__);
+#if defined CONFIG_ES7000_CLUSTERED_APIC
+                                return 0xFF;
+#else
+                                return cpu_to_logical_apicid(0);
+#endif
+                        }
+                        apicid = new_apicid;
+                        cpus_found++;
+                }
+                cpu++;
+        }
+        return apicid;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
         return cpuid_apic >> index_msb;
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
index 632a955fcc0a..7e8ed24d4b8a 100644
--- a/arch/x86/include/asm/es7000/ipi.h
+++ b/arch/x86/include/asm/es7000/ipi.h
@@ -1,24 +1,22 @@
1#ifndef __ASM_ES7000_IPI_H 1#ifndef __ASM_ES7000_IPI_H
2#define __ASM_ES7000_IPI_H 2#define __ASM_ES7000_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t mask, int vector); 4void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
5void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
5 6
6static inline void send_IPI_mask(cpumask_t mask, int vector) 7static inline void send_IPI_mask(const struct cpumask *mask, int vector)
7{ 8{
8 send_IPI_mask_sequence(mask, vector); 9 send_IPI_mask_sequence(mask, vector);
9} 10}
10 11
11static inline void send_IPI_allbutself(int vector) 12static inline void send_IPI_allbutself(int vector)
12{ 13{
13 cpumask_t mask = cpu_online_map; 14 send_IPI_mask_allbutself(cpu_online_mask, vector);
14 cpu_clear(smp_processor_id(), mask);
15 if (!cpus_empty(mask))
16 send_IPI_mask(mask, vector);
17} 15}
18 16
19static inline void send_IPI_all(int vector) 17static inline void send_IPI_all(int vector)
20{ 18{
21 send_IPI_mask(cpu_online_map, vector); 19 send_IPI_mask(cpu_online_mask, vector);
22} 20}
23 21
24#endif /* __ASM_ES7000_IPI_H */ 22#endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
index 0ac17d33a8c7..746f37a7963a 100644
--- a/arch/x86/include/asm/genapic_32.h
+++ b/arch/x86/include/asm/genapic_32.h
@@ -24,7 +24,7 @@ struct genapic {
         int (*probe)(void);
 
         int (*apic_id_registered)(void);
-        cpumask_t (*target_cpus)(void);
+        const struct cpumask *(*target_cpus)(void);
         int int_delivery_mode;
         int int_dest_mode;
         int ESR_DISABLE;
@@ -57,12 +57,16 @@ struct genapic {
 
         unsigned (*get_apic_id)(unsigned long x);
         unsigned long apic_id_mask;
-        unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
-        cpumask_t (*vector_allocation_domain)(int cpu);
+        unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+        unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+                                               const struct cpumask *andmask);
+        void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 
 #ifdef CONFIG_SMP
         /* ipi */
-        void (*send_IPI_mask)(cpumask_t mask, int vector);
+        void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+        void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+                                         int vector);
         void (*send_IPI_allbutself)(int vector);
         void (*send_IPI_all)(int vector);
 #endif
@@ -114,6 +118,7 @@ struct genapic {
         APICFUNC(get_apic_id)                           \
         .apic_id_mask = APIC_ID_MASK,                   \
         APICFUNC(cpu_mask_to_apicid)                    \
+        APICFUNC(cpu_mask_to_apicid_and)                \
         APICFUNC(vector_allocation_domain)              \
         APICFUNC(acpi_madt_oem_check)                   \
         IPIFUNC(send_IPI_mask)                          \
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
index 2cae011668b7..adf32fb56aa6 100644
--- a/arch/x86/include/asm/genapic_64.h
+++ b/arch/x86/include/asm/genapic_64.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_X86_GENAPIC_64_H 1#ifndef _ASM_X86_GENAPIC_64_H
2#define _ASM_X86_GENAPIC_64_H 2#define _ASM_X86_GENAPIC_64_H
3 3
4#include <linux/cpumask.h>
5
4/* 6/*
5 * Copyright 2004 James Cleverdon, IBM. 7 * Copyright 2004 James Cleverdon, IBM.
6 * Subject to the GNU Public License, v.2 8 * Subject to the GNU Public License, v.2
@@ -18,16 +20,20 @@ struct genapic {
18 u32 int_delivery_mode; 20 u32 int_delivery_mode;
19 u32 int_dest_mode; 21 u32 int_dest_mode;
20 int (*apic_id_registered)(void); 22 int (*apic_id_registered)(void);
21 cpumask_t (*target_cpus)(void); 23 const struct cpumask *(*target_cpus)(void);
22 cpumask_t (*vector_allocation_domain)(int cpu); 24 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
23 void (*init_apic_ldr)(void); 25 void (*init_apic_ldr)(void);
24 /* ipi */ 26 /* ipi */
25 void (*send_IPI_mask)(cpumask_t mask, int vector); 27 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
28 void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
29 int vector);
26 void (*send_IPI_allbutself)(int vector); 30 void (*send_IPI_allbutself)(int vector);
27 void (*send_IPI_all)(int vector); 31 void (*send_IPI_all)(int vector);
28 void (*send_IPI_self)(int vector); 32 void (*send_IPI_self)(int vector);
29 /* */ 33 /* */
30 unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); 34 unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
35 unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
36 const struct cpumask *andmask);
31 unsigned int (*phys_pkg_id)(int index_msb); 37 unsigned int (*phys_pkg_id)(int index_msb);
32 unsigned int (*get_apic_id)(unsigned long x); 38 unsigned int (*get_apic_id)(unsigned long x);
33 unsigned long (*set_apic_id)(unsigned int id); 39 unsigned long (*set_apic_id)(unsigned int id);
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index f89dffb28aa9..c745a306f7d3 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -117,7 +117,8 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
         native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
+static inline void send_IPI_mask_sequence(const struct cpumask *mask,
+                                          int vector)
 {
         unsigned long flags;
         unsigned long query_cpu;
@@ -128,11 +129,29 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
          *  - mbligh
          */
         local_irq_save(flags);
-        for_each_cpu_mask_nr(query_cpu, mask) {
+        for_each_cpu(query_cpu, mask) {
                 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
                                       vector, APIC_DEST_PHYSICAL);
         }
         local_irq_restore(flags);
 }
 
+static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
+                                            int vector)
+{
+        unsigned long flags;
+        unsigned int query_cpu;
+        unsigned int this_cpu = smp_processor_id();
+
+        /* See Hack comment above */
+
+        local_irq_save(flags);
+        for_each_cpu(query_cpu, mask)
+                if (query_cpu != this_cpu)
+                        __send_IPI_dest_field(
+                                per_cpu(x86_cpu_to_apicid, query_cpu),
+                                vector, APIC_DEST_PHYSICAL);
+        local_irq_restore(flags);
+}
+
 #endif /* _ASM_X86_IPI_H */
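
send_IPI_mask_allbutself() exists so callers no longer have to build a modified copy of cpu_online_map just to drop the sending CPU: the loop walks the caller's mask read-only and skips the current CPU inline. The same skip-self iteration in plain C (the mask walk below is a simplification of for_each_cpu, not the kernel macro):

#include <stdio.h>

/* Visit every set bit except 'self', without modifying the mask. */
static void for_each_but_self(unsigned long mask, unsigned int self,
                              void (*fn)(unsigned int cpu))
{
        while (mask) {
                unsigned int cpu = (unsigned int)__builtin_ctzl(mask);

                mask &= mask - 1;       /* clear lowest set bit */
                if (cpu != self)
                        fn(cpu);
        }
}

static void send_stub(unsigned int cpu)
{
        printf("IPI -> cpu %u\n", cpu);
}

int main(void)
{
        unsigned long online = 0x0f;    /* CPUs 0-3 online */

        for_each_but_self(online, 2, send_stub);        /* visits 0, 1, 3 */
        return 0;
}
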
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index bae0eda95486..8766d30fb746 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -37,7 +37,7 @@ extern int irqbalance_disable(char *str);
 
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
-extern void fixup_irqs(cpumask_t map);
+extern void fixup_irqs(void);
 #endif
 
 extern unsigned int do_IRQ(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
index 6cb3a467e067..8863d978cb96 100644
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ b/arch/x86/include/asm/mach-default/mach_apic.h
@@ -8,12 +8,12 @@
8 8
9#define APIC_DFR_VALUE (APIC_DFR_FLAT) 9#define APIC_DFR_VALUE (APIC_DFR_FLAT)
10 10
11static inline cpumask_t target_cpus(void) 11static inline const struct cpumask *target_cpus(void)
12{ 12{
13#ifdef CONFIG_SMP 13#ifdef CONFIG_SMP
14 return cpu_online_map; 14 return cpu_online_mask;
15#else 15#else
16 return cpumask_of_cpu(0); 16 return cpumask_of(0);
17#endif 17#endif
18} 18}
19 19
@@ -28,6 +28,7 @@ static inline cpumask_t target_cpus(void)
28#define apic_id_registered (genapic->apic_id_registered) 28#define apic_id_registered (genapic->apic_id_registered)
29#define init_apic_ldr (genapic->init_apic_ldr) 29#define init_apic_ldr (genapic->init_apic_ldr)
30#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) 30#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
31#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
31#define phys_pkg_id (genapic->phys_pkg_id) 32#define phys_pkg_id (genapic->phys_pkg_id)
32#define vector_allocation_domain (genapic->vector_allocation_domain) 33#define vector_allocation_domain (genapic->vector_allocation_domain)
33#define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID))) 34#define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
@@ -61,9 +62,18 @@ static inline int apic_id_registered(void)
61 return physid_isset(read_apic_id(), phys_cpu_present_map); 62 return physid_isset(read_apic_id(), phys_cpu_present_map);
62} 63}
63 64
64static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) 65static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
65{ 66{
66 return cpus_addr(cpumask)[0]; 67 return cpumask_bits(cpumask)[0];
68}
69
70static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
71 const struct cpumask *andmask)
72{
73 unsigned long mask1 = cpumask_bits(cpumask)[0];
74 unsigned long mask2 = cpumask_bits(andmask)[0];
75
76 return (unsigned int)(mask1 & mask2);
67} 77}
68 78
69static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) 79static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
@@ -88,7 +98,7 @@ static inline int apicid_to_node(int logical_apicid)
88#endif 98#endif
89} 99}
90 100
91static inline cpumask_t vector_allocation_domain(int cpu) 101static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
92{ 102{
93 /* Careful. Some cpus do not strictly honor the set of cpus 103 /* Careful. Some cpus do not strictly honor the set of cpus
94 * specified in the interrupt destination when using lowest 104 * specified in the interrupt destination when using lowest
@@ -98,8 +108,7 @@ static inline cpumask_t vector_allocation_domain(int cpu)
98 * deliver interrupts to the wrong hyperthread when only one 108 * deliver interrupts to the wrong hyperthread when only one
99 * hyperthread was specified in the interrupt desitination. 109 * hyperthread was specified in the interrupt desitination.
100 */ 110 */
101 cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; 111 *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
102 return domain;
103} 112}
104#endif 113#endif
105 114
@@ -131,7 +140,7 @@ static inline int cpu_to_logical_apicid(int cpu)
131 140
132static inline int cpu_present_to_apicid(int mps_cpu) 141static inline int cpu_present_to_apicid(int mps_cpu)
133{ 142{
134 if (mps_cpu < NR_CPUS && cpu_present(mps_cpu)) 143 if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
135 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); 144 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
136 else 145 else
137 return BAD_APICID; 146 return BAD_APICID;
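
vector_allocation_domain() now fills a caller-supplied struct cpumask instead of returning one by value, the standard out-parameter transform for types too big to return cheaply. A sketch of the before/after shape (simplified types and hypothetical names, not the kernel's):

#include <stdio.h>
#include <string.h>

struct mask {
        unsigned long bits[64]; /* big enough that copying hurts */
};

/* Before: the whole 512-byte result is copied out through the return slot. */
static struct mask domain_byval(void)
{
        struct mask m;

        memset(&m, 0, sizeof(m));
        m.bits[0] = 0xFFUL;     /* like APIC_ALL_CPUS in the flat case */
        return m;
}

/* After: the caller owns the storage; the callee only writes through it. */
static void domain_byref(struct mask *ret)
{
        memset(ret, 0, sizeof(*ret));
        ret->bits[0] = 0xFFUL;
}

int main(void)
{
        struct mask a = domain_byval();
        struct mask b;

        domain_byref(&b);
        printf("equal: %d\n", memcmp(&a, &b, sizeof(a)) == 0);  /* 1 */
        return 0;
}
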
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
index fabca01ebacf..191312d155da 100644
--- a/arch/x86/include/asm/mach-default/mach_ipi.h
+++ b/arch/x86/include/asm/mach-default/mach_ipi.h
@@ -4,7 +4,8 @@
4/* Avoid include hell */ 4/* Avoid include hell */
5#define NMI_VECTOR 0x02 5#define NMI_VECTOR 0x02
6 6
7void send_IPI_mask_bitmask(cpumask_t mask, int vector); 7void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
8void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
8void __send_IPI_shortcut(unsigned int shortcut, int vector); 9void __send_IPI_shortcut(unsigned int shortcut, int vector);
9 10
10extern int no_broadcast; 11extern int no_broadcast;
@@ -12,28 +13,27 @@ extern int no_broadcast;
12#ifdef CONFIG_X86_64 13#ifdef CONFIG_X86_64
13#include <asm/genapic.h> 14#include <asm/genapic.h>
14#define send_IPI_mask (genapic->send_IPI_mask) 15#define send_IPI_mask (genapic->send_IPI_mask)
16#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
15#else 17#else
16static inline void send_IPI_mask(cpumask_t mask, int vector) 18static inline void send_IPI_mask(const struct cpumask *mask, int vector)
17{ 19{
18 send_IPI_mask_bitmask(mask, vector); 20 send_IPI_mask_bitmask(mask, vector);
19} 21}
22void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
20#endif 23#endif
21 24
22static inline void __local_send_IPI_allbutself(int vector) 25static inline void __local_send_IPI_allbutself(int vector)
23{ 26{
24 if (no_broadcast || vector == NMI_VECTOR) { 27 if (no_broadcast || vector == NMI_VECTOR)
25 cpumask_t mask = cpu_online_map; 28 send_IPI_mask_allbutself(cpu_online_mask, vector);
26 29 else
27 cpu_clear(smp_processor_id(), mask);
28 send_IPI_mask(mask, vector);
29 } else
30 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector); 30 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
31} 31}
32 32
33static inline void __local_send_IPI_all(int vector) 33static inline void __local_send_IPI_all(int vector)
34{ 34{
35 if (no_broadcast || vector == NMI_VECTOR) 35 if (no_broadcast || vector == NMI_VECTOR)
36 send_IPI_mask(cpu_online_map, vector); 36 send_IPI_mask(cpu_online_mask, vector);
37 else 37 else
38 __send_IPI_shortcut(APIC_DEST_ALLINC, vector); 38 __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
39} 39}
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h
index e430f47df667..48553e958ad5 100644
--- a/arch/x86/include/asm/mach-generic/mach_apic.h
+++ b/arch/x86/include/asm/mach-generic/mach_apic.h
@@ -24,6 +24,7 @@
24#define check_phys_apicid_present (genapic->check_phys_apicid_present) 24#define check_phys_apicid_present (genapic->check_phys_apicid_present)
25#define check_apicid_used (genapic->check_apicid_used) 25#define check_apicid_used (genapic->check_apicid_used)
26#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) 26#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
27#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
27#define vector_allocation_domain (genapic->vector_allocation_domain) 28#define vector_allocation_domain (genapic->vector_allocation_domain)
28#define enable_apic_mode (genapic->enable_apic_mode) 29#define enable_apic_mode (genapic->enable_apic_mode)
29#define phys_pkg_id (genapic->phys_pkg_id) 30#define phys_pkg_id (genapic->phys_pkg_id)
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
index 0bf2a06b7a4e..c80f00d29965 100644
--- a/arch/x86/include/asm/numaq/apic.h
+++ b/arch/x86/include/asm/numaq/apic.h
@@ -7,9 +7,9 @@
7 7
8#define APIC_DFR_VALUE (APIC_DFR_CLUSTER) 8#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
9 9
10static inline cpumask_t target_cpus(void) 10static inline const cpumask_t *target_cpus(void)
11{ 11{
12 return CPU_MASK_ALL; 12 return &CPU_MASK_ALL;
13} 13}
14 14
15#define NO_BALANCE_IRQ (1) 15#define NO_BALANCE_IRQ (1)
@@ -122,7 +122,13 @@ static inline void enable_apic_mode(void)
122 * We use physical apicids here, not logical, so just return the default 122 * We use physical apicids here, not logical, so just return the default
123 * physical broadcast to stop people from breaking us 123 * physical broadcast to stop people from breaking us
124 */ 124 */
125static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) 125static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
126{
127 return (int) 0xF;
128}
129
130static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
131 const struct cpumask *andmask)
126{ 132{
127 return (int) 0xF; 133 return (int) 0xF;
128} 134}
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h
index 935588d286cf..a8374c652778 100644
--- a/arch/x86/include/asm/numaq/ipi.h
+++ b/arch/x86/include/asm/numaq/ipi.h
@@ -1,25 +1,22 @@
1#ifndef __ASM_NUMAQ_IPI_H 1#ifndef __ASM_NUMAQ_IPI_H
2#define __ASM_NUMAQ_IPI_H 2#define __ASM_NUMAQ_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t, int vector); 4void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
5void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
5 6
6static inline void send_IPI_mask(cpumask_t mask, int vector) 7static inline void send_IPI_mask(const struct cpumask *mask, int vector)
7{ 8{
8 send_IPI_mask_sequence(mask, vector); 9 send_IPI_mask_sequence(mask, vector);
9} 10}
10 11
11static inline void send_IPI_allbutself(int vector) 12static inline void send_IPI_allbutself(int vector)
12{ 13{
13 cpumask_t mask = cpu_online_map; 14 send_IPI_mask_allbutself(cpu_online_mask, vector);
14 cpu_clear(smp_processor_id(), mask);
15
16 if (!cpus_empty(mask))
17 send_IPI_mask(mask, vector);
18} 15}
19 16
20static inline void send_IPI_all(int vector) 17static inline void send_IPI_all(int vector)
21{ 18{
22 send_IPI_mask(cpu_online_map, vector); 19 send_IPI_mask(cpu_online_mask, vector);
23} 20}
24 21
25#endif /* __ASM_NUMAQ_IPI_H */ 22#endif /* __ASM_NUMAQ_IPI_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index d12811ce51d9..830b9fcb6427 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -60,7 +60,7 @@ struct smp_ops {
60 void (*cpu_die)(unsigned int cpu); 60 void (*cpu_die)(unsigned int cpu);
61 void (*play_dead)(void); 61 void (*play_dead)(void);
62 62
63 void (*send_call_func_ipi)(cpumask_t mask); 63 void (*send_call_func_ipi)(const struct cpumask *mask);
64 void (*send_call_func_single_ipi)(int cpu); 64 void (*send_call_func_single_ipi)(int cpu);
65}; 65};
66 66
@@ -125,7 +125,7 @@ static inline void arch_send_call_function_single_ipi(int cpu)
125 125
126static inline void arch_send_call_function_ipi(cpumask_t mask) 126static inline void arch_send_call_function_ipi(cpumask_t mask)
127{ 127{
128 smp_ops.send_call_func_ipi(mask); 128 smp_ops.send_call_func_ipi(&mask);
129} 129}
130 130
131void cpu_disable_common(void); 131void cpu_disable_common(void);
@@ -138,7 +138,7 @@ void native_cpu_die(unsigned int cpu);
138void native_play_dead(void); 138void native_play_dead(void);
139void play_dead_common(void); 139void play_dead_common(void);
140 140
141void native_send_call_func_ipi(cpumask_t mask); 141void native_send_call_func_ipi(const struct cpumask *mask);
142void native_send_call_func_single_ipi(int cpu); 142void native_send_call_func_single_ipi(int cpu);
143 143
144extern void prefill_possible_map(void); 144extern void prefill_possible_map(void);
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
index 9b3070f1c2ac..651a93849341 100644
--- a/arch/x86/include/asm/summit/apic.h
+++ b/arch/x86/include/asm/summit/apic.h
@@ -14,13 +14,13 @@
14 14
15#define APIC_DFR_VALUE (APIC_DFR_CLUSTER) 15#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
16 16
17static inline cpumask_t target_cpus(void) 17static inline const cpumask_t *target_cpus(void)
18{ 18{
19 /* CPU_MASK_ALL (0xff) has undefined behaviour with 19 /* CPU_MASK_ALL (0xff) has undefined behaviour with
20 * dest_LowestPrio mode logical clustered apic interrupt routing 20 * dest_LowestPrio mode logical clustered apic interrupt routing
21 * Just start on cpu 0. IRQ balancing will spread load 21 * Just start on cpu 0. IRQ balancing will spread load
22 */ 22 */
23 return cpumask_of_cpu(0); 23 return &cpumask_of_cpu(0);
24} 24}
25 25
26#define INT_DELIVERY_MODE (dest_LowestPrio) 26#define INT_DELIVERY_MODE (dest_LowestPrio)
@@ -137,14 +137,14 @@ static inline void enable_apic_mode(void)
137{ 137{
138} 138}
139 139
140static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) 140static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
141{ 141{
142 int num_bits_set; 142 int num_bits_set;
143 int cpus_found = 0; 143 int cpus_found = 0;
144 int cpu; 144 int cpu;
145 int apicid; 145 int apicid;
146 146
147 num_bits_set = cpus_weight(cpumask); 147 num_bits_set = cpus_weight(*cpumask);
148 /* Return id to all */ 148 /* Return id to all */
149 if (num_bits_set == NR_CPUS) 149 if (num_bits_set == NR_CPUS)
150 return (int) 0xFF; 150 return (int) 0xFF;
@@ -152,10 +152,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
152 * The cpus in the mask must all be on the apic cluster. If are not 152 * The cpus in the mask must all be on the apic cluster. If are not
153 * on the same apicid cluster return default value of TARGET_CPUS. 153 * on the same apicid cluster return default value of TARGET_CPUS.
154 */ 154 */
155 cpu = first_cpu(cpumask); 155 cpu = first_cpu(*cpumask);
156 apicid = cpu_to_logical_apicid(cpu); 156 apicid = cpu_to_logical_apicid(cpu);
157 while (cpus_found < num_bits_set) { 157 while (cpus_found < num_bits_set) {
158 if (cpu_isset(cpu, cpumask)) { 158 if (cpu_isset(cpu, *cpumask)) {
159 int new_apicid = cpu_to_logical_apicid(cpu); 159 int new_apicid = cpu_to_logical_apicid(cpu);
160 if (apicid_cluster(apicid) != 160 if (apicid_cluster(apicid) !=
161 apicid_cluster(new_apicid)){ 161 apicid_cluster(new_apicid)){
@@ -170,6 +170,45 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
170 return apicid; 170 return apicid;
171} 171}
172 172
173static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
174 const struct cpumask *andmask)
175{
176 int num_bits_set;
177 int num_bits_set2;
178 int cpus_found = 0;
179 int cpu;
180 int apicid = 0;
181
182 num_bits_set = cpumask_weight(cpumask);
183 num_bits_set2 = cpumask_weight(andmask);
184 num_bits_set = min(num_bits_set, num_bits_set2);
185 /* Return id to all */
186 if (num_bits_set >= nr_cpu_ids)
187 return 0xFF;
188 /*
189 * The cpus in the mask must all be on the apic cluster. If are not
190 * on the same apicid cluster return default value of TARGET_CPUS.
191 */
192 cpu = cpumask_first_and(cpumask, andmask);
193 apicid = cpu_to_logical_apicid(cpu);
194 while (cpus_found < num_bits_set) {
195 if (cpumask_test_cpu(cpu, cpumask)
196 && cpumask_test_cpu(cpu, andmask)) {
197 int new_apicid = cpu_to_logical_apicid(cpu);
198 if (apicid_cluster(apicid) !=
199 apicid_cluster(new_apicid)) {
200 printk(KERN_WARNING
201 "%s: Not a valid mask!\n", __func__);
202 return 0xFF;
203 }
204 apicid = apicid | new_apicid;
205 cpus_found++;
206 }
207 cpu++;
208 }
209 return apicid;
210}
211
173/* cpuid returns the value latched in the HW at reset, not the APIC ID 212/* cpuid returns the value latched in the HW at reset, not the APIC ID
174 * register's value. For any box whose BIOS changes APIC IDs, like 213 * register's value. For any box whose BIOS changes APIC IDs, like
175 * clustered APIC systems, we must use hard_smp_processor_id. 214 * clustered APIC systems, we must use hard_smp_processor_id.
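 
Note that the Summit variant accumulates apicid | new_apicid where the es7000 one overwrites it: in logical clustered mode each CPU within a cluster owns a distinct bit of the APIC ID, so IDs from the same cluster can be ORed into a single multi-CPU destination. A sketch of that merge rule (the cluster layout here is simplified for illustration, not taken from the Summit hardware docs):

#include <stdio.h>

#define APIC_CLUSTER(id)  ((id) & 0xF0) /* high nibble selects the cluster */

/* Merge logical APIC IDs; fall back to 0xFF on a cluster mismatch. */
static unsigned int merge_apicids(const unsigned int *ids, int n)
{
        unsigned int apicid = ids[0];

        for (int i = 1; i < n; i++) {
                if (APIC_CLUSTER(ids[i]) != APIC_CLUSTER(apicid))
                        return 0xFF;    /* not a valid single-cluster mask */
                apicid |= ids[i];       /* each CPU owns one low-nibble bit */
        }
        return apicid;
}

int main(void)
{
        unsigned int same[] = { 0x11, 0x12, 0x14 };     /* cluster 1, CPUs 0-2 */
        unsigned int mixed[] = { 0x11, 0x21 };          /* clusters 1 and 2 */

        printf("same cluster:  0x%02X\n", merge_apicids(same, 3));  /* 0x17 */
        printf("mixed cluster: 0x%02X\n", merge_apicids(mixed, 2)); /* 0xFF */
        return 0;
}
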
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h
index 53bd1e7bd7b4..a8a2c24f50cc 100644
--- a/arch/x86/include/asm/summit/ipi.h
+++ b/arch/x86/include/asm/summit/ipi.h
@@ -1,9 +1,10 @@
1#ifndef __ASM_SUMMIT_IPI_H 1#ifndef __ASM_SUMMIT_IPI_H
2#define __ASM_SUMMIT_IPI_H 2#define __ASM_SUMMIT_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t mask, int vector); 4void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
5void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
5 6
6static inline void send_IPI_mask(cpumask_t mask, int vector) 7static inline void send_IPI_mask(const cpumask_t *mask, int vector)
7{ 8{
8 send_IPI_mask_sequence(mask, vector); 9 send_IPI_mask_sequence(mask, vector);
9} 10}
@@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector)
14 cpu_clear(smp_processor_id(), mask); 15 cpu_clear(smp_processor_id(), mask);
15 16
16 if (!cpus_empty(mask)) 17 if (!cpus_empty(mask))
17 send_IPI_mask(mask, vector); 18 send_IPI_mask(&mask, vector);
18} 19}
19 20
20static inline void send_IPI_all(int vector) 21static inline void send_IPI_all(int vector)
21{ 22{
22 send_IPI_mask(cpu_online_map, vector); 23 send_IPI_mask(&cpu_online_map, vector);
23} 24}
24 25
25#endif /* __ASM_SUMMIT_IPI_H */ 26#endif /* __ASM_SUMMIT_IPI_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index ff386ff50ed7..79e31e9dcdda 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -226,6 +226,8 @@ extern cpumask_t cpu_coregroup_map(int cpu);
226#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) 226#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
227#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) 227#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
228#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 228#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
229#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu))
230#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
229 231
230/* indicates that pointers to the topology cpumask_t maps are valid */ 232/* indicates that pointers to the topology cpumask_t maps are valid */
231#define arch_provides_topology_pointers yes 233#define arch_provides_topology_pointers yes
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index b2cef49f3085..edda4c00e3d2 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -141,7 +141,7 @@ static int lapic_next_event(unsigned long delta,
141 struct clock_event_device *evt); 141 struct clock_event_device *evt);
142static void lapic_timer_setup(enum clock_event_mode mode, 142static void lapic_timer_setup(enum clock_event_mode mode,
143 struct clock_event_device *evt); 143 struct clock_event_device *evt);
144static void lapic_timer_broadcast(const struct cpumask *mask); 144static void lapic_timer_broadcast(const cpumask_t *mask);
145static void apic_pm_activate(void); 145static void apic_pm_activate(void);
146 146
147/* 147/*
@@ -453,10 +453,10 @@ static void lapic_timer_setup(enum clock_event_mode mode,
453/* 453/*
454 * Local APIC timer broadcast function 454 * Local APIC timer broadcast function
455 */ 455 */
456static void lapic_timer_broadcast(const struct cpumask *mask) 456static void lapic_timer_broadcast(const cpumask_t *mask)
457{ 457{
458#ifdef CONFIG_SMP 458#ifdef CONFIG_SMP
459 send_IPI_mask(*mask, LOCAL_TIMER_VECTOR); 459 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
460#endif 460#endif
461} 461}
462 462
@@ -1903,8 +1903,8 @@ void __cpuinit generic_processor_info(int apicid, int version)
1903 } 1903 }
1904#endif 1904#endif
1905 1905
1906 cpu_set(cpu, cpu_possible_map); 1906 set_cpu_possible(cpu, true);
1907 cpu_set(cpu, cpu_present_map); 1907 set_cpu_present(cpu, true);
1908} 1908}
1909 1909
1910#ifdef CONFIG_X86_64 1910#ifdef CONFIG_X86_64
@@ -2106,7 +2106,7 @@ __cpuinit int apic_is_clustered_box(void)
2106 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); 2106 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
2107 bitmap_zero(clustermap, NUM_APIC_CLUSTERS); 2107 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
2108 2108
2109 for (i = 0; i < NR_CPUS; i++) { 2109 for (i = 0; i < nr_cpu_ids; i++) {
2110 /* are we being called early in kernel startup? */ 2110 /* are we being called early in kernel startup? */
2111 if (bios_cpu_apicid) { 2111 if (bios_cpu_apicid) {
2112 id = bios_cpu_apicid[i]; 2112 id = bios_cpu_apicid[i];
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 43ea612d3e9d..fb7f946cb65e 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -534,31 +534,16 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
534 per_cpu(cpuid4_info, cpu) = NULL; 534 per_cpu(cpuid4_info, cpu) = NULL;
535} 535}
536 536
537static int __cpuinit detect_cache_attributes(unsigned int cpu) 537static void get_cpu_leaves(void *_retval)
538{ 538{
539 struct _cpuid4_info *this_leaf; 539 int j, *retval = _retval, cpu = smp_processor_id();
540 unsigned long j;
541 int retval;
542 cpumask_t oldmask;
543
544 if (num_cache_leaves == 0)
545 return -ENOENT;
546
547 per_cpu(cpuid4_info, cpu) = kzalloc(
548 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
549 if (per_cpu(cpuid4_info, cpu) == NULL)
550 return -ENOMEM;
551
552 oldmask = current->cpus_allowed;
553 retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
554 if (retval)
555 goto out;
556 540
557 /* Do cpuid and store the results */ 541 /* Do cpuid and store the results */
558 for (j = 0; j < num_cache_leaves; j++) { 542 for (j = 0; j < num_cache_leaves; j++) {
543 struct _cpuid4_info *this_leaf;
559 this_leaf = CPUID4_INFO_IDX(cpu, j); 544 this_leaf = CPUID4_INFO_IDX(cpu, j);
560 retval = cpuid4_cache_lookup(j, this_leaf); 545 *retval = cpuid4_cache_lookup(j, this_leaf);
561 if (unlikely(retval < 0)) { 546 if (unlikely(*retval < 0)) {
562 int i; 547 int i;
563 548
564 for (i = 0; i < j; i++) 549 for (i = 0; i < j; i++)
@@ -567,9 +552,21 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
567 } 552 }
568 cache_shared_cpu_map_setup(cpu, j); 553 cache_shared_cpu_map_setup(cpu, j);
569 } 554 }
570 set_cpus_allowed_ptr(current, &oldmask); 555}
556
557static int __cpuinit detect_cache_attributes(unsigned int cpu)
558{
559 int retval;
560
561 if (num_cache_leaves == 0)
562 return -ENOENT;
563
564 per_cpu(cpuid4_info, cpu) = kzalloc(
565 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
566 if (per_cpu(cpuid4_info, cpu) == NULL)
567 return -ENOMEM;
571 568
572out: 569 smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
573 if (retval) { 570 if (retval) {
574 kfree(per_cpu(cpuid4_info, cpu)); 571 kfree(per_cpu(cpuid4_info, cpu));
575 per_cpu(cpuid4_info, cpu) = NULL; 572 per_cpu(cpuid4_info, cpu) = NULL;
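
The detect_cache_attributes() rework replaces the old pattern of retargeting the calling task's affinity (set_cpus_allowed_ptr, probe, restore) with smp_call_function_single(), which runs the probe on the target CPU and waits for it. A rough user-space analogue using a pinned worker thread (this only models the idea; the kernel primitive is IPI-based and has no thread):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

struct call {
        void (*fn)(void *);
        void *arg;
        int cpu;
};

static void *trampoline(void *_c)
{
        struct call *c = _c;
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(c->cpu, &set);
        /* Pin this worker, then run the payload where the data lives. */
        pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
        c->fn(c->arg);
        return NULL;
}

/* User-space stand-in for smp_call_function_single(cpu, fn, arg, wait=1). */
static int call_on_cpu(int cpu, void (*fn)(void *), void *arg)
{
        struct call c = { fn, arg, cpu };
        pthread_t t;

        if (pthread_create(&t, NULL, trampoline, &c))
                return -1;
        return pthread_join(t, NULL);   /* the 'wait' semantics */
}

static void probe(void *_ret)
{
        *(int *)_ret = sched_getcpu();  /* stands in for the cpuid probe */
}

int main(void)  /* build with -pthread */
{
        int ret = -1;

        call_on_cpu(0, probe, &ret);
        printf("probe ran on cpu %d\n", ret);
        return 0;
}
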
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 5eb390a4b2e9..a1de80f368f1 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -83,34 +83,41 @@ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
83 * CPU Initialization 83 * CPU Initialization
84 */ 84 */
85 85
86struct thresh_restart {
87 struct threshold_block *b;
88 int reset;
89 u16 old_limit;
90};
91
86/* must be called with correct cpu affinity */ 92/* must be called with correct cpu affinity */
87static void threshold_restart_bank(struct threshold_block *b, 93static long threshold_restart_bank(void *_tr)
88 int reset, u16 old_limit)
89{ 94{
95 struct thresh_restart *tr = _tr;
90 u32 mci_misc_hi, mci_misc_lo; 96 u32 mci_misc_hi, mci_misc_lo;
91 97
92 rdmsr(b->address, mci_misc_lo, mci_misc_hi); 98 rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
93 99
94 if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX)) 100 if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
95 reset = 1; /* limit cannot be lower than err count */ 101 tr->reset = 1; /* limit cannot be lower than err count */
96 102
97 if (reset) { /* reset err count and overflow bit */ 103 if (tr->reset) { /* reset err count and overflow bit */
98 mci_misc_hi = 104 mci_misc_hi =
99 (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) | 105 (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
100 (THRESHOLD_MAX - b->threshold_limit); 106 (THRESHOLD_MAX - tr->b->threshold_limit);
101 } else if (old_limit) { /* change limit w/o reset */ 107 } else if (tr->old_limit) { /* change limit w/o reset */
102 int new_count = (mci_misc_hi & THRESHOLD_MAX) + 108 int new_count = (mci_misc_hi & THRESHOLD_MAX) +
103 (old_limit - b->threshold_limit); 109 (tr->old_limit - tr->b->threshold_limit);
104 mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) | 110 mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
105 (new_count & THRESHOLD_MAX); 111 (new_count & THRESHOLD_MAX);
106 } 112 }
107 113
108 b->interrupt_enable ? 114 tr->b->interrupt_enable ?
109 (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) : 115 (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
110 (mci_misc_hi &= ~MASK_INT_TYPE_HI); 116 (mci_misc_hi &= ~MASK_INT_TYPE_HI);
111 117
112 mci_misc_hi |= MASK_COUNT_EN_HI; 118 mci_misc_hi |= MASK_COUNT_EN_HI;
113 wrmsr(b->address, mci_misc_lo, mci_misc_hi); 119 wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
120 return 0;
114} 121}
115 122
116/* cpu init entry point, called from mce.c with preempt off */ 123/* cpu init entry point, called from mce.c with preempt off */
@@ -120,6 +127,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
120 unsigned int cpu = smp_processor_id(); 127 unsigned int cpu = smp_processor_id();
121 u8 lvt_off; 128 u8 lvt_off;
122 u32 low = 0, high = 0, address = 0; 129 u32 low = 0, high = 0, address = 0;
130 struct thresh_restart tr;
123 131
124 for (bank = 0; bank < NR_BANKS; ++bank) { 132 for (bank = 0; bank < NR_BANKS; ++bank) {
125 for (block = 0; block < NR_BLOCKS; ++block) { 133 for (block = 0; block < NR_BLOCKS; ++block) {
@@ -162,7 +170,10 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
162 wrmsr(address, low, high); 170 wrmsr(address, low, high);
163 171
164 threshold_defaults.address = address; 172 threshold_defaults.address = address;
165 threshold_restart_bank(&threshold_defaults, 0, 0); 173 tr.b = &threshold_defaults;
174 tr.reset = 0;
175 tr.old_limit = 0;
176 threshold_restart_bank(&tr);
166 } 177 }
167 } 178 }
168} 179}
@@ -251,20 +262,6 @@ struct threshold_attr {
251 ssize_t(*store) (struct threshold_block *, const char *, size_t count); 262 ssize_t(*store) (struct threshold_block *, const char *, size_t count);
252}; 263};
253 264
254static void affinity_set(unsigned int cpu, cpumask_t *oldmask,
255 cpumask_t *newmask)
256{
257 *oldmask = current->cpus_allowed;
258 cpus_clear(*newmask);
259 cpu_set(cpu, *newmask);
260 set_cpus_allowed_ptr(current, newmask);
261}
262
263static void affinity_restore(const cpumask_t *oldmask)
264{
265 set_cpus_allowed_ptr(current, oldmask);
266}
267
268#define SHOW_FIELDS(name) \ 265#define SHOW_FIELDS(name) \
269static ssize_t show_ ## name(struct threshold_block * b, char *buf) \ 266static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
270{ \ 267{ \
@@ -277,15 +274,16 @@ static ssize_t store_interrupt_enable(struct threshold_block *b,
277 const char *buf, size_t count) 274 const char *buf, size_t count)
278{ 275{
279 char *end; 276 char *end;
280 cpumask_t oldmask, newmask; 277 struct thresh_restart tr;
281 unsigned long new = simple_strtoul(buf, &end, 0); 278 unsigned long new = simple_strtoul(buf, &end, 0);
282 if (end == buf) 279 if (end == buf)
283 return -EINVAL; 280 return -EINVAL;
284 b->interrupt_enable = !!new; 281 b->interrupt_enable = !!new;
285 282
286 affinity_set(b->cpu, &oldmask, &newmask); 283 tr.b = b;
287 threshold_restart_bank(b, 0, 0); 284 tr.reset = 0;
288 affinity_restore(&oldmask); 285 tr.old_limit = 0;
286 work_on_cpu(b->cpu, threshold_restart_bank, &tr);
289 287
290 return end - buf; 288 return end - buf;
291} 289}
@@ -294,8 +292,7 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
294 const char *buf, size_t count) 292 const char *buf, size_t count)
295{ 293{
296 char *end; 294 char *end;
297 cpumask_t oldmask, newmask; 295 struct thresh_restart tr;
298 u16 old;
299 unsigned long new = simple_strtoul(buf, &end, 0); 296 unsigned long new = simple_strtoul(buf, &end, 0);
300 if (end == buf) 297 if (end == buf)
301 return -EINVAL; 298 return -EINVAL;
@@ -303,34 +300,36 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
303 new = THRESHOLD_MAX; 300 new = THRESHOLD_MAX;
304 if (new < 1) 301 if (new < 1)
305 new = 1; 302 new = 1;
306 old = b->threshold_limit; 303 tr.old_limit = b->threshold_limit;
307 b->threshold_limit = new; 304 b->threshold_limit = new;
305 tr.b = b;
306 tr.reset = 0;
308 307
309 affinity_set(b->cpu, &oldmask, &newmask); 308 work_on_cpu(b->cpu, threshold_restart_bank, &tr);
310 threshold_restart_bank(b, 0, old);
311 affinity_restore(&oldmask);
312 309
313 return end - buf; 310 return end - buf;
314} 311}
315 312
316static ssize_t show_error_count(struct threshold_block *b, char *buf) 313static long local_error_count(void *_b)
317{ 314{
318 u32 high, low; 315 struct threshold_block *b = _b;
319 cpumask_t oldmask, newmask; 316 u32 low, high;
320 affinity_set(b->cpu, &oldmask, &newmask); 317
321 rdmsr(b->address, low, high); 318 rdmsr(b->address, low, high);
322 affinity_restore(&oldmask); 319 return (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
323 return sprintf(buf, "%x\n", 320}
324 (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit)); 321
322static ssize_t show_error_count(struct threshold_block *b, char *buf)
323{
324 return sprintf(buf, "%lx\n", work_on_cpu(b->cpu, local_error_count, b));
325} 325}
326 326
327static ssize_t store_error_count(struct threshold_block *b, 327static ssize_t store_error_count(struct threshold_block *b,
328 const char *buf, size_t count) 328 const char *buf, size_t count)
329{ 329{
330 cpumask_t oldmask, newmask; 330 struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };
331 affinity_set(b->cpu, &oldmask, &newmask); 331
332 threshold_restart_bank(b, 1, 0); 332 work_on_cpu(b->cpu, threshold_restart_bank, &tr);
333 affinity_restore(&oldmask);
334 return 1; 333 return 1;
335} 334}
336 335
@@ -463,12 +462,19 @@ out_free:
463 return err; 462 return err;
464} 463}
465 464
465static long local_allocate_threshold_blocks(void *_bank)
466{
467 unsigned int *bank = _bank;
468
469 return allocate_threshold_blocks(smp_processor_id(), *bank, 0,
470 MSR_IA32_MC0_MISC + *bank * 4);
471}
472
466/* symlinks sibling shared banks to first core. first core owns dir/files. */ 473/* symlinks sibling shared banks to first core. first core owns dir/files. */
467static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) 474static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
468{ 475{
469 int i, err = 0; 476 int i, err = 0;
470 struct threshold_bank *b = NULL; 477 struct threshold_bank *b = NULL;
471 cpumask_t oldmask, newmask;
472 char name[32]; 478 char name[32];
473 479
474 sprintf(name, "threshold_bank%i", bank); 480 sprintf(name, "threshold_bank%i", bank);
@@ -519,11 +525,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
519 525
520 per_cpu(threshold_banks, cpu)[bank] = b; 526 per_cpu(threshold_banks, cpu)[bank] = b;
521 527
522 affinity_set(cpu, &oldmask, &newmask); 528 err = work_on_cpu(cpu, local_allocate_threshold_blocks, &bank);
523 err = allocate_threshold_blocks(cpu, bank, 0,
524 MSR_IA32_MC0_MISC + bank * 4);
525 affinity_restore(&oldmask);
526
527 if (err) 529 if (err)
528 goto out_free; 530 goto out_free;
529 531
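
threshold_restart_bank() used to take three arguments; work_on_cpu() passes its callback a single void * and returns a long, so the commit introduces struct thresh_restart to carry the extra parameters. That argument-packing idiom is the same one pthread_create forces; a minimal sketch of it (names below are illustrative, not the kernel's):

#include <stdio.h>

/* work_on_cpu-style callback: one opaque pointer in, a long out. */
typedef long (*work_fn)(void *);

struct restart_args {
        int bank;
        int reset;
        unsigned short old_limit;
};

static long restart_bank(void *_arg)
{
        struct restart_args *a = _arg;

        printf("bank=%d reset=%d old_limit=%u\n",
               a->bank, a->reset, a->old_limit);
        return 0;
}

/* Stand-in for work_on_cpu(cpu, fn, arg); a real one queues work on 'cpu'. */
static long run_on(int cpu, work_fn fn, void *arg)
{
        (void)cpu;
        return fn(arg);
}

int main(void)
{
        struct restart_args a = { .bank = 3, .reset = 1, .old_limit = 0 };

        return (int)run_on(0, restart_bank, &a);
}
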
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 268553817909..81e01f7b1d12 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -77,10 +77,7 @@ static int crash_nmi_callback(struct notifier_block *self,
77 77
78static void smp_send_nmi_allbutself(void) 78static void smp_send_nmi_allbutself(void)
79{ 79{
80 cpumask_t mask = cpu_online_map; 80 send_IPI_allbutself(NMI_VECTOR);
81 cpu_clear(safe_smp_processor_id(), mask);
82 if (!cpus_empty(mask))
83 send_IPI_mask(mask, NMI_VECTOR);
84} 81}
85 82
86static struct notifier_block crash_nmi_nb = { 83static struct notifier_block crash_nmi_nb = {
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index c0262791bda4..7fa5f49c2dda 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
         return 1;
 }
 
-static cpumask_t flat_target_cpus(void)
+static const struct cpumask *flat_target_cpus(void)
 {
-        return cpu_online_map;
+        return cpu_online_mask;
 }
 
-static cpumask_t flat_vector_allocation_domain(int cpu)
+static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
         /* Careful. Some cpus do not strictly honor the set of cpus
          * specified in the interrupt destination when using lowest
@@ -45,8 +45,8 @@ static cpumask_t flat_vector_allocation_domain(int cpu)
          * deliver interrupts to the wrong hyperthread when only one
          * hyperthread was specified in the interrupt desitination.
          */
-        cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-        return domain;
+        cpumask_clear(retmask);
+        cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 /*
@@ -69,9 +69,8 @@ static void flat_init_apic_ldr(void)
         apic_write(APIC_LDR, val);
 }
 
-static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
+static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
 {
-        unsigned long mask = cpus_addr(cpumask)[0];
         unsigned long flags;
 
         local_irq_save(flags);
@@ -79,20 +78,41 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
         local_irq_restore(flags);
 }
 
+static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
+{
+        unsigned long mask = cpumask_bits(cpumask)[0];
+
+        _flat_send_IPI_mask(mask, vector);
+}
+
+static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
+                                          int vector)
+{
+        unsigned long mask = cpumask_bits(cpumask)[0];
+        int cpu = smp_processor_id();
+
+        if (cpu < BITS_PER_LONG)
+                clear_bit(cpu, &mask);
+        _flat_send_IPI_mask(mask, vector);
+}
+
 static void flat_send_IPI_allbutself(int vector)
 {
+        int cpu = smp_processor_id();
 #ifdef  CONFIG_HOTPLUG_CPU
         int hotplug = 1;
 #else
         int hotplug = 0;
 #endif
         if (hotplug || vector == NMI_VECTOR) {
-                cpumask_t allbutme = cpu_online_map;
+                if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
+                        unsigned long mask = cpumask_bits(cpu_online_mask)[0];
 
-                cpu_clear(smp_processor_id(), allbutme);
+                        if (cpu < BITS_PER_LONG)
+                                clear_bit(cpu, &mask);
 
-                if (!cpus_empty(allbutme))
-                        flat_send_IPI_mask(allbutme, vector);
+                        _flat_send_IPI_mask(mask, vector);
+                }
         } else if (num_online_cpus() > 1) {
                 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
         }
@@ -101,7 +121,7 @@ static void flat_send_IPI_allbutself(int vector)
 static void flat_send_IPI_all(int vector)
 {
         if (vector == NMI_VECTOR)
-                flat_send_IPI_mask(cpu_online_map, vector);
+                flat_send_IPI_mask(cpu_online_mask, vector);
         else
                 __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
 }
@@ -135,9 +155,18 @@ static int flat_apic_id_registered(void)
         return physid_isset(read_xapic_id(), phys_cpu_present_map);
 }
 
-static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+        return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+}
+
+static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                const struct cpumask *andmask)
 {
-        return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
+        unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+        unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS;
+
+        return mask1 & mask2;
 }
 
 static unsigned int phys_pkg_id(int index_msb)
@@ -157,8 +186,10 @@ struct genapic apic_flat = {
         .send_IPI_all = flat_send_IPI_all,
         .send_IPI_allbutself = flat_send_IPI_allbutself,
         .send_IPI_mask = flat_send_IPI_mask,
+        .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
         .send_IPI_self = apic_send_IPI_self,
         .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
+        .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
         .phys_pkg_id = phys_pkg_id,
         .get_apic_id = get_apic_id,
164 .set_apic_id = set_apic_id, 195 .set_apic_id = set_apic_id,
@@ -188,35 +219,39 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
188 return 0; 219 return 0;
189} 220}
190 221
191static cpumask_t physflat_target_cpus(void) 222static const struct cpumask *physflat_target_cpus(void)
192{ 223{
193 return cpu_online_map; 224 return cpu_online_mask;
194} 225}
195 226
196static cpumask_t physflat_vector_allocation_domain(int cpu) 227static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
197{ 228{
198 return cpumask_of_cpu(cpu); 229 cpumask_clear(retmask);
230 cpumask_set_cpu(cpu, retmask);
199} 231}
200 232
201static void physflat_send_IPI_mask(cpumask_t cpumask, int vector) 233static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
202{ 234{
203 send_IPI_mask_sequence(cpumask, vector); 235 send_IPI_mask_sequence(cpumask, vector);
204} 236}
205 237
206static void physflat_send_IPI_allbutself(int vector) 238static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
239 int vector)
207{ 240{
208 cpumask_t allbutme = cpu_online_map; 241 send_IPI_mask_allbutself(cpumask, vector);
242}
209 243
210 cpu_clear(smp_processor_id(), allbutme); 244static void physflat_send_IPI_allbutself(int vector)
211 physflat_send_IPI_mask(allbutme, vector); 245{
246 send_IPI_mask_allbutself(cpu_online_mask, vector);
212} 247}
213 248
214static void physflat_send_IPI_all(int vector) 249static void physflat_send_IPI_all(int vector)
215{ 250{
216 physflat_send_IPI_mask(cpu_online_map, vector); 251 physflat_send_IPI_mask(cpu_online_mask, vector);
217} 252}
218 253
219static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) 254static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
220{ 255{
221 int cpu; 256 int cpu;
222 257
@@ -224,13 +259,29 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
224 * We're using fixed IRQ delivery, can only return one phys APIC ID. 259 * We're using fixed IRQ delivery, can only return one phys APIC ID.
225 * May as well be the first. 260 * May as well be the first.
226 */ 261 */
227 cpu = first_cpu(cpumask); 262 cpu = cpumask_first(cpumask);
228 if ((unsigned)cpu < nr_cpu_ids) 263 if ((unsigned)cpu < nr_cpu_ids)
229 return per_cpu(x86_cpu_to_apicid, cpu); 264 return per_cpu(x86_cpu_to_apicid, cpu);
230 else 265 else
231 return BAD_APICID; 266 return BAD_APICID;
232} 267}
233 268
269static unsigned int
270physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
271 const struct cpumask *andmask)
272{
273 int cpu;
274
275 /*
276 * We're using fixed IRQ delivery, can only return one phys APIC ID.
277 * May as well be the first.
278 */
279 cpu = cpumask_any_and(cpumask, andmask);
280 if (cpu < nr_cpu_ids)
281 return per_cpu(x86_cpu_to_apicid, cpu);
282 return BAD_APICID;
283}
284
234struct genapic apic_physflat = { 285struct genapic apic_physflat = {
235 .name = "physical flat", 286 .name = "physical flat",
236 .acpi_madt_oem_check = physflat_acpi_madt_oem_check, 287 .acpi_madt_oem_check = physflat_acpi_madt_oem_check,
@@ -243,8 +294,10 @@ struct genapic apic_physflat = {
243 .send_IPI_all = physflat_send_IPI_all, 294 .send_IPI_all = physflat_send_IPI_all,
244 .send_IPI_allbutself = physflat_send_IPI_allbutself, 295 .send_IPI_allbutself = physflat_send_IPI_allbutself,
245 .send_IPI_mask = physflat_send_IPI_mask, 296 .send_IPI_mask = physflat_send_IPI_mask,
297 .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
246 .send_IPI_self = apic_send_IPI_self, 298 .send_IPI_self = apic_send_IPI_self,
247 .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, 299 .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
300 .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
248 .phys_pkg_id = phys_pkg_id, 301 .phys_pkg_id = phys_pkg_id,
249 .get_apic_id = get_apic_id, 302 .get_apic_id = get_apic_id,
250 .set_apic_id = set_apic_id, 303 .set_apic_id = set_apic_id,
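
Editor's note: flat logical mode can only address CPUs whose bit sits in the first word of the mask (and effectively only the low eight, per APIC_ALL_CPUS), which is why the new helpers read cpumask_bits(cpumask)[0] and guard clear_bit() with cpu < BITS_PER_LONG. A compilable sketch of that word-at-a-time handling; the APIC_ALL_CPUS value is taken from the flat driver, the rest is our model:

/* Model of _flat_send_IPI_mask() and its allbutself variant: only
 * the first word of the cpumask matters in flat logical mode. */
#include <stdio.h>
#include <limits.h>

#define APIC_ALL_CPUS 0xffUL	/* 8 logical IDs in flat mode */

static void flat_send(unsigned long mask, int vector)
{
	printf("vec %#x -> logical mask %#lx\n", vector, mask & APIC_ALL_CPUS);
}

static void flat_send_allbutself(const unsigned long *cpumask_bits,
				 int this_cpu, int vector)
{
	unsigned long mask = cpumask_bits[0];

	/* Bits above the first word can't be addressed in flat mode
	 * anyway; the guard keeps the self-clear in bounds. */
	if (this_cpu < (int)(sizeof(long) * CHAR_BIT))
		mask &= ~(1UL << this_cpu);
	flat_send(mask, vector);
}

int main(void)
{
	unsigned long bits[2] = { 0x3f, 0 };	/* multi-word mask, CPUs 0-5 set */

	flat_send_allbutself(bits, 2, 0xfd);
	return 0;
}
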
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index f6a2c8eb48a6..4716a0c9f936 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -22,19 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
22 22
23/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 23/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
24 24
25static cpumask_t x2apic_target_cpus(void) 25static const struct cpumask *x2apic_target_cpus(void)
26{ 26{
27 return cpumask_of_cpu(0); 27 return cpumask_of(0);
28} 28}
29 29
30/* 30/*
31 * for now each logical cpu is in its own vector allocation domain. 31 * for now each logical cpu is in its own vector allocation domain.
32 */ 32 */
33static cpumask_t x2apic_vector_allocation_domain(int cpu) 33static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
34{ 34{
35 cpumask_t domain = CPU_MASK_NONE; 35 cpumask_clear(retmask);
36 cpu_set(cpu, domain); 36 cpumask_set_cpu(cpu, retmask);
37 return domain;
38} 37}
39 38
40static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, 39static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -56,32 +55,53 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
 56 * at once. We have 16 CPUs in a cluster. This will minimize IPI register 55 * at once. We have 16 CPUs in a cluster. This will minimize IPI register
 57 * writes. 56 * writes.
 58 */ 57 */
59static void x2apic_send_IPI_mask(cpumask_t mask, int vector) 58static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
60{ 59{
61 unsigned long flags; 60 unsigned long flags;
62 unsigned long query_cpu; 61 unsigned long query_cpu;
63 62
64 local_irq_save(flags); 63 local_irq_save(flags);
65 for_each_cpu_mask(query_cpu, mask) { 64 for_each_cpu(query_cpu, mask)
66 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu), 65 __x2apic_send_IPI_dest(
67 vector, APIC_DEST_LOGICAL); 66 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
68 } 67 vector, APIC_DEST_LOGICAL);
69 local_irq_restore(flags); 68 local_irq_restore(flags);
70} 69}
71 70
72static void x2apic_send_IPI_allbutself(int vector) 71static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
72 int vector)
73{ 73{
74 cpumask_t mask = cpu_online_map; 74 unsigned long flags;
75 unsigned long query_cpu;
76 unsigned long this_cpu = smp_processor_id();
75 77
76 cpu_clear(smp_processor_id(), mask); 78 local_irq_save(flags);
79 for_each_cpu(query_cpu, mask)
80 if (query_cpu != this_cpu)
81 __x2apic_send_IPI_dest(
82 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
83 vector, APIC_DEST_LOGICAL);
84 local_irq_restore(flags);
85}
86
87static void x2apic_send_IPI_allbutself(int vector)
88{
89 unsigned long flags;
90 unsigned long query_cpu;
91 unsigned long this_cpu = smp_processor_id();
77 92
78 if (!cpus_empty(mask)) 93 local_irq_save(flags);
79 x2apic_send_IPI_mask(mask, vector); 94 for_each_online_cpu(query_cpu)
95 if (query_cpu != this_cpu)
96 __x2apic_send_IPI_dest(
97 per_cpu(x86_cpu_to_logical_apicid, query_cpu),
98 vector, APIC_DEST_LOGICAL);
99 local_irq_restore(flags);
80} 100}
81 101
82static void x2apic_send_IPI_all(int vector) 102static void x2apic_send_IPI_all(int vector)
83{ 103{
84 x2apic_send_IPI_mask(cpu_online_map, vector); 104 x2apic_send_IPI_mask(cpu_online_mask, vector);
85} 105}
86 106
87static int x2apic_apic_id_registered(void) 107static int x2apic_apic_id_registered(void)
@@ -89,7 +109,7 @@ static int x2apic_apic_id_registered(void)
89 return 1; 109 return 1;
90} 110}
91 111
92static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) 112static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
93{ 113{
94 int cpu; 114 int cpu;
95 115
@@ -97,13 +117,28 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
97 * We're using fixed IRQ delivery, can only return one phys APIC ID. 117 * We're using fixed IRQ delivery, can only return one phys APIC ID.
98 * May as well be the first. 118 * May as well be the first.
99 */ 119 */
100 cpu = first_cpu(cpumask); 120 cpu = cpumask_first(cpumask);
101 if ((unsigned)cpu < NR_CPUS) 121 if ((unsigned)cpu < nr_cpu_ids)
102 return per_cpu(x86_cpu_to_logical_apicid, cpu); 122 return per_cpu(x86_cpu_to_logical_apicid, cpu);
103 else 123 else
104 return BAD_APICID; 124 return BAD_APICID;
105} 125}
106 126
127static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
128 const struct cpumask *andmask)
129{
130 int cpu;
131
132 /*
133 * We're using fixed IRQ delivery, can only return one phys APIC ID.
134 * May as well be the first.
135 */
136 cpu = cpumask_any_and(cpumask, andmask);
137 if (cpu < nr_cpu_ids)
 138 return per_cpu(x86_cpu_to_logical_apicid, cpu);
139 return BAD_APICID;
140}
141
107static unsigned int get_apic_id(unsigned long x) 142static unsigned int get_apic_id(unsigned long x)
108{ 143{
109 unsigned int id; 144 unsigned int id;
@@ -150,8 +185,10 @@ struct genapic apic_x2apic_cluster = {
150 .send_IPI_all = x2apic_send_IPI_all, 185 .send_IPI_all = x2apic_send_IPI_all,
151 .send_IPI_allbutself = x2apic_send_IPI_allbutself, 186 .send_IPI_allbutself = x2apic_send_IPI_allbutself,
152 .send_IPI_mask = x2apic_send_IPI_mask, 187 .send_IPI_mask = x2apic_send_IPI_mask,
188 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
153 .send_IPI_self = x2apic_send_IPI_self, 189 .send_IPI_self = x2apic_send_IPI_self,
154 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, 190 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
191 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
155 .phys_pkg_id = phys_pkg_id, 192 .phys_pkg_id = phys_pkg_id,
156 .get_apic_id = get_apic_id, 193 .get_apic_id = get_apic_id,
157 .set_apic_id = set_apic_id, 194 .set_apic_id = set_apic_id,
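
Editor's note: every ->vector_allocation_domain() conversion in this series has the same shape: the cpumask_t return value becomes a struct cpumask *retmask output parameter. The point is stack traffic: a mask sized for thousands of CPUs is hundreds of bytes, copied on every by-value return. A self-contained illustration (sizes and helper names are ours):

/* Why the domain is now written through a pointer rather than
 * returned by value: with NR_CPUS=4096 the mask is 512 bytes. */
#include <stdio.h>
#include <string.h>

#define NR_CPUS 4096
#define BITS_PER_LONG (8 * (int)sizeof(long))
struct cpumask { unsigned long bits[NR_CPUS / (8 * sizeof(long))]; };

/* Old shape: the whole struct travels back through the return slot. */
static struct cpumask domain_by_value(int cpu)
{
	struct cpumask m;

	memset(&m, 0, sizeof(m));
	m.bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
	return m;
}

/* New shape: no copy, and the caller may hand in a heap-allocated
 * cpumask_var_t on an offstack build. */
static void domain_by_ref(int cpu, struct cpumask *retmask)
{
	memset(retmask, 0, sizeof(*retmask));
	retmask->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

int main(void)
{
	struct cpumask m;

	printf("sizeof(struct cpumask) = %zu bytes\n", sizeof(m));
	m = domain_by_value(5);
	domain_by_ref(5, &m);
	printf("bit 5 set: %d\n", !!(m.bits[0] & (1UL << 5)));
	return 0;
}
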
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c
index d042211768b7..b255507884f2 100644
--- a/arch/x86/kernel/genx2apic_phys.c
+++ b/arch/x86/kernel/genx2apic_phys.c
@@ -29,16 +29,15 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
29 29
30/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 30/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
31 31
32static cpumask_t x2apic_target_cpus(void) 32static const struct cpumask *x2apic_target_cpus(void)
33{ 33{
34 return cpumask_of_cpu(0); 34 return cpumask_of(0);
35} 35}
36 36
37static cpumask_t x2apic_vector_allocation_domain(int cpu) 37static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
38{ 38{
39 cpumask_t domain = CPU_MASK_NONE; 39 cpumask_clear(retmask);
40 cpu_set(cpu, domain); 40 cpumask_set_cpu(cpu, retmask);
41 return domain;
42} 41}
43 42
44static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, 43static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -54,32 +53,54 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
54 x2apic_icr_write(cfg, apicid); 53 x2apic_icr_write(cfg, apicid);
55} 54}
56 55
57static void x2apic_send_IPI_mask(cpumask_t mask, int vector) 56static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
58{ 57{
59 unsigned long flags; 58 unsigned long flags;
60 unsigned long query_cpu; 59 unsigned long query_cpu;
61 60
62 local_irq_save(flags); 61 local_irq_save(flags);
63 for_each_cpu_mask(query_cpu, mask) { 62 for_each_cpu(query_cpu, mask) {
64 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), 63 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
65 vector, APIC_DEST_PHYSICAL); 64 vector, APIC_DEST_PHYSICAL);
66 } 65 }
67 local_irq_restore(flags); 66 local_irq_restore(flags);
68} 67}
69 68
70static void x2apic_send_IPI_allbutself(int vector) 69static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
70 int vector)
71{ 71{
72 cpumask_t mask = cpu_online_map; 72 unsigned long flags;
73 unsigned long query_cpu;
74 unsigned long this_cpu = smp_processor_id();
75
76 local_irq_save(flags);
77 for_each_cpu(query_cpu, mask) {
78 if (query_cpu != this_cpu)
79 __x2apic_send_IPI_dest(
80 per_cpu(x86_cpu_to_apicid, query_cpu),
81 vector, APIC_DEST_PHYSICAL);
82 }
83 local_irq_restore(flags);
84}
73 85
74 cpu_clear(smp_processor_id(), mask); 86static void x2apic_send_IPI_allbutself(int vector)
87{
88 unsigned long flags;
89 unsigned long query_cpu;
90 unsigned long this_cpu = smp_processor_id();
75 91
76 if (!cpus_empty(mask)) 92 local_irq_save(flags);
77 x2apic_send_IPI_mask(mask, vector); 93 for_each_online_cpu(query_cpu)
94 if (query_cpu != this_cpu)
95 __x2apic_send_IPI_dest(
96 per_cpu(x86_cpu_to_apicid, query_cpu),
97 vector, APIC_DEST_PHYSICAL);
98 local_irq_restore(flags);
78} 99}
79 100
80static void x2apic_send_IPI_all(int vector) 101static void x2apic_send_IPI_all(int vector)
81{ 102{
82 x2apic_send_IPI_mask(cpu_online_map, vector); 103 x2apic_send_IPI_mask(cpu_online_mask, vector);
83} 104}
84 105
85static int x2apic_apic_id_registered(void) 106static int x2apic_apic_id_registered(void)
@@ -87,7 +108,7 @@ static int x2apic_apic_id_registered(void)
87 return 1; 108 return 1;
88} 109}
89 110
90static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) 111static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
91{ 112{
92 int cpu; 113 int cpu;
93 114
@@ -95,13 +116,28 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
95 * We're using fixed IRQ delivery, can only return one phys APIC ID. 116 * We're using fixed IRQ delivery, can only return one phys APIC ID.
96 * May as well be the first. 117 * May as well be the first.
97 */ 118 */
98 cpu = first_cpu(cpumask); 119 cpu = cpumask_first(cpumask);
99 if ((unsigned)cpu < NR_CPUS) 120 if ((unsigned)cpu < nr_cpu_ids)
100 return per_cpu(x86_cpu_to_apicid, cpu); 121 return per_cpu(x86_cpu_to_apicid, cpu);
101 else 122 else
102 return BAD_APICID; 123 return BAD_APICID;
103} 124}
104 125
126static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
127 const struct cpumask *andmask)
128{
129 int cpu;
130
131 /*
132 * We're using fixed IRQ delivery, can only return one phys APIC ID.
133 * May as well be the first.
134 */
135 cpu = cpumask_any_and(cpumask, andmask);
136 if (cpu < nr_cpu_ids)
137 return per_cpu(x86_cpu_to_apicid, cpu);
138 return BAD_APICID;
139}
140
105static unsigned int get_apic_id(unsigned long x) 141static unsigned int get_apic_id(unsigned long x)
106{ 142{
107 unsigned int id; 143 unsigned int id;
@@ -145,8 +181,10 @@ struct genapic apic_x2apic_phys = {
145 .send_IPI_all = x2apic_send_IPI_all, 181 .send_IPI_all = x2apic_send_IPI_all,
146 .send_IPI_allbutself = x2apic_send_IPI_allbutself, 182 .send_IPI_allbutself = x2apic_send_IPI_allbutself,
147 .send_IPI_mask = x2apic_send_IPI_mask, 183 .send_IPI_mask = x2apic_send_IPI_mask,
184 .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
148 .send_IPI_self = x2apic_send_IPI_self, 185 .send_IPI_self = x2apic_send_IPI_self,
149 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, 186 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
187 .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
150 .phys_pkg_id = phys_pkg_id, 188 .phys_pkg_id = phys_pkg_id,
151 .get_apic_id = get_apic_id, 189 .get_apic_id = get_apic_id,
152 .set_apic_id = set_apic_id, 190 .set_apic_id = set_apic_id,
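
Editor's note: the new cpu_mask_to_apicid_and() callbacks exist so callers can ask for "one CPU in the intersection of these two masks" without first materializing cfg->domain & mask, which is what all the deleted cpumask_t tmp variables in io_apic.c further down were for. A small model of the pattern; the apicid_of table stands in for x86_cpu_to_apicid and the ID layout is invented:

/* Model of the cpumask_any_and() pattern in the *_and helpers. */
#include <stdio.h>

#define NR_CPUS 64
#define BAD_APICID 0xffu

static unsigned int apicid_of[NR_CPUS];

static int any_and(unsigned long a, unsigned long b)
{
	unsigned long both = a & b;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (both & (1UL << cpu))
			return cpu;
	return NR_CPUS;		/* >= nr_cpu_ids means "none" */
}

static unsigned int mask_to_apicid_and(unsigned long mask, unsigned long andmask)
{
	int cpu = any_and(mask, andmask);

	return cpu < NR_CPUS ? apicid_of[cpu] : BAD_APICID;
}

int main(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		apicid_of[i] = 2 * i;	/* made-up ID layout */
	printf("apicid = %#x\n", mask_to_apicid_and(0xf0, 0x30));
	return 0;
}
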
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 2c7dbdb98278..3984682cd849 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -75,16 +75,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
75 75
76/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ 76/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
77 77
78static cpumask_t uv_target_cpus(void) 78static const struct cpumask *uv_target_cpus(void)
79{ 79{
80 return cpumask_of_cpu(0); 80 return cpumask_of(0);
81} 81}
82 82
83static cpumask_t uv_vector_allocation_domain(int cpu) 83static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
84{ 84{
85 cpumask_t domain = CPU_MASK_NONE; 85 cpumask_clear(retmask);
86 cpu_set(cpu, domain); 86 cpumask_set_cpu(cpu, retmask);
87 return domain;
88} 87}
89 88
90int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) 89int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
@@ -123,28 +122,37 @@ static void uv_send_IPI_one(int cpu, int vector)
123 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 122 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
124} 123}
125 124
126static void uv_send_IPI_mask(cpumask_t mask, int vector) 125static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
127{ 126{
128 unsigned int cpu; 127 unsigned int cpu;
129 128
130 for_each_possible_cpu(cpu) 129 for_each_cpu(cpu, mask)
131 if (cpu_isset(cpu, mask)) 130 uv_send_IPI_one(cpu, vector);
131}
132
133static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
134{
135 unsigned int cpu;
136 unsigned int this_cpu = smp_processor_id();
137
138 for_each_cpu(cpu, mask)
139 if (cpu != this_cpu)
132 uv_send_IPI_one(cpu, vector); 140 uv_send_IPI_one(cpu, vector);
133} 141}
134 142
135static void uv_send_IPI_allbutself(int vector) 143static void uv_send_IPI_allbutself(int vector)
136{ 144{
137 cpumask_t mask = cpu_online_map; 145 unsigned int cpu;
138 146 unsigned int this_cpu = smp_processor_id();
139 cpu_clear(smp_processor_id(), mask);
140 147
141 if (!cpus_empty(mask)) 148 for_each_online_cpu(cpu)
142 uv_send_IPI_mask(mask, vector); 149 if (cpu != this_cpu)
150 uv_send_IPI_one(cpu, vector);
143} 151}
144 152
145static void uv_send_IPI_all(int vector) 153static void uv_send_IPI_all(int vector)
146{ 154{
147 uv_send_IPI_mask(cpu_online_map, vector); 155 uv_send_IPI_mask(cpu_online_mask, vector);
148} 156}
149 157
150static int uv_apic_id_registered(void) 158static int uv_apic_id_registered(void)
@@ -156,7 +164,7 @@ static void uv_init_apic_ldr(void)
156{ 164{
157} 165}
158 166
159static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) 167static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
160{ 168{
161 int cpu; 169 int cpu;
162 170
@@ -164,13 +172,28 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
164 * We're using fixed IRQ delivery, can only return one phys APIC ID. 172 * We're using fixed IRQ delivery, can only return one phys APIC ID.
165 * May as well be the first. 173 * May as well be the first.
166 */ 174 */
167 cpu = first_cpu(cpumask); 175 cpu = cpumask_first(cpumask);
168 if ((unsigned)cpu < nr_cpu_ids) 176 if ((unsigned)cpu < nr_cpu_ids)
169 return per_cpu(x86_cpu_to_apicid, cpu); 177 return per_cpu(x86_cpu_to_apicid, cpu);
170 else 178 else
171 return BAD_APICID; 179 return BAD_APICID;
172} 180}
173 181
182static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
183 const struct cpumask *andmask)
184{
185 int cpu;
186
187 /*
188 * We're using fixed IRQ delivery, can only return one phys APIC ID.
189 * May as well be the first.
190 */
191 cpu = cpumask_any_and(cpumask, andmask);
192 if (cpu < nr_cpu_ids)
193 return per_cpu(x86_cpu_to_apicid, cpu);
194 return BAD_APICID;
195}
196
174static unsigned int get_apic_id(unsigned long x) 197static unsigned int get_apic_id(unsigned long x)
175{ 198{
176 unsigned int id; 199 unsigned int id;
@@ -218,8 +241,10 @@ struct genapic apic_x2apic_uv_x = {
218 .send_IPI_all = uv_send_IPI_all, 241 .send_IPI_all = uv_send_IPI_all,
219 .send_IPI_allbutself = uv_send_IPI_allbutself, 242 .send_IPI_allbutself = uv_send_IPI_allbutself,
220 .send_IPI_mask = uv_send_IPI_mask, 243 .send_IPI_mask = uv_send_IPI_mask,
244 .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
221 .send_IPI_self = uv_send_IPI_self, 245 .send_IPI_self = uv_send_IPI_self,
222 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, 246 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
247 .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
223 .phys_pkg_id = phys_pkg_id, 248 .phys_pkg_id = phys_pkg_id,
224 .get_apic_id = get_apic_id, 249 .get_apic_id = get_apic_id,
225 .set_apic_id = set_apic_id, 250 .set_apic_id = set_apic_id,
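
Editor's note: uv_send_IPI_mask() previously scanned every possible CPU and tested membership on each iteration; the new for_each_cpu() visits only set bits. A userspace model of both the mask send and the all-but-self variant, with bit scanning done via a gcc/clang builtin (the kernel iterator does the equivalent internally):

/* Model of the uv IPI loops: walk set bits only. */
#include <stdio.h>

static void send_one(int cpu, int vector)
{
	printf("IPI vec %#x -> cpu %d\n", vector, cpu);
}

static void send_mask(unsigned long mask, int vector)
{
	while (mask) {
		int cpu = __builtin_ctzl(mask);	/* lowest set bit */

		send_one(cpu, vector);
		mask &= mask - 1;		/* clear it */
	}
}

static void send_mask_allbutself(unsigned long mask, int self, int vector)
{
	send_mask(mask & ~(1UL << self), vector);
}

int main(void)
{
	send_mask(0x15, 0xf0);			/* CPUs 0, 2, 4 */
	send_mask_allbutself(0x15, 2, 0xf0);	/* skips cpu 2 */
	return 0;
}
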
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 3d7d0d55253f..60bb8b19f4cd 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -136,8 +136,8 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
136 136
137struct irq_cfg { 137struct irq_cfg {
138 struct irq_pin_list *irq_2_pin; 138 struct irq_pin_list *irq_2_pin;
139 cpumask_t domain; 139 cpumask_var_t domain;
140 cpumask_t old_domain; 140 cpumask_var_t old_domain;
141 unsigned move_cleanup_count; 141 unsigned move_cleanup_count;
142 u8 vector; 142 u8 vector;
143 u8 move_in_progress : 1; 143 u8 move_in_progress : 1;
@@ -149,22 +149,22 @@ static struct irq_cfg irq_cfgx[] = {
149#else 149#else
150static struct irq_cfg irq_cfgx[NR_IRQS] = { 150static struct irq_cfg irq_cfgx[NR_IRQS] = {
151#endif 151#endif
152 [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, 152 [0] = { .vector = IRQ0_VECTOR, },
153 [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, 153 [1] = { .vector = IRQ1_VECTOR, },
154 [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, 154 [2] = { .vector = IRQ2_VECTOR, },
155 [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, 155 [3] = { .vector = IRQ3_VECTOR, },
156 [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, 156 [4] = { .vector = IRQ4_VECTOR, },
157 [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, 157 [5] = { .vector = IRQ5_VECTOR, },
158 [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, 158 [6] = { .vector = IRQ6_VECTOR, },
159 [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, 159 [7] = { .vector = IRQ7_VECTOR, },
160 [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, 160 [8] = { .vector = IRQ8_VECTOR, },
161 [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, 161 [9] = { .vector = IRQ9_VECTOR, },
162 [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, 162 [10] = { .vector = IRQ10_VECTOR, },
163 [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, 163 [11] = { .vector = IRQ11_VECTOR, },
164 [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, 164 [12] = { .vector = IRQ12_VECTOR, },
165 [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, 165 [13] = { .vector = IRQ13_VECTOR, },
166 [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, 166 [14] = { .vector = IRQ14_VECTOR, },
167 [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, 167 [15] = { .vector = IRQ15_VECTOR, },
168}; 168};
169 169
170void __init arch_early_irq_init(void) 170void __init arch_early_irq_init(void)
@@ -180,6 +180,10 @@ void __init arch_early_irq_init(void)
180 for (i = 0; i < count; i++) { 180 for (i = 0; i < count; i++) {
181 desc = irq_to_desc(i); 181 desc = irq_to_desc(i);
182 desc->chip_data = &cfg[i]; 182 desc->chip_data = &cfg[i];
183 alloc_bootmem_cpumask_var(&cfg[i].domain);
184 alloc_bootmem_cpumask_var(&cfg[i].old_domain);
185 if (i < NR_IRQS_LEGACY)
186 cpumask_setall(cfg[i].domain);
183 } 187 }
184} 188}
185 189
@@ -204,6 +208,20 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu)
204 node = cpu_to_node(cpu); 208 node = cpu_to_node(cpu);
205 209
206 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); 210 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
211 if (cfg) {
212 /* FIXME: needs alloc_cpumask_var_node() */
213 if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) {
214 kfree(cfg);
215 cfg = NULL;
216 } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) {
217 free_cpumask_var(cfg->domain);
218 kfree(cfg);
219 cfg = NULL;
220 } else {
221 cpumask_clear(cfg->domain);
222 cpumask_clear(cfg->old_domain);
223 }
224 }
207 printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node); 225 printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node);
208 226
209 return cfg; 227 return cfg;
@@ -231,7 +249,8 @@ static struct irq_cfg *irq_cfg(unsigned int irq)
231 249
232#endif 250#endif
233 251
234static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask) 252static inline void
253set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
235{ 254{
236} 255}
237 256
@@ -361,6 +380,26 @@ static void ioapic_mask_entry(int apic, int pin)
361} 380}
362 381
363#ifdef CONFIG_SMP 382#ifdef CONFIG_SMP
383static void send_cleanup_vector(struct irq_cfg *cfg)
384{
385 cpumask_var_t cleanup_mask;
386
387 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
388 unsigned int i;
389 cfg->move_cleanup_count = 0;
390 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
391 cfg->move_cleanup_count++;
392 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
393 send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
394 } else {
395 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
396 cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
397 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
398 free_cpumask_var(cleanup_mask);
399 }
400 cfg->move_in_progress = 0;
401}
402
364static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) 403static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
365{ 404{
366 int apic, pin; 405 int apic, pin;
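
Editor's note: send_cleanup_vector() centralizes the cleanup-IPI sequence that three call sites below used to open-code. Worth noting is its fallback: alloc_cpumask_var() can fail under GFP_ATOMIC on an offstack build, so the helper degrades to two passes of for_each_cpu_and(), one to count and one to send per-CPU, rather than requiring a temporary mask at all. A runnable model of the two paths (mask values are made up):

/* Model of send_cleanup_vector(): multicast when a scratch mask is
 * available, two allocation-free per-CPU passes when it is not. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 64

static void send_ipi_cpu(int cpu) { printf("cleanup IPI -> cpu %d\n", cpu); }

static int cleanup(unsigned long old_domain, unsigned long online,
		   bool have_scratch)
{
	int count = 0, cpu;

	if (have_scratch) {
		unsigned long scratch = old_domain & online;

		count = __builtin_popcountl(scratch);
		printf("one send -> mask %#lx\n", scratch);
	} else {
		/* for_each_cpu_and() tests both masks per CPU, so the
		 * intersection never has to be stored anywhere. */
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if ((old_domain >> cpu) & (online >> cpu) & 1)
				count++;
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if ((old_domain >> cpu) & (online >> cpu) & 1)
				send_ipi_cpu(cpu);
	}
	return count;		/* becomes cfg->move_cleanup_count */
}

int main(void)
{
	printf("count=%d\n", cleanup(0x0e, 0x0b, false));
	printf("count=%d\n", cleanup(0x0e, 0x0b, true));
	return 0;
}
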
@@ -396,42 +435,55 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
396 } 435 }
397} 436}
398 437
399static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask); 438static int
439assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
400 440
401static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, 441/*
402 const struct cpumask *mask) 442 * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid
443 * of that, or returns BAD_APICID and leaves desc->affinity untouched.
444 */
445static unsigned int
446set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
403{ 447{
404 struct irq_cfg *cfg; 448 struct irq_cfg *cfg;
405 unsigned long flags;
406 unsigned int dest;
407 cpumask_t tmp;
408 unsigned int irq; 449 unsigned int irq;
409 450
410 if (!cpumask_intersects(mask, cpu_online_mask)) 451 if (!cpumask_intersects(mask, cpu_online_mask))
411 return; 452 return BAD_APICID;
412 453
413 irq = desc->irq; 454 irq = desc->irq;
414 cfg = desc->chip_data; 455 cfg = desc->chip_data;
415 if (assign_irq_vector(irq, cfg, *mask)) 456 if (assign_irq_vector(irq, cfg, mask))
416 return; 457 return BAD_APICID;
417 458
418 set_extra_move_desc(desc, *mask); 459 cpumask_and(&desc->affinity, cfg->domain, mask);
460 set_extra_move_desc(desc, mask);
461 return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
462}
419 463
420 cpumask_and(&tmp, &cfg->domain, mask); 464static void
421 dest = cpu_mask_to_apicid(tmp); 465set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
422 /* 466{
423 * Only the high 8 bits are valid. 467 struct irq_cfg *cfg;
424 */ 468 unsigned long flags;
425 dest = SET_APIC_LOGICAL_ID(dest); 469 unsigned int dest;
470 unsigned int irq;
471
472 irq = desc->irq;
473 cfg = desc->chip_data;
426 474
427 spin_lock_irqsave(&ioapic_lock, flags); 475 spin_lock_irqsave(&ioapic_lock, flags);
428 __target_IO_APIC_irq(irq, dest, cfg); 476 dest = set_desc_affinity(desc, mask);
429 cpumask_copy(&desc->affinity, mask); 477 if (dest != BAD_APICID) {
478 /* Only the high 8 bits are valid. */
479 dest = SET_APIC_LOGICAL_ID(dest);
480 __target_IO_APIC_irq(irq, dest, cfg);
481 }
430 spin_unlock_irqrestore(&ioapic_lock, flags); 482 spin_unlock_irqrestore(&ioapic_lock, flags);
431} 483}
432 484
433static void set_ioapic_affinity_irq(unsigned int irq, 485static void
434 const struct cpumask *mask) 486set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
435{ 487{
436 struct irq_desc *desc; 488 struct irq_desc *desc;
437 489
@@ -1099,7 +1151,8 @@ void unlock_vector_lock(void)
1099 spin_unlock(&vector_lock); 1151 spin_unlock(&vector_lock);
1100} 1152}
1101 1153
1102static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) 1154static int
1155__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1103{ 1156{
1104 /* 1157 /*
1105 * NOTE! The local APIC isn't very good at handling 1158 * NOTE! The local APIC isn't very good at handling
@@ -1114,36 +1167,39 @@ static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
1114 */ 1167 */
1115 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; 1168 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1116 unsigned int old_vector; 1169 unsigned int old_vector;
1117 int cpu; 1170 int cpu, err;
1171 cpumask_var_t tmp_mask;
1118 1172
1119 if ((cfg->move_in_progress) || cfg->move_cleanup_count) 1173 if ((cfg->move_in_progress) || cfg->move_cleanup_count)
1120 return -EBUSY; 1174 return -EBUSY;
1121 1175
1122 /* Only try and allocate irqs on cpus that are present */ 1176 if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
1123 cpus_and(mask, mask, cpu_online_map); 1177 return -ENOMEM;
1124 1178
1125 old_vector = cfg->vector; 1179 old_vector = cfg->vector;
1126 if (old_vector) { 1180 if (old_vector) {
1127 cpumask_t tmp; 1181 cpumask_and(tmp_mask, mask, cpu_online_mask);
1128 cpus_and(tmp, cfg->domain, mask); 1182 cpumask_and(tmp_mask, cfg->domain, tmp_mask);
1129 if (!cpus_empty(tmp)) 1183 if (!cpumask_empty(tmp_mask)) {
1184 free_cpumask_var(tmp_mask);
1130 return 0; 1185 return 0;
1186 }
1131 } 1187 }
1132 1188
1133 for_each_cpu_mask_nr(cpu, mask) { 1189 /* Only try and allocate irqs on cpus that are present */
1134 cpumask_t domain, new_mask; 1190 err = -ENOSPC;
1191 for_each_cpu_and(cpu, mask, cpu_online_mask) {
1135 int new_cpu; 1192 int new_cpu;
1136 int vector, offset; 1193 int vector, offset;
1137 1194
1138 domain = vector_allocation_domain(cpu); 1195 vector_allocation_domain(cpu, tmp_mask);
1139 cpus_and(new_mask, domain, cpu_online_map);
1140 1196
1141 vector = current_vector; 1197 vector = current_vector;
1142 offset = current_offset; 1198 offset = current_offset;
1143next: 1199next:
1144 vector += 8; 1200 vector += 8;
1145 if (vector >= first_system_vector) { 1201 if (vector >= first_system_vector) {
1146 /* If we run out of vectors on large boxen, must share them. */ 1202 /* If out of vectors on large boxen, must share them. */
1147 offset = (offset + 1) % 8; 1203 offset = (offset + 1) % 8;
1148 vector = FIRST_DEVICE_VECTOR + offset; 1204 vector = FIRST_DEVICE_VECTOR + offset;
1149 } 1205 }
@@ -1156,7 +1212,7 @@ next:
1156 if (vector == SYSCALL_VECTOR) 1212 if (vector == SYSCALL_VECTOR)
1157 goto next; 1213 goto next;
1158#endif 1214#endif
1159 for_each_cpu_mask_nr(new_cpu, new_mask) 1215 for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
1160 if (per_cpu(vector_irq, new_cpu)[vector] != -1) 1216 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
1161 goto next; 1217 goto next;
1162 /* Found one! */ 1218 /* Found one! */
@@ -1164,18 +1220,21 @@ next:
1164 current_offset = offset; 1220 current_offset = offset;
1165 if (old_vector) { 1221 if (old_vector) {
1166 cfg->move_in_progress = 1; 1222 cfg->move_in_progress = 1;
1167 cfg->old_domain = cfg->domain; 1223 cpumask_copy(cfg->old_domain, cfg->domain);
1168 } 1224 }
1169 for_each_cpu_mask_nr(new_cpu, new_mask) 1225 for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
1170 per_cpu(vector_irq, new_cpu)[vector] = irq; 1226 per_cpu(vector_irq, new_cpu)[vector] = irq;
1171 cfg->vector = vector; 1227 cfg->vector = vector;
1172 cfg->domain = domain; 1228 cpumask_copy(cfg->domain, tmp_mask);
1173 return 0; 1229 err = 0;
1230 break;
1174 } 1231 }
1175 return -ENOSPC; 1232 free_cpumask_var(tmp_mask);
1233 return err;
1176} 1234}
1177 1235
1178static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) 1236static int
1237assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1179{ 1238{
1180 int err; 1239 int err;
1181 unsigned long flags; 1240 unsigned long flags;
@@ -1188,23 +1247,20 @@ static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
1188 1247
1189static void __clear_irq_vector(int irq, struct irq_cfg *cfg) 1248static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1190{ 1249{
1191 cpumask_t mask;
1192 int cpu, vector; 1250 int cpu, vector;
1193 1251
1194 BUG_ON(!cfg->vector); 1252 BUG_ON(!cfg->vector);
1195 1253
1196 vector = cfg->vector; 1254 vector = cfg->vector;
1197 cpus_and(mask, cfg->domain, cpu_online_map); 1255 for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
1198 for_each_cpu_mask_nr(cpu, mask)
1199 per_cpu(vector_irq, cpu)[vector] = -1; 1256 per_cpu(vector_irq, cpu)[vector] = -1;
1200 1257
1201 cfg->vector = 0; 1258 cfg->vector = 0;
1202 cpus_clear(cfg->domain); 1259 cpumask_clear(cfg->domain);
1203 1260
1204 if (likely(!cfg->move_in_progress)) 1261 if (likely(!cfg->move_in_progress))
1205 return; 1262 return;
1206 cpus_and(mask, cfg->old_domain, cpu_online_map); 1263 for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
1207 for_each_cpu_mask_nr(cpu, mask) {
1208 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; 1264 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
1209 vector++) { 1265 vector++) {
1210 if (per_cpu(vector_irq, cpu)[vector] != irq) 1266 if (per_cpu(vector_irq, cpu)[vector] != irq)
@@ -1229,7 +1285,7 @@ void __setup_vector_irq(int cpu)
1229 if (!desc) 1285 if (!desc)
1230 continue; 1286 continue;
1231 cfg = desc->chip_data; 1287 cfg = desc->chip_data;
1232 if (!cpu_isset(cpu, cfg->domain)) 1288 if (!cpumask_test_cpu(cpu, cfg->domain))
1233 continue; 1289 continue;
1234 vector = cfg->vector; 1290 vector = cfg->vector;
1235 per_cpu(vector_irq, cpu)[vector] = irq; 1291 per_cpu(vector_irq, cpu)[vector] = irq;
@@ -1241,7 +1297,7 @@ void __setup_vector_irq(int cpu)
1241 continue; 1297 continue;
1242 1298
1243 cfg = irq_cfg(irq); 1299 cfg = irq_cfg(irq);
1244 if (!cpu_isset(cpu, cfg->domain)) 1300 if (!cpumask_test_cpu(cpu, cfg->domain))
1245 per_cpu(vector_irq, cpu)[vector] = -1; 1301 per_cpu(vector_irq, cpu)[vector] = -1;
1246 } 1302 }
1247} 1303}
@@ -1377,18 +1433,17 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
1377{ 1433{
1378 struct irq_cfg *cfg; 1434 struct irq_cfg *cfg;
1379 struct IO_APIC_route_entry entry; 1435 struct IO_APIC_route_entry entry;
1380 cpumask_t mask; 1436 unsigned int dest;
1381 1437
1382 if (!IO_APIC_IRQ(irq)) 1438 if (!IO_APIC_IRQ(irq))
1383 return; 1439 return;
1384 1440
1385 cfg = desc->chip_data; 1441 cfg = desc->chip_data;
1386 1442
1387 mask = TARGET_CPUS; 1443 if (assign_irq_vector(irq, cfg, TARGET_CPUS))
1388 if (assign_irq_vector(irq, cfg, mask))
1389 return; 1444 return;
1390 1445
1391 cpus_and(mask, cfg->domain, mask); 1446 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
1392 1447
1393 apic_printk(APIC_VERBOSE,KERN_DEBUG 1448 apic_printk(APIC_VERBOSE,KERN_DEBUG
1394 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " 1449 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@@ -1398,8 +1453,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
1398 1453
1399 1454
1400 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, 1455 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
1401 cpu_mask_to_apicid(mask), trigger, polarity, 1456 dest, trigger, polarity, cfg->vector)) {
1402 cfg->vector)) {
1403 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1457 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1404 mp_ioapics[apic].mp_apicid, pin); 1458 mp_ioapics[apic].mp_apicid, pin);
1405 __clear_irq_vector(irq, cfg); 1459 __clear_irq_vector(irq, cfg);
@@ -2121,7 +2175,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
2121 unsigned long flags; 2175 unsigned long flags;
2122 2176
2123 spin_lock_irqsave(&vector_lock, flags); 2177 spin_lock_irqsave(&vector_lock, flags);
2124 send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); 2178 send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
2125 spin_unlock_irqrestore(&vector_lock, flags); 2179 spin_unlock_irqrestore(&vector_lock, flags);
2126 2180
2127 return 1; 2181 return 1;
@@ -2170,18 +2224,17 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
2170 * as simple as edge triggered migration and we can do the irq migration 2224 * as simple as edge triggered migration and we can do the irq migration
2171 * with a simple atomic update to IO-APIC RTE. 2225 * with a simple atomic update to IO-APIC RTE.
2172 */ 2226 */
2173static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) 2227static void
2228migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2174{ 2229{
2175 struct irq_cfg *cfg; 2230 struct irq_cfg *cfg;
2176 cpumask_t tmp, cleanup_mask;
2177 struct irte irte; 2231 struct irte irte;
2178 int modify_ioapic_rte; 2232 int modify_ioapic_rte;
2179 unsigned int dest; 2233 unsigned int dest;
2180 unsigned long flags; 2234 unsigned long flags;
2181 unsigned int irq; 2235 unsigned int irq;
2182 2236
2183 cpus_and(tmp, mask, cpu_online_map); 2237 if (!cpumask_intersects(mask, cpu_online_mask))
2184 if (cpus_empty(tmp))
2185 return; 2238 return;
2186 2239
2187 irq = desc->irq; 2240 irq = desc->irq;
@@ -2194,8 +2247,7 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
2194 2247
2195 set_extra_move_desc(desc, mask); 2248 set_extra_move_desc(desc, mask);
2196 2249
2197 cpus_and(tmp, cfg->domain, mask); 2250 dest = cpu_mask_to_apicid_and(cfg->domain, mask);
2198 dest = cpu_mask_to_apicid(tmp);
2199 2251
2200 modify_ioapic_rte = desc->status & IRQ_LEVEL; 2252 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2201 if (modify_ioapic_rte) { 2253 if (modify_ioapic_rte) {
@@ -2212,14 +2264,10 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
2212 */ 2264 */
2213 modify_irte(irq, &irte); 2265 modify_irte(irq, &irte);
2214 2266
2215 if (cfg->move_in_progress) { 2267 if (cfg->move_in_progress)
2216 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); 2268 send_cleanup_vector(cfg);
2217 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2218 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2219 cfg->move_in_progress = 0;
2220 }
2221 2269
2222 desc->affinity = mask; 2270 cpumask_copy(&desc->affinity, mask);
2223} 2271}
2224 2272
2225static int migrate_irq_remapped_level_desc(struct irq_desc *desc) 2273static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2241,11 +2289,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
2241 } 2289 }
2242 2290
 2243 /* everything is clear. we have right of way */ 2291 /* everything is clear. we have right of way */
2244 migrate_ioapic_irq_desc(desc, desc->pending_mask); 2292 migrate_ioapic_irq_desc(desc, &desc->pending_mask);
2245 2293
2246 ret = 0; 2294 ret = 0;
2247 desc->status &= ~IRQ_MOVE_PENDING; 2295 desc->status &= ~IRQ_MOVE_PENDING;
2248 cpus_clear(desc->pending_mask); 2296 cpumask_clear(&desc->pending_mask);
2249 2297
2250unmask: 2298unmask:
2251 unmask_IO_APIC_irq_desc(desc); 2299 unmask_IO_APIC_irq_desc(desc);
@@ -2292,7 +2340,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
2292 return; 2340 return;
2293 } 2341 }
2294 2342
2295 migrate_ioapic_irq_desc(desc, *mask); 2343 migrate_ioapic_irq_desc(desc, mask);
2296} 2344}
2297static void set_ir_ioapic_affinity_irq(unsigned int irq, 2345static void set_ir_ioapic_affinity_irq(unsigned int irq,
2298 const struct cpumask *mask) 2346 const struct cpumask *mask)
@@ -2331,7 +2379,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
2331 if (!cfg->move_cleanup_count) 2379 if (!cfg->move_cleanup_count)
2332 goto unlock; 2380 goto unlock;
2333 2381
2334 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) 2382 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2335 goto unlock; 2383 goto unlock;
2336 2384
2337 __get_cpu_var(vector_irq)[vector] = -1; 2385 __get_cpu_var(vector_irq)[vector] = -1;
@@ -2354,14 +2402,8 @@ static void irq_complete_move(struct irq_desc **descp)
2354 2402
2355 vector = ~get_irq_regs()->orig_ax; 2403 vector = ~get_irq_regs()->orig_ax;
2356 me = smp_processor_id(); 2404 me = smp_processor_id();
2357 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) { 2405 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2358 cpumask_t cleanup_mask; 2406 send_cleanup_vector(cfg);
2359
2360 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2361 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2362 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2363 cfg->move_in_progress = 0;
2364 }
2365} 2407}
2366#else 2408#else
2367static inline void irq_complete_move(struct irq_desc **descp) {} 2409static inline void irq_complete_move(struct irq_desc **descp) {}
@@ -3086,16 +3128,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
3086 struct irq_cfg *cfg; 3128 struct irq_cfg *cfg;
3087 int err; 3129 int err;
3088 unsigned dest; 3130 unsigned dest;
3089 cpumask_t tmp;
3090 3131
3091 cfg = irq_cfg(irq); 3132 cfg = irq_cfg(irq);
3092 tmp = TARGET_CPUS; 3133 err = assign_irq_vector(irq, cfg, TARGET_CPUS);
3093 err = assign_irq_vector(irq, cfg, tmp);
3094 if (err) 3134 if (err)
3095 return err; 3135 return err;
3096 3136
3097 cpus_and(tmp, cfg->domain, tmp); 3137 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
3098 dest = cpu_mask_to_apicid(tmp);
3099 3138
3100#ifdef CONFIG_INTR_REMAP 3139#ifdef CONFIG_INTR_REMAP
3101 if (irq_remapped(irq)) { 3140 if (irq_remapped(irq)) {
@@ -3155,19 +3194,12 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3155 struct irq_cfg *cfg; 3194 struct irq_cfg *cfg;
3156 struct msi_msg msg; 3195 struct msi_msg msg;
3157 unsigned int dest; 3196 unsigned int dest;
3158 cpumask_t tmp;
3159 3197
3160 if (!cpumask_intersects(mask, cpu_online_mask)) 3198 dest = set_desc_affinity(desc, mask);
3199 if (dest == BAD_APICID)
3161 return; 3200 return;
3162 3201
3163 cfg = desc->chip_data; 3202 cfg = desc->chip_data;
3164 if (assign_irq_vector(irq, cfg, *mask))
3165 return;
3166
3167 set_extra_move_desc(desc, *mask);
3168
3169 cpumask_and(&tmp, &cfg->domain, mask);
3170 dest = cpu_mask_to_apicid(tmp);
3171 3203
3172 read_msi_msg_desc(desc, &msg); 3204 read_msi_msg_desc(desc, &msg);
3173 3205
@@ -3177,37 +3209,27 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3177 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3209 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3178 3210
3179 write_msi_msg_desc(desc, &msg); 3211 write_msi_msg_desc(desc, &msg);
3180 cpumask_copy(&desc->affinity, mask);
3181} 3212}
3182#ifdef CONFIG_INTR_REMAP 3213#ifdef CONFIG_INTR_REMAP
3183/* 3214/*
3184 * Migrate the MSI irq to another cpumask. This migration is 3215 * Migrate the MSI irq to another cpumask. This migration is
3185 * done in the process context using interrupt-remapping hardware. 3216 * done in the process context using interrupt-remapping hardware.
3186 */ 3217 */
3187static void ir_set_msi_irq_affinity(unsigned int irq, 3218static void
3188 const struct cpumask *mask) 3219ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3189{ 3220{
3190 struct irq_desc *desc = irq_to_desc(irq); 3221 struct irq_desc *desc = irq_to_desc(irq);
3191 struct irq_cfg *cfg; 3222 struct irq_cfg *cfg;
3192 unsigned int dest; 3223 unsigned int dest;
3193 cpumask_t tmp, cleanup_mask;
3194 struct irte irte; 3224 struct irte irte;
3195 3225
3196 if (!cpumask_intersects(mask, cpu_online_mask))
3197 return;
3198
3199 if (get_irte(irq, &irte)) 3226 if (get_irte(irq, &irte))
3200 return; 3227 return;
3201 3228
3202 cfg = desc->chip_data; 3229 dest = set_desc_affinity(desc, mask);
3203 if (assign_irq_vector(irq, cfg, *mask)) 3230 if (dest == BAD_APICID)
3204 return; 3231 return;
3205 3232
3206 set_extra_move_desc(desc, *mask);
3207
3208 cpumask_and(&tmp, &cfg->domain, mask);
3209 dest = cpu_mask_to_apicid(tmp);
3210
3211 irte.vector = cfg->vector; 3233 irte.vector = cfg->vector;
3212 irte.dest_id = IRTE_DEST(dest); 3234 irte.dest_id = IRTE_DEST(dest);
3213 3235
@@ -3221,14 +3243,8 @@ static void ir_set_msi_irq_affinity(unsigned int irq,
3221 * at the new destination. So, time to cleanup the previous 3243 * at the new destination. So, time to cleanup the previous
3222 * vector allocation. 3244 * vector allocation.
3223 */ 3245 */
3224 if (cfg->move_in_progress) { 3246 if (cfg->move_in_progress)
3225 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); 3247 send_cleanup_vector(cfg);
3226 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
3227 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
3228 cfg->move_in_progress = 0;
3229 }
3230
3231 cpumask_copy(&desc->affinity, mask);
3232} 3248}
3233 3249
3234#endif 3250#endif
@@ -3425,19 +3441,12 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3425 struct irq_cfg *cfg; 3441 struct irq_cfg *cfg;
3426 struct msi_msg msg; 3442 struct msi_msg msg;
3427 unsigned int dest; 3443 unsigned int dest;
3428 cpumask_t tmp;
3429 3444
3430 if (!cpumask_intersects(mask, cpu_online_mask)) 3445 dest = set_desc_affinity(desc, mask);
3446 if (dest == BAD_APICID)
3431 return; 3447 return;
3432 3448
3433 cfg = desc->chip_data; 3449 cfg = desc->chip_data;
3434 if (assign_irq_vector(irq, cfg, *mask))
3435 return;
3436
3437 set_extra_move_desc(desc, *mask);
3438
3439 cpumask_and(&tmp, &cfg->domain, mask);
3440 dest = cpu_mask_to_apicid(tmp);
3441 3450
3442 dmar_msi_read(irq, &msg); 3451 dmar_msi_read(irq, &msg);
3443 3452
@@ -3447,7 +3456,6 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3447 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3456 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3448 3457
3449 dmar_msi_write(irq, &msg); 3458 dmar_msi_write(irq, &msg);
3450 cpumask_copy(&desc->affinity, mask);
3451} 3459}
3452 3460
3453#endif /* CONFIG_SMP */ 3461#endif /* CONFIG_SMP */
@@ -3487,19 +3495,12 @@ static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3487 struct irq_cfg *cfg; 3495 struct irq_cfg *cfg;
3488 struct msi_msg msg; 3496 struct msi_msg msg;
3489 unsigned int dest; 3497 unsigned int dest;
3490 cpumask_t tmp;
3491 3498
3492 if (!cpumask_intersects(mask, cpu_online_mask)) 3499 dest = set_desc_affinity(desc, mask);
3500 if (dest == BAD_APICID)
3493 return; 3501 return;
3494 3502
3495 cfg = desc->chip_data; 3503 cfg = desc->chip_data;
3496 if (assign_irq_vector(irq, cfg, *mask))
3497 return;
3498
3499 set_extra_move_desc(desc, *mask);
3500
3501 cpumask_and(&tmp, &cfg->domain, mask);
3502 dest = cpu_mask_to_apicid(tmp);
3503 3504
3504 hpet_msi_read(irq, &msg); 3505 hpet_msi_read(irq, &msg);
3505 3506
@@ -3509,7 +3510,6 @@ static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3509 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3510 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3510 3511
3511 hpet_msi_write(irq, &msg); 3512 hpet_msi_write(irq, &msg);
3512 cpumask_copy(&desc->affinity, mask);
3513} 3513}
3514 3514
3515#endif /* CONFIG_SMP */ 3515#endif /* CONFIG_SMP */
@@ -3569,22 +3569,14 @@ static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
3569 struct irq_desc *desc = irq_to_desc(irq); 3569 struct irq_desc *desc = irq_to_desc(irq);
3570 struct irq_cfg *cfg; 3570 struct irq_cfg *cfg;
3571 unsigned int dest; 3571 unsigned int dest;
3572 cpumask_t tmp;
3573 3572
3574 if (!cpumask_intersects(mask, cpu_online_mask)) 3573 dest = set_desc_affinity(desc, mask);
3574 if (dest == BAD_APICID)
3575 return; 3575 return;
3576 3576
3577 cfg = desc->chip_data; 3577 cfg = desc->chip_data;
3578 if (assign_irq_vector(irq, cfg, *mask))
3579 return;
3580
3581 set_extra_move_desc(desc, *mask);
3582
3583 cpumask_and(&tmp, &cfg->domain, mask);
3584 dest = cpu_mask_to_apicid(tmp);
3585 3578
3586 target_ht_irq(irq, dest, cfg->vector); 3579 target_ht_irq(irq, dest, cfg->vector);
3587 cpumask_copy(&desc->affinity, mask);
3588} 3580}
3589 3581
3590#endif 3582#endif
@@ -3604,17 +3596,14 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3604{ 3596{
3605 struct irq_cfg *cfg; 3597 struct irq_cfg *cfg;
3606 int err; 3598 int err;
3607 cpumask_t tmp;
3608 3599
3609 cfg = irq_cfg(irq); 3600 cfg = irq_cfg(irq);
3610 tmp = TARGET_CPUS; 3601 err = assign_irq_vector(irq, cfg, TARGET_CPUS);
3611 err = assign_irq_vector(irq, cfg, tmp);
3612 if (!err) { 3602 if (!err) {
3613 struct ht_irq_msg msg; 3603 struct ht_irq_msg msg;
3614 unsigned dest; 3604 unsigned dest;
3615 3605
3616 cpus_and(tmp, cfg->domain, tmp); 3606 dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
3617 dest = cpu_mask_to_apicid(tmp);
3618 3607
3619 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3608 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3620 3609
@@ -3650,7 +3639,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3650int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, 3639int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3651 unsigned long mmr_offset) 3640 unsigned long mmr_offset)
3652{ 3641{
3653 const cpumask_t *eligible_cpu = get_cpu_mask(cpu); 3642 const struct cpumask *eligible_cpu = cpumask_of(cpu);
3654 struct irq_cfg *cfg; 3643 struct irq_cfg *cfg;
3655 int mmr_pnode; 3644 int mmr_pnode;
3656 unsigned long mmr_value; 3645 unsigned long mmr_value;
@@ -3660,7 +3649,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3660 3649
3661 cfg = irq_cfg(irq); 3650 cfg = irq_cfg(irq);
3662 3651
3663 err = assign_irq_vector(irq, cfg, *eligible_cpu); 3652 err = assign_irq_vector(irq, cfg, eligible_cpu);
3664 if (err != 0) 3653 if (err != 0)
3665 return err; 3654 return err;
3666 3655
@@ -3679,7 +3668,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3679 entry->polarity = 0; 3668 entry->polarity = 0;
3680 entry->trigger = 0; 3669 entry->trigger = 0;
3681 entry->mask = 0; 3670 entry->mask = 0;
3682 entry->dest = cpu_mask_to_apicid(*eligible_cpu); 3671 entry->dest = cpu_mask_to_apicid(eligible_cpu);
3683 3672
3684 mmr_pnode = uv_blade_to_pnode(mmr_blade); 3673 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3685 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); 3674 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -3890,7 +3879,7 @@ void __init setup_ioapic_dest(void)
3890 int pin, ioapic, irq, irq_entry; 3879 int pin, ioapic, irq, irq_entry;
3891 struct irq_desc *desc; 3880 struct irq_desc *desc;
3892 struct irq_cfg *cfg; 3881 struct irq_cfg *cfg;
3893 cpumask_t mask; 3882 const struct cpumask *mask;
3894 3883
3895 if (skip_ioapic_setup == 1) 3884 if (skip_ioapic_setup == 1)
3896 return; 3885 return;
@@ -3921,16 +3910,16 @@ void __init setup_ioapic_dest(void)
3921 */ 3910 */
3922 if (desc->status & 3911 if (desc->status &
3923 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) 3912 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
3924 mask = desc->affinity; 3913 mask = &desc->affinity;
3925 else 3914 else
3926 mask = TARGET_CPUS; 3915 mask = TARGET_CPUS;
3927 3916
3928#ifdef CONFIG_INTR_REMAP 3917#ifdef CONFIG_INTR_REMAP
3929 if (intr_remapping_enabled) 3918 if (intr_remapping_enabled)
3930 set_ir_ioapic_affinity_irq_desc(desc, &mask); 3919 set_ir_ioapic_affinity_irq_desc(desc, mask);
3931 else 3920 else
3932#endif 3921#endif
3933 set_ioapic_affinity_irq_desc(desc, &mask); 3922 set_ioapic_affinity_irq_desc(desc, mask);
3934 } 3923 }
3935 3924
3936 } 3925 }
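
Editor's note: the irq_cfg changes above are the payoff of the cpumask_var_t type: on small-NR_CPUS builds it is a one-element array and alloc/free are no-ops, while CONFIG_CPUMASK_OFFSTACK turns it into a pointer that must be allocated, hence the new error unwinding in get_one_free_irq_cfg() and the bootmem allocations in arch_early_irq_init(). A simplified, compilable model of the mechanism (the real one lives in <linux/cpumask.h> and takes gfp flags, which this sketch drops):

/* Build with or without -DOFFSTACK to see the two shapes. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define NR_LONGS 64	/* 4096 CPUs worth of bits */
struct cpumask { unsigned long bits[NR_LONGS]; };

#ifdef OFFSTACK
typedef struct cpumask *cpumask_var_t;

static int alloc_cpumask_var(cpumask_var_t *m)
{
	*m = malloc(sizeof(struct cpumask));
	return *m != NULL;
}
static void free_cpumask_var(cpumask_var_t m) { free(m); }
#else
typedef struct cpumask cpumask_var_t[1];	/* decays to a pointer */

static int alloc_cpumask_var(cpumask_var_t *m) { (void)m; return 1; }
static void free_cpumask_var(cpumask_var_t m) { (void)m; }
#endif

int main(void)
{
	cpumask_var_t domain;

	if (!alloc_cpumask_var(&domain))
		return 1;	/* the irq_cfg allocator bails out here */
	memset(domain, 0, sizeof(struct cpumask));
	domain->bits[0] = 1;	/* same syntax in both shapes */
	printf("bit0=%lu\n", domain->bits[0] & 1);
	free_cpumask_var(domain);
	return 0;
}

Callers are written once against the pointer-style API and the array shape costs nothing when masks stay on the stack.
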
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index f1c688e46f35..285bbf8831fa 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
@@ -116,18 +116,18 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector)
116/* 116/*
117 * This is only used on smaller machines. 117 * This is only used on smaller machines.
118 */ 118 */
119void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) 119void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
120{ 120{
121 unsigned long mask = cpus_addr(cpumask)[0]; 121 unsigned long mask = cpumask_bits(cpumask)[0];
122 unsigned long flags; 122 unsigned long flags;
123 123
124 local_irq_save(flags); 124 local_irq_save(flags);
125 WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); 125 WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
126 __send_IPI_dest_field(mask, vector); 126 __send_IPI_dest_field(mask, vector);
127 local_irq_restore(flags); 127 local_irq_restore(flags);
128} 128}
129 129
130void send_IPI_mask_sequence(cpumask_t mask, int vector) 130void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
131{ 131{
132 unsigned long flags; 132 unsigned long flags;
133 unsigned int query_cpu; 133 unsigned int query_cpu;
@@ -139,12 +139,24 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector)
139 */ 139 */
140 140
141 local_irq_save(flags); 141 local_irq_save(flags);
142 for_each_possible_cpu(query_cpu) { 142 for_each_cpu(query_cpu, mask)
143 if (cpu_isset(query_cpu, mask)) { 143 __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
144 local_irq_restore(flags);
145}
146
147void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
148{
149 unsigned long flags;
150 unsigned int query_cpu;
151 unsigned int this_cpu = smp_processor_id();
152
153 /* See Hack comment above */
154
155 local_irq_save(flags);
156 for_each_cpu(query_cpu, mask)
157 if (query_cpu != this_cpu)
144 __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), 158 __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
145 vector); 159 vector);
146 }
147 }
148 local_irq_restore(flags); 160 local_irq_restore(flags);
149} 161}
150 162
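The new send_IPI_mask_allbutself() skips the sender inside the iteration instead of editing the mask first. For contrast, a sketch of the copy-and-clear idiom this avoids (same helpers as above): the copy alone costs 512 bytes of stack at NR_CPUS=4096, and the incoming mask can no longer be const.

	/* Old-style sketch, shown only for comparison with the loop above. */
	cpumask_t tmp = *mask;			/* full copy of the mask */
	cpu_clear(smp_processor_id(), tmp);	/* drop ourselves */
	send_IPI_mask_sequence(&tmp, vector);	/* then send to the rest */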
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 9cf9cbbf7a02..9dc5588f336a 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -233,27 +233,28 @@ unsigned int do_IRQ(struct pt_regs *regs)
233#ifdef CONFIG_HOTPLUG_CPU 233#ifdef CONFIG_HOTPLUG_CPU
234#include <mach_apic.h> 234#include <mach_apic.h>
235 235
236void fixup_irqs(cpumask_t map) 236/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
237void fixup_irqs(void)
237{ 238{
238 unsigned int irq; 239 unsigned int irq;
239 static int warned; 240 static int warned;
240 struct irq_desc *desc; 241 struct irq_desc *desc;
241 242
242 for_each_irq_desc(irq, desc) { 243 for_each_irq_desc(irq, desc) {
243 cpumask_t mask; 244 const struct cpumask *affinity;
244 245
245 if (!desc) 246 if (!desc)
246 continue; 247 continue;
247 if (irq == 2) 248 if (irq == 2)
248 continue; 249 continue;
249 250
250 cpus_and(mask, desc->affinity, map); 251 affinity = &desc->affinity;
251 if (any_online_cpu(mask) == NR_CPUS) { 252 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
252 printk("Breaking affinity for irq %i\n", irq); 253 printk("Breaking affinity for irq %i\n", irq);
253 mask = map; 254 affinity = cpu_all_mask;
254 } 255 }
255 if (desc->chip->set_affinity) 256 if (desc->chip->set_affinity)
256 desc->chip->set_affinity(irq, &mask); 257 desc->chip->set_affinity(irq, affinity);
257 else if (desc->action && !(warned++)) 258 else if (desc->action && !(warned++))
258 printk("Cannot set affinity for irq %i\n", irq); 259 printk("Cannot set affinity for irq %i\n", irq);
259 } 260 }
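The old test built the intersection with cpus_and() and compared any_online_cpu() against NR_CPUS; the replacement asks cpumask_any_and() for the first CPU in (affinity & online) and treats a result >= nr_cpu_ids as "empty", with no temporary mask at all. A sketch of the check, which irq_64.c below uses as well:

	/* Does this IRQ still have an online CPU in its affinity mask?
	 * cpumask_any_and() returns the first set bit of the intersection,
	 * or a value >= nr_cpu_ids when the intersection is empty. */
	if (cpumask_any_and(&desc->affinity, cpu_online_mask) >= nr_cpu_ids) {
		printk("Breaking affinity for irq %i\n", irq);
		affinity = cpu_all_mask;	/* retarget to any CPU */
	}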
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 27f2307b0a34..fca2991443f5 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -83,16 +83,17 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
83} 83}
84 84
85#ifdef CONFIG_HOTPLUG_CPU 85#ifdef CONFIG_HOTPLUG_CPU
86void fixup_irqs(cpumask_t map) 86/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
87void fixup_irqs(void)
87{ 88{
88 unsigned int irq; 89 unsigned int irq;
89 static int warned; 90 static int warned;
90 struct irq_desc *desc; 91 struct irq_desc *desc;
91 92
92 for_each_irq_desc(irq, desc) { 93 for_each_irq_desc(irq, desc) {
93 cpumask_t mask;
94 int break_affinity = 0; 94 int break_affinity = 0;
95 int set_affinity = 1; 95 int set_affinity = 1;
96 const struct cpumask *affinity;
96 97
97 if (!desc) 98 if (!desc)
98 continue; 99 continue;
@@ -102,23 +103,23 @@ void fixup_irqs(cpumask_t map)
102 /* interrupts are disabled at this point */ 103
103 spin_lock(&desc->lock); 104 spin_lock(&desc->lock);
104 105
106 affinity = &desc->affinity;
105 if (!irq_has_action(irq) || 107 if (!irq_has_action(irq) ||
106 cpus_equal(desc->affinity, map)) { 108 cpumask_equal(affinity, cpu_online_mask)) {
107 spin_unlock(&desc->lock); 109 spin_unlock(&desc->lock);
108 continue; 110 continue;
109 } 111 }
110 112
111 cpus_and(mask, desc->affinity, map); 113 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
112 if (cpus_empty(mask)) {
113 break_affinity = 1; 114 break_affinity = 1;
114 mask = map; 115 affinity = cpu_all_mask;
115 } 116 }
116 117
117 if (desc->chip->mask) 118 if (desc->chip->mask)
118 desc->chip->mask(irq); 119 desc->chip->mask(irq);
119 120
120 if (desc->chip->set_affinity) 121 if (desc->chip->set_affinity)
121 desc->chip->set_affinity(irq, &mask); 122 desc->chip->set_affinity(irq, affinity);
122 else if (!(warned++)) 123 else if (!(warned++))
123 set_affinity = 0; 124 set_affinity = 0;
124 125
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 1c2084291f97..0b63b08e7530 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -152,6 +152,11 @@ void __init setup_per_cpu_areas(void)
152 old_size = PERCPU_ENOUGH_ROOM; 152 old_size = PERCPU_ENOUGH_ROOM;
153 align = max_t(unsigned long, PAGE_SIZE, align); 153 align = max_t(unsigned long, PAGE_SIZE, align);
154 size = roundup(old_size, align); 154 size = roundup(old_size, align);
155
156 printk(KERN_INFO
157 "NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
158 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
159
155 printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", 160 printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
156 size); 161 size);
157 162
@@ -168,24 +173,24 @@ void __init setup_per_cpu_areas(void)
168 "cpu %d has no node %d or node-local memory\n", 173 "cpu %d has no node %d or node-local memory\n",
169 cpu, node); 174 cpu, node);
170 if (ptr) 175 if (ptr)
171 printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n", 176 printk(KERN_DEBUG
177 "per cpu data for cpu%d at %016lx\n",
172 cpu, __pa(ptr)); 178 cpu, __pa(ptr));
173 } 179 }
174 else { 180 else {
175 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, 181 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
176 __pa(MAX_DMA_ADDRESS)); 182 __pa(MAX_DMA_ADDRESS));
177 if (ptr) 183 if (ptr)
178 printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n", 184 printk(KERN_DEBUG
179 cpu, node, __pa(ptr)); 185 "per cpu data for cpu%d on node%d "
186 "at %016lx\n",
187 cpu, node, __pa(ptr));
180 } 188 }
181#endif 189#endif
182 per_cpu_offset(cpu) = ptr - __per_cpu_start; 190 per_cpu_offset(cpu) = ptr - __per_cpu_start;
183 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 191 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
184 } 192 }
185 193
186 printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
187 NR_CPUS, nr_cpu_ids, nr_node_ids);
188
189 /* Setup percpu data maps */ 194 /* Setup percpu data maps */
190 setup_per_cpu_maps(); 195 setup_per_cpu_maps();
191 196
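For readers decoding the new boot line: nr_cpumask_bits is the iteration width the cpumask operators use. Roughly, paraphrasing the <linux/cpumask.h> definition:

	/* Rough paraphrase of the <linux/cpumask.h> definition: */
	#ifdef CONFIG_CPUMASK_OFFSTACK
	#define nr_cpumask_bits	nr_cpu_ids	/* runtime-detected CPU count */
	#else
	#define nr_cpumask_bits	NR_CPUS		/* fixed compile-time width */
	#endif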
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 3f92b134ab90..49ed667b06f3 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -118,22 +118,22 @@ static void native_smp_send_reschedule(int cpu)
118 WARN_ON(1); 118 WARN_ON(1);
119 return; 119 return;
120 } 120 }
121 send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); 121 send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
122} 122}
123 123
124void native_send_call_func_single_ipi(int cpu) 124void native_send_call_func_single_ipi(int cpu)
125{ 125{
126 send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); 126 send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
127} 127}
128 128
129void native_send_call_func_ipi(cpumask_t mask) 129void native_send_call_func_ipi(const struct cpumask *mask)
130{ 130{
131 cpumask_t allbutself; 131 cpumask_t allbutself;
132 132
133 allbutself = cpu_online_map; 133 allbutself = cpu_online_map;
134 cpu_clear(smp_processor_id(), allbutself); 134 cpu_clear(smp_processor_id(), allbutself);
135 135
136 if (cpus_equal(mask, allbutself) && 136 if (cpus_equal(*mask, allbutself) &&
137 cpus_equal(cpu_online_map, cpu_callout_map)) 137 cpus_equal(cpu_online_map, cpu_callout_map))
138 send_IPI_allbutself(CALL_FUNCTION_VECTOR); 138 send_IPI_allbutself(CALL_FUNCTION_VECTOR);
139 else 139 else
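Here only the parameter becomes const; the caller still builds an on-stack allbutself copy to decide between the cheap hardware "all but self" shortcut and a per-mask send. A sketch of the whole decision (the else branch is cut off by the hunk; a per-mask send_IPI_mask() call is the presumed general case):

	cpumask_t allbutself = cpu_online_map;		/* still by value here */
	cpu_clear(smp_processor_id(), allbutself);

	if (cpus_equal(*mask, allbutself) &&
	    cpus_equal(cpu_online_map, cpu_callout_map))
		send_IPI_allbutself(CALL_FUNCTION_VECTOR);	/* broadcast shortcut */
	else
		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);	/* presumed general case */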
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 199dcbb73d37..be9466788043 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1344,7 +1344,7 @@ void cpu_disable_common(void)
1344 lock_vector_lock(); 1344 lock_vector_lock();
1345 remove_cpu_from_maps(cpu); 1345 remove_cpu_from_maps(cpu);
1346 unlock_vector_lock(); 1346 unlock_vector_lock();
1347 fixup_irqs(cpu_online_map); 1347 fixup_irqs();
1348} 1348}
1349 1349
1350int native_cpu_disable(void) 1350int native_cpu_disable(void)
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index f4049f3513b6..174ea90d1cbd 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -164,7 +164,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
164 * We have to send the IPI only to 164 * We have to send the IPI only to
165 * CPUs affected. 165 * CPUs affected.
166 */ 166 */
167 send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR); 167 send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
168 168
169 while (!cpus_empty(flush_cpumask)) 169 while (!cpus_empty(flush_cpumask))
170 /* nothing. lockup detection does not belong here */ 170 /* nothing. lockup detection does not belong here */
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index 8f919ca69494..de6f1bda0c50 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -191,7 +191,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
191 * We have to send the IPI only to 191 * We have to send the IPI only to
192 * CPUs affected. 192 * CPUs affected.
193 */ 193 */
194 send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender); 194 send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
195 195
196 while (!cpus_empty(f->flush_cpumask)) 196 while (!cpus_empty(f->flush_cpumask))
197 cpu_relax(); 197 cpu_relax();
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
index 3624a364b7f3..bc4c7840b2a8 100644
--- a/arch/x86/mach-generic/bigsmp.c
+++ b/arch/x86/mach-generic/bigsmp.c
@@ -42,9 +42,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
42 { } 42 { }
43}; 43};
44 44
45static cpumask_t vector_allocation_domain(int cpu) 45static void vector_allocation_domain(int cpu, cpumask_t *retmask)
46{ 46{
47 return cpumask_of_cpu(cpu); 47 cpus_clear(*retmask);
48 cpu_set(cpu, *retmask);
48} 49}
49 50
50static int probe_bigsmp(void) 51static int probe_bigsmp(void)
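Returning a cpumask_t by value hauls up to 512 bytes back through the return slot, so vector_allocation_domain() becomes fill-by-pointer. A caller-side sketch (the variable name 'domain' is illustrative):

	/* Caller owns the storage; the hook only fills it. */
	cpumask_t domain;

	vector_allocation_domain(cpu, &domain);
	/* 'domain' now holds the CPUs this vector may be routed to */

The es7000, numaq and summit variants below make the same conversion, filling the whole mask with a single compound-literal assignment.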
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c
index 7b4e6d0d1690..4ba5ccaa1584 100644
--- a/arch/x86/mach-generic/es7000.c
+++ b/arch/x86/mach-generic/es7000.c
@@ -87,7 +87,7 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
87} 87}
88#endif 88#endif
89 89
90static cpumask_t vector_allocation_domain(int cpu) 90static void vector_allocation_domain(int cpu, cpumask_t *retmask)
91{ 91{
92 /* Careful. Some cpus do not strictly honor the set of cpus 92 /* Careful. Some cpus do not strictly honor the set of cpus
93 * specified in the interrupt destination when using lowest 93 * specified in the interrupt destination when using lowest
@@ -97,8 +97,7 @@ static cpumask_t vector_allocation_domain(int cpu)
97 * deliver interrupts to the wrong hyperthread when only one 97 * deliver interrupts to the wrong hyperthread when only one
98 * hyperthread was specified in the interrupt destination. 98 * hyperthread was specified in the interrupt destination.
99 */ 99 */
100 cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; 100 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
101 return domain;
102} 101}
103 102
104struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000); 103struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c
index 71a309b122e6..511d7941364f 100644
--- a/arch/x86/mach-generic/numaq.c
+++ b/arch/x86/mach-generic/numaq.c
@@ -38,7 +38,7 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
38 return 0; 38 return 0;
39} 39}
40 40
41static cpumask_t vector_allocation_domain(int cpu) 41static void vector_allocation_domain(int cpu, cpumask_t *retmask)
42{ 42{
43 /* Careful. Some cpus do not strictly honor the set of cpus 43 /* Careful. Some cpus do not strictly honor the set of cpus
44 * specified in the interrupt destination when using lowest 44 * specified in the interrupt destination when using lowest
@@ -48,8 +48,7 @@ static cpumask_t vector_allocation_domain(int cpu)
48 * deliver interrupts to the wrong hyperthread when only one 48 * deliver interrupts to the wrong hyperthread when only one
49 * hyperthread was specified in the interrupt destination. 49 * hyperthread was specified in the interrupt destination.
50 */ 50 */
51 cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; 51 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
52 return domain;
53} 52}
54 53
55struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq); 54struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c
index 2c6d234e0009..2821ffc188b5 100644
--- a/arch/x86/mach-generic/summit.c
+++ b/arch/x86/mach-generic/summit.c
@@ -24,7 +24,7 @@ static int probe_summit(void)
24 return 0; 24 return 0;
25} 25}
26 26
27static cpumask_t vector_allocation_domain(int cpu) 27static void vector_allocation_domain(int cpu, cpumask_t *retmask)
28{ 28{
29 /* Careful. Some cpus do not strictly honor the set of cpus 29 /* Careful. Some cpus do not strictly honor the set of cpus
30 * specified in the interrupt destination when using lowest 30 * specified in the interrupt destination when using lowest
@@ -34,8 +34,7 @@ static cpumask_t vector_allocation_domain(int cpu)
34 * deliver interrupts to the wrong hyperthread when only one 34 * deliver interrupts to the wrong hyperthread when only one
35 * hyperthread was specified in the interrupt destination. 35 * hyperthread was specified in the interrupt destination.
36 */ 36 */
37 cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; 37 *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
38 return domain;
39} 38}
40 39
41struct genapic apic_summit = APIC_INIT("summit", probe_summit); 40struct genapic apic_summit = APIC_INIT("summit", probe_summit);
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 9c990185e9f2..a5bc05492b1e 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -672,7 +672,7 @@ void __init smp_boot_cpus(void)
672 672
673 /* loop over all the extended VIC CPUs and boot them. The 673 /* loop over all the extended VIC CPUs and boot them. The
674 * Quad CPUs must be bootstrapped by their extended VIC cpu */ 674 * Quad CPUs must be bootstrapped by their extended VIC cpu */
675 for (i = 0; i < NR_CPUS; i++) { 675 for (i = 0; i < nr_cpu_ids; i++) {
676 if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map)) 676 if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
677 continue; 677 continue;
678 do_boot_cpu(i); 678 do_boot_cpu(i);
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index cebcbf152d46..71a14f89f89e 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -278,7 +278,7 @@ void __init numa_init_array(void)
278 int rr, i; 278 int rr, i;
279 279
280 rr = first_node(node_online_map); 280 rr = first_node(node_online_map);
281 for (i = 0; i < NR_CPUS; i++) { 281 for (i = 0; i < nr_cpu_ids; i++) {
282 if (early_cpu_to_node(i) != NUMA_NO_NODE) 282 if (early_cpu_to_node(i) != NUMA_NO_NODE)
283 continue; 283 continue;
284 numa_set_node(i, rr); 284 numa_set_node(i, rr);
@@ -549,7 +549,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn)
549 memnodemap[0] = 0; 549 memnodemap[0] = 0;
550 node_set_online(0); 550 node_set_online(0);
551 node_set(0, node_possible_map); 551 node_set(0, node_possible_map);
552 for (i = 0; i < NR_CPUS; i++) 552 for (i = 0; i < nr_cpu_ids; i++)
553 numa_set_node(i, 0); 553 numa_set_node(i, 0);
554 e820_register_active_regions(0, start_pfn, last_pfn); 554 e820_register_active_regions(0, start_pfn, last_pfn);
555 setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); 555 setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 51c0a2fc14fe..09737c8af074 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -382,7 +382,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
382 if (!node_online(i)) 382 if (!node_online(i))
383 setup_node_bootmem(i, nodes[i].start, nodes[i].end); 383 setup_node_bootmem(i, nodes[i].start, nodes[i].end);
384 384
385 for (i = 0; i < NR_CPUS; i++) { 385 for (i = 0; i < nr_cpu_ids; i++) {
386 int node = early_cpu_to_node(i); 386 int node = early_cpu_to_node(i);
387 387
388 if (node == NUMA_NO_NODE) 388 if (node == NUMA_NO_NODE)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 636ef4caa52d..e59e53b11e2b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1079,7 +1079,7 @@ static void drop_other_mm_ref(void *info)
1079 1079
1080static void xen_drop_mm_ref(struct mm_struct *mm) 1080static void xen_drop_mm_ref(struct mm_struct *mm)
1081{ 1081{
1082 cpumask_t mask; 1082 cpumask_var_t mask;
1083 unsigned cpu; 1083 unsigned cpu;
1084 1084
1085 if (current->active_mm == mm) { 1085 if (current->active_mm == mm) {
@@ -1091,7 +1091,16 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
1091 } 1091 }
1092 1092
1093 /* Get the "official" set of cpus referring to our pagetable. */ 1093 /* Get the "official" set of cpus referring to our pagetable. */
1094 mask = mm->cpu_vm_mask; 1094 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1095 for_each_online_cpu(cpu) {
1096 if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
1097 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1098 continue;
1099 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1100 }
1101 return;
1102 }
1103 cpumask_copy(mask, &mm->cpu_vm_mask);
1095 1104
1096 /* It's possible that a vcpu may have a stale reference to our 1105 /* It's possible that a vcpu may have a stale reference to our
1097 cr3, because it's in lazy mode, and it hasn't yet flushed 1106
@@ -1100,11 +1109,12 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
1100 if needed. */ 1109 if needed. */
1101 for_each_online_cpu(cpu) { 1110 for_each_online_cpu(cpu) {
1102 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) 1111 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1103 cpu_set(cpu, mask); 1112 cpumask_set_cpu(cpu, mask);
1104 } 1113 }
1105 1114
1106 if (!cpus_empty(mask)) 1115 if (!cpumask_empty(mask))
1107 smp_call_function_mask(mask, drop_other_mm_ref, mm, 1); 1116 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1117 free_cpumask_var(mask);
1108} 1118}
1109#else 1119#else
1110static void xen_drop_mm_ref(struct mm_struct *mm) 1120static void xen_drop_mm_ref(struct mm_struct *mm)
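This hunk introduces the allocate-or-fall-back idiom for off-stack cpumasks: alloc_cpumask_var() can fail in atomic context, so a slower per-CPU path that needs no mask at all is kept alongside the fast path. The general shape, with do_one_cpu() as a hypothetical stand-in for the per-CPU work:

	static void visit_mm_cpus(struct mm_struct *mm)
	{
		cpumask_var_t mask;
		unsigned int cpu;

		if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
			/* No memory: fall back to one call per online CPU. */
			for_each_online_cpu(cpu)
				do_one_cpu(cpu, mm);	/* hypothetical helper */
			return;
		}

		/* Fast path: build the target set once, fire one multi-call. */
		cpumask_copy(mask, &mm->cpu_vm_mask);
		if (!cpumask_empty(mask))
			smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
		free_cpumask_var(mask);
	}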
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index acd9b6705e02..c44e2069c7c7 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -33,7 +33,7 @@
33#include "xen-ops.h" 33#include "xen-ops.h"
34#include "mmu.h" 34#include "mmu.h"
35 35
36cpumask_t xen_cpu_initialized_map; 36cpumask_var_t xen_cpu_initialized_map;
37 37
38static DEFINE_PER_CPU(int, resched_irq); 38static DEFINE_PER_CPU(int, resched_irq);
39static DEFINE_PER_CPU(int, callfunc_irq); 39static DEFINE_PER_CPU(int, callfunc_irq);
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void)
158{ 158{
159 int i, rc; 159 int i, rc;
160 160
161 for (i = 0; i < NR_CPUS; i++) { 161 for (i = 0; i < nr_cpu_ids; i++) {
162 rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); 162 rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
163 if (rc >= 0) { 163 if (rc >= 0) {
164 num_processors++; 164 num_processors++;
@@ -192,11 +192,14 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
192 if (xen_smp_intr_init(0)) 192 if (xen_smp_intr_init(0))
193 BUG(); 193 BUG();
194 194
195 xen_cpu_initialized_map = cpumask_of_cpu(0); 195 if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
196 panic("could not allocate xen_cpu_initialized_map\n");
197
198 cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
196 199
197 /* Restrict the possible_map according to max_cpus. */ 200 /* Restrict the possible_map according to max_cpus. */
198 while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { 201 while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
199 for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--) 202 for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
200 continue; 203 continue;
201 cpu_clear(cpu, cpu_possible_map); 204 cpu_clear(cpu, cpu_possible_map);
202 } 205 }
@@ -221,7 +224,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
221 struct vcpu_guest_context *ctxt; 224 struct vcpu_guest_context *ctxt;
222 struct desc_struct *gdt; 225 struct desc_struct *gdt;
223 226
224 if (cpu_test_and_set(cpu, xen_cpu_initialized_map)) 227 if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
225 return 0; 228 return 0;
226 229
227 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 230 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
@@ -408,24 +411,23 @@ static void xen_smp_send_reschedule(int cpu)
408 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); 411 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
409} 412}
410 413
411static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) 414static void xen_send_IPI_mask(const struct cpumask *mask,
415 enum ipi_vector vector)
412{ 416{
413 unsigned cpu; 417 unsigned cpu;
414 418
415 cpus_and(mask, mask, cpu_online_map); 419 for_each_cpu_and(cpu, mask, cpu_online_mask)
416
417 for_each_cpu_mask_nr(cpu, mask)
418 xen_send_IPI_one(cpu, vector); 420 xen_send_IPI_one(cpu, vector);
419} 421}
420 422
421static void xen_smp_send_call_function_ipi(cpumask_t mask) 423static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
422{ 424{
423 int cpu; 425 int cpu;
424 426
425 xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); 427 xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
426 428
427 /* Make sure other vcpus get a chance to run if they need to. */ 429 /* Make sure other vcpus get a chance to run if they need to. */
428 for_each_cpu_mask_nr(cpu, mask) { 430 for_each_cpu(cpu, mask) {
429 if (xen_vcpu_stolen(cpu)) { 431 if (xen_vcpu_stolen(cpu)) {
430 HYPERVISOR_sched_op(SCHEDOP_yield, 0); 432 HYPERVISOR_sched_op(SCHEDOP_yield, 0);
431 break; 433 break;
@@ -435,7 +437,8 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask)
435 437
436static void xen_smp_send_call_function_single_ipi(int cpu) 438static void xen_smp_send_call_function_single_ipi(int cpu)
437{ 439{
438 xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR); 440 xen_send_IPI_mask(cpumask_of(cpu),
441 XEN_CALL_FUNCTION_SINGLE_VECTOR);
439} 442}
440 443
441static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) 444static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
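With a const mask, the old in-place trim via cpus_and(mask, mask, cpu_online_map) is off the table; for_each_cpu_and() walks the intersection without materialising it anywhere. A minimal sketch of the same shape as xen_send_IPI_mask() above:

	static void send_to_online(const struct cpumask *mask, enum ipi_vector vector)
	{
		unsigned int cpu;

		/* Visit only CPUs that are both in 'mask' and online,
		 * without writing to either mask. */
		for_each_cpu_and(cpu, mask, cpu_online_mask)
			xen_send_IPI_one(cpu, vector);
	}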
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 2a234db5949b..212ffe012b76 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -35,7 +35,8 @@ void xen_post_suspend(int suspend_cancelled)
35 pfn_to_mfn(xen_start_info->console.domU.mfn); 35 pfn_to_mfn(xen_start_info->console.domU.mfn);
36 } else { 36 } else {
37#ifdef CONFIG_SMP 37#ifdef CONFIG_SMP
38 xen_cpu_initialized_map = cpu_online_map; 38 BUG_ON(xen_cpu_initialized_map == NULL);
39 cpumask_copy(xen_cpu_initialized_map, cpu_online_mask);
39#endif 40#endif
40 xen_vcpu_restore(); 41 xen_vcpu_restore();
41 } 42 }
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 9e1afae8461f..c1f8faf0a2c5 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -58,7 +58,7 @@ void __init xen_init_spinlocks(void);
58__cpuinit void xen_init_lock_cpu(int cpu); 58__cpuinit void xen_init_lock_cpu(int cpu);
59void xen_uninit_lock_cpu(int cpu); 59void xen_uninit_lock_cpu(int cpu);
60 60
61extern cpumask_t xen_cpu_initialized_map; 61extern cpumask_var_t xen_cpu_initialized_map;
62#else 62#else
63static inline void xen_smp_init(void) {} 63static inline void xen_smp_init(void) {}
64#endif 64#endif