author		Linus Torvalds <torvalds@linux-foundation.org>	2009-01-02 14:44:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-02 14:44:09 -0500
commit		b840d79631c882786925303c2b0f4fefc31845ed
tree		cda60a95d4507fe1321fc285af38982d7eb9693b /arch/x86
parent		597b0d21626da4e6f09f132442caf0cc2b0eb47c
parent		c3d80000e3a812fe5a200d6bde755fbd7fa65481
Merge branch 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (66 commits)
x86: export vector_used_by_percpu_irq
x86: use logical apicid in x2apic_cluster's x2apic_cpu_mask_to_apicid_and()
sched: nominate preferred wakeup cpu, fix
x86: fix lguest used_vectors breakage, -v2
x86: fix warning in arch/x86/kernel/io_apic.c
sched: fix warning in kernel/sched.c
sched: move test_sd_parent() to an SMP section of sched.h
sched: add SD_BALANCE_NEWIDLE at MC and CPU level for sched_mc>0
sched: activate active load balancing in new idle cpus
sched: bias task wakeups to preferred semi-idle packages
sched: nominate preferred wakeup cpu
sched: favour lower logical cpu number for sched_mc balance
sched: framework for sched_mc/smt_power_savings=N
sched: convert BALANCE_FOR_xx_POWER to inline functions
x86: use possible_cpus=NUM to extend the possible cpus allowed
x86: fix cpu_mask_to_apicid_and to include cpu_online_mask
x86: update io_apic.c to the new cpumask code
x86: Introduce topology_core_cpumask()/topology_thread_cpumask()
x86: xen: use smp_call_function_many()
x86: use work_on_cpu in x86/kernel/cpu/mcheck/mce_amd_64.c
...
Fixed up trivial conflict in kernel/time/tick-sched.c manually
Diffstat (limited to 'arch/x86')
57 files changed, 937 insertions, 576 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0f44add3e0b7..249d1e0824b5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -601,19 +601,20 @@ config IOMMU_HELPER
 
 config MAXSMP
 	bool "Configure Maximum number of SMP Processors and NUMA Nodes"
-	depends on X86_64 && SMP && BROKEN
+	depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
+	select CPUMASK_OFFSTACK
 	default n
 	help
 	  Configure maximum number of CPUS and NUMA Nodes for this architecture.
 	  If unsure, say N.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-512)" if !MAXSMP
-	range 2 512
-	depends on SMP
+	int "Maximum number of CPUs" if SMP && !MAXSMP
+	range 2 512 if SMP && !MAXSMP
+	default "1" if !SMP
 	default "4096" if MAXSMP
-	default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
-	default "8"
+	default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000)
+	default "8" if SMP
 	help
 	  This allows you to specify the maximum number of CPUs which this
 	  kernel will support.  The maximum supported value is 512 and the
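Note on the MAXSMP change above: selecting CPUMASK_OFFSTACK turns cpumask_var_t into a pointer that must be allocated, rather than a fixed on-stack array. A minimal sketch of the allocation pattern the rest of this series builds on (the variable name is illustrative):

	cpumask_var_t tmp;

	/* With CPUMASK_OFFSTACK=n this "allocation" is a no-op that always
	 * succeeds; with CPUMASK_OFFSTACK=y it kmallocs the mask. */
	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;
	cpumask_and(tmp, cpu_online_mask, cpu_present_mask);
	/* ... use tmp ... */
	free_cpumask_var(tmp);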
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
index ce547f24a1cd..d8dd9f537911 100644
--- a/arch/x86/include/asm/bigsmp/apic.h
+++ b/arch/x86/include/asm/bigsmp/apic.h
@@ -9,12 +9,12 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return &cpu_online_map;
 #else
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 #endif
 }
 
@@ -79,7 +79,7 @@ static inline int apicid_to_node(int logical_apicid)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS)
+	if (mps_cpu < nr_cpu_ids)
 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 
 	return BAD_APICID;
@@ -94,7 +94,7 @@ extern u8 cpu_2_logical_apicid[];
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return cpu_physical_id(cpu);
 }
@@ -119,16 +119,34 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 	int apicid;
 
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
+{
+	int cpu;
+
+	/*
+	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
+	 * May as well be the first.
+	 */
+	for_each_cpu_and(cpu, cpumask, andmask)
+		if (cpumask_test_cpu(cpu, cpu_online_mask))
+			break;
+	if (cpu < nr_cpu_ids)
+		return cpu_to_logical_apicid(cpu);
+
+	return BAD_APICID;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
 	return cpuid_apic >> index_msb;
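The for_each_cpu_and() iterator used in the helper above visits only cpus set in both masks, so cpu_mask_to_apicid_and() effectively picks the lowest-numbered online cpu common to both arguments. A sketch of the equivalent open-coded logic (the helper name is hypothetical, for illustration only):

	static int first_online_cpu_in_both(const struct cpumask *a,
					    const struct cpumask *b)
	{
		int cpu;

		for (cpu = 0; cpu < nr_cpu_ids; cpu++)
			if (cpumask_test_cpu(cpu, a) &&
			    cpumask_test_cpu(cpu, b) &&
			    cpumask_test_cpu(cpu, cpu_online_mask))
				return cpu;
		return nr_cpu_ids;	/* caller maps this to BAD_APICID */
	}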
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
index 9404c535b7ec..27fcd01b3ae6 100644
--- a/arch/x86/include/asm/bigsmp/ipi.h
+++ b/arch/x86/include/asm/bigsmp/ipi.h
@@ -1,25 +1,22 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_MACH_IPI_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index e6b82b17b072..dc27705f5443 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -320,16 +320,14 @@ static inline void set_intr_gate(unsigned int n, void *addr)
 	_set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
 }
 
-#define SYS_VECTOR_FREE		0
-#define SYS_VECTOR_ALLOCED	1
-
 extern int first_system_vector;
-extern char system_vectors[];
+/* used_vectors is BITMAP for irq is not managed by percpu vector_irq */
+extern unsigned long used_vectors[];
 
 static inline void alloc_system_vector(int vector)
 {
-	if (system_vectors[vector] == SYS_VECTOR_FREE) {
-		system_vectors[vector] = SYS_VECTOR_ALLOCED;
+	if (!test_bit(vector, used_vectors)) {
+		set_bit(vector, used_vectors);
 		if (first_system_vector > vector)
 			first_system_vector = vector;
 	} else
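The replacement tracks system vectors in a standard kernel bitmap instead of a char array. A short sketch of the bitmap operations involved (my_vectors is an illustrative name; used_vectors itself is DECLARE_BITMAP(used_vectors, NR_VECTORS) in irq.h below, which expands to an array of unsigned long):

	DECLARE_BITMAP(my_vectors, NR_VECTORS);

	bitmap_zero(my_vectors, NR_VECTORS);
	set_bit(0x30, my_vectors);		/* mark vector 0x30 allocated */
	if (test_bit(0x30, my_vectors))
		clear_bit(0x30, my_vectors);	/* and release it again */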
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
index e24ef876915f..51ac1230294e 100644
--- a/arch/x86/include/asm/es7000/apic.h
+++ b/arch/x86/include/asm/es7000/apic.h
@@ -9,14 +9,14 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-static inline cpumask_t target_cpus_cluster(void)
+static inline const cpumask_t *target_cpus_cluster(void)
 {
-	return CPU_MASK_ALL;
+	return &CPU_MASK_ALL;
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-	return cpumask_of_cpu(smp_processor_id());
+	return &cpumask_of_cpu(smp_processor_id());
 }
 
 #define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
@@ -80,9 +80,10 @@ extern int apic_version [MAX_APICS];
 static inline void setup_apic_routing(void)
 {
 	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
 	printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
-		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
+		"Physical Cluster" : "Logical Cluster",
+		nr_ioapics, cpus_addr(*target_cpus())[0]);
 }
 
 static inline int multi_timer_check(int apic, int irq)
@@ -100,7 +101,7 @@ static inline int cpu_present_to_apicid(int mps_cpu)
 {
 	if (!mps_cpu)
 		return boot_cpu_physical_apicid;
-	else if (mps_cpu < NR_CPUS)
+	else if (mps_cpu < nr_cpu_ids)
 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
@@ -120,9 +121,9 @@ extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 #else
 	return logical_smp_processor_id();
 #endif
@@ -146,14 +147,15 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid)
 	return (1);
 }
 
-static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
+static inline unsigned int
+cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpumask_weight(cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return 0xFF;
@@ -161,10 +163,10 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster.  If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = cpumask_first(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -179,14 +181,14 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 	return apicid;
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return cpu_to_logical_apicid(0);
@@ -194,10 +196,52 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster.  If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
+	apicid = cpu_to_logical_apicid(cpu);
+	while (cpus_found < num_bits_set) {
+		if (cpu_isset(cpu, *cpumask)) {
+			int new_apicid = cpu_to_logical_apicid(cpu);
+			if (apicid_cluster(apicid) !=
+					apicid_cluster(new_apicid)){
+				printk ("%s: Not a valid mask!\n", __func__);
+				return cpu_to_logical_apicid(0);
+			}
+			apicid = new_apicid;
+			cpus_found++;
+		}
+		cpu++;
+	}
+	return apicid;
+}
+
+
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
+						  const struct cpumask *andmask)
+{
+	int num_bits_set;
+	int cpus_found = 0;
+	int cpu;
+	int apicid = cpu_to_logical_apicid(0);
+	cpumask_var_t cpumask;
+
+	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+		return apicid;
+
+	cpumask_and(cpumask, inmask, andmask);
+	cpumask_and(cpumask, cpumask, cpu_online_mask);
+
+	num_bits_set = cpumask_weight(cpumask);
+	/* Return id to all */
+	if (num_bits_set == NR_CPUS)
+		goto exit;
+	/*
+	 * The cpus in the mask must all be on the apic cluster.  If are not
+	 * on the same apicid cluster return default value of TARGET_CPUS.
+	 */
+	cpu = cpumask_first(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -209,6 +253,8 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 		}
 		cpu++;
 	}
+exit:
+	free_cpumask_var(cpumask);
 	return apicid;
 }
 
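The new cpu_mask_to_apicid_and() above needs a scratch mask for the double AND and allocates it with GFP_ATOMIC, presumably because the function can be reached from atomic, interrupt-disabled vector-assignment paths. The cleanup shape is the usual goto-to-a-single-free pattern; a sketch with illustrative names (FALLBACK_ID is hypothetical):

	cpumask_var_t tmp;
	unsigned int result = FALLBACK_ID;

	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
		return result;			/* degrade gracefully */
	cpumask_and(tmp, inmask, andmask);
	/* ... compute result from tmp; goto out on early exit ... */
out:
	free_cpumask_var(tmp);
	return result;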
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
index 632a955fcc0a..7e8ed24d4b8a 100644
--- a/arch/x86/include/asm/es7000/ipi.h
+++ b/arch/x86/include/asm/es7000/ipi.h
@@ -1,24 +1,22 @@
 #ifndef __ASM_ES7000_IPI_H
 #define __ASM_ES7000_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
index 0ac17d33a8c7..746f37a7963a 100644
--- a/arch/x86/include/asm/genapic_32.h
+++ b/arch/x86/include/asm/genapic_32.h
@@ -24,7 +24,7 @@ struct genapic {
 	int (*probe)(void);
 
 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
+	const struct cpumask *(*target_cpus)(void);
 	int int_delivery_mode;
 	int int_dest_mode;
 	int ESR_DISABLE;
@@ -57,12 +57,16 @@ struct genapic {
 
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
-	cpumask_t (*vector_allocation_domain)(int cpu);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+					       const struct cpumask *andmask);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 
 #ifdef CONFIG_SMP
 	/* ipi */
-	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 #endif
@@ -114,6 +118,7 @@ struct genapic {
 	APICFUNC(get_apic_id)				\
 	.apic_id_mask = APIC_ID_MASK,			\
 	APICFUNC(cpu_mask_to_apicid)			\
+	APICFUNC(cpu_mask_to_apicid_and)		\
 	APICFUNC(vector_allocation_domain)		\
 	APICFUNC(acpi_madt_oem_check)			\
 	IPIFUNC(send_IPI_mask)				\
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
index 2cae011668b7..adf32fb56aa6 100644
--- a/arch/x86/include/asm/genapic_64.h
+++ b/arch/x86/include/asm/genapic_64.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_GENAPIC_64_H
 #define _ASM_X86_GENAPIC_64_H
 
+#include <linux/cpumask.h>
+
 /*
  * Copyright 2004 James Cleverdon, IBM.
  * Subject to the GNU Public License, v.2
@@ -18,16 +20,20 @@ struct genapic {
 	u32 int_delivery_mode;
 	u32 int_dest_mode;
 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
-	cpumask_t (*vector_allocation_domain)(int cpu);
+	const struct cpumask *(*target_cpus)(void);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);
 	/* ipi */
-	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 	void (*send_IPI_self)(int vector);
 	/* */
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+					       const struct cpumask *andmask);
 	unsigned int (*phys_pkg_id)(int index_msb);
 	unsigned int (*get_apic_id)(unsigned long x);
 	unsigned long (*set_apic_id)(unsigned int id);
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index f89dffb28aa9..c745a306f7d3 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -117,7 +117,8 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
 	native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
+static inline void send_IPI_mask_sequence(const struct cpumask *mask,
+					  int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
@@ -128,11 +129,29 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 	 * - mbligh
 	 */
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, mask) {
+	for_each_cpu(query_cpu, mask) {
 		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
 				      vector, APIC_DEST_PHYSICAL);
 	}
 	local_irq_restore(flags);
 }
 
+static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
+					    int vector)
+{
+	unsigned long flags;
+	unsigned int query_cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	/* See Hack comment above */
+
+	local_irq_save(flags);
+	for_each_cpu(query_cpu, mask)
+		if (query_cpu != this_cpu)
+			__send_IPI_dest_field(
+				per_cpu(x86_cpu_to_apicid, query_cpu),
+				vector, APIC_DEST_PHYSICAL);
+	local_irq_restore(flags);
+}
+
 #endif /* _ASM_X86_IPI_H */
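The point of passing const struct cpumask * throughout this file: with the new MAXSMP default of NR_CPUS=4096, a cpumask_t is 4096/8 = 512 bytes, so the old by-value send_IPI_mask_sequence(cpumask_t, int) copied half a kilobyte onto the stack per call, while a pointer is one machine word. A sketch of the arithmetic (the function is illustrative):

	#include <linux/cpumask.h>
	#include <linux/kernel.h>

	/* BITS_TO_LONGS(4096) * sizeof(long) == 64 * 8 == 512 bytes on 64-bit */
	static void show_cpumask_size(void)
	{
		pr_info("cpumask_t is %zu bytes\n", sizeof(cpumask_t));
	}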
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 28e409fc73f3..592688ed04d3 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -33,7 +33,7 @@ static inline int irq_canonicalize(int irq)
 
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
-extern void fixup_irqs(cpumask_t map);
+extern void fixup_irqs(void);
 #endif
 
 extern unsigned int do_IRQ(struct pt_regs *regs);
@@ -42,5 +42,6 @@ extern void native_init_IRQ(void);
 
 /* Interrupt vector management */
 extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
+extern int vector_used_by_percpu_irq(unsigned int vector);
 
 #endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
index 6cb3a467e067..cc09cbbee27e 100644
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ b/arch/x86/include/asm/mach-default/mach_apic.h
@@ -8,12 +8,12 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_FLAT)
 
-static inline cpumask_t target_cpus(void)
+static inline const struct cpumask *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return cpu_online_mask;
 #else
-	return cpumask_of_cpu(0);
+	return cpumask_of(0);
 #endif
 }
 
@@ -28,6 +28,7 @@ static inline cpumask_t target_cpus(void)
 #define apic_id_registered (genapic->apic_id_registered)
 #define init_apic_ldr (genapic->init_apic_ldr)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
 #define phys_pkg_id (genapic->phys_pkg_id)
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
@@ -61,9 +62,19 @@ static inline int apic_id_registered(void)
 	return physid_isset(read_apic_id(), phys_cpu_present_map);
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return cpus_addr(cpumask)[0];
+	return cpumask_bits(cpumask)[0];
+}
+
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
+{
+	unsigned long mask1 = cpumask_bits(cpumask)[0];
+	unsigned long mask2 = cpumask_bits(andmask)[0];
+	unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+
+	return (unsigned int)(mask1 & mask2 & mask3);
 }
 
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
@@ -88,7 +99,7 @@ static inline int apicid_to_node(int logical_apicid)
 #endif
 }
 
-static inline cpumask_t vector_allocation_domain(int cpu)
+static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -98,8 +109,7 @@ static inline cpumask_t vector_allocation_domain(int cpu)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
 }
 #endif
 
@@ -131,7 +141,7 @@ static inline int cpu_to_logical_apicid(int cpu)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS && cpu_present(mps_cpu))
+	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
 		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
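In flat logical mode each of the (at most 8) cpus owns one bit of the 8-bit logical APIC destination, so the cpu_mask_to_apicid_and() added in this file reduces to a bitwise AND of the first words of the three masks. A worked example with made-up masks:

	/* cpumask {0,1,3} = 0x0b, andmask {1,3,5} = 0x2a, online cpus 0-7 = 0xff */
	unsigned int dest = 0x0b & 0x2a & 0xff;	/* == 0x0a, i.e. cpus 1 and 3 */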
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
index fabca01ebacf..191312d155da 100644
--- a/arch/x86/include/asm/mach-default/mach_ipi.h
+++ b/arch/x86/include/asm/mach-default/mach_ipi.h
@@ -4,7 +4,8 @@
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
 
-void send_IPI_mask_bitmask(cpumask_t mask, int vector);
+void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 void __send_IPI_shortcut(unsigned int shortcut, int vector);
 
 extern int no_broadcast;
@@ -12,28 +13,27 @@ extern int no_broadcast;
 #ifdef CONFIG_X86_64
 #include <asm/genapic.h>
 #define send_IPI_mask (genapic->send_IPI_mask)
+#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
 #else
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_bitmask(mask, vector);
 }
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 #endif
 
 static inline void __local_send_IPI_allbutself(int vector)
 {
-	if (no_broadcast || vector == NMI_VECTOR) {
-		cpumask_t mask = cpu_online_map;
-
-		cpu_clear(smp_processor_id(), mask);
-		send_IPI_mask(mask, vector);
-	} else
+	if (no_broadcast || vector == NMI_VECTOR)
+		send_IPI_mask_allbutself(cpu_online_mask, vector);
+	else
 		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
 }
 
 static inline void __local_send_IPI_all(int vector)
 {
 	if (no_broadcast || vector == NMI_VECTOR)
-		send_IPI_mask(cpu_online_map, vector);
+		send_IPI_mask(cpu_online_mask, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
 }
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h
index e430f47df667..48553e958ad5 100644
--- a/arch/x86/include/asm/mach-generic/mach_apic.h
+++ b/arch/x86/include/asm/mach-generic/mach_apic.h
@@ -24,6 +24,7 @@
 #define check_phys_apicid_present (genapic->check_phys_apicid_present)
 #define check_apicid_used (genapic->check_apicid_used)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define enable_apic_mode (genapic->enable_apic_mode)
 #define phys_pkg_id (genapic->phys_pkg_id)
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
index 0bf2a06b7a4e..c80f00d29965 100644
--- a/arch/x86/include/asm/numaq/apic.h
+++ b/arch/x86/include/asm/numaq/apic.h
@@ -7,9 +7,9 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-	return CPU_MASK_ALL;
+	return &CPU_MASK_ALL;
 }
 
 #define NO_BALANCE_IRQ (1)
@@ -122,7 +122,13 @@ static inline void enable_apic_mode(void)
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+	return (int) 0xF;
+}
+
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
 {
 	return (int) 0xF;
 }
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h
index 935588d286cf..a8374c652778 100644
--- a/arch/x86/include/asm/numaq/ipi.h
+++ b/arch/x86/include/asm/numaq/ipi.h
@@ -1,25 +1,22 @@
 #ifndef __ASM_NUMAQ_IPI_H
 #define __ASM_NUMAQ_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_NUMAQ_IPI_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index d12811ce51d9..830b9fcb6427 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -60,7 +60,7 @@ struct smp_ops {
 	void (*cpu_die)(unsigned int cpu);
 	void (*play_dead)(void);
 
-	void (*send_call_func_ipi)(cpumask_t mask);
+	void (*send_call_func_ipi)(const struct cpumask *mask);
 	void (*send_call_func_single_ipi)(int cpu);
 };
 
@@ -125,7 +125,7 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 
 static inline void arch_send_call_function_ipi(cpumask_t mask)
 {
-	smp_ops.send_call_func_ipi(mask);
+	smp_ops.send_call_func_ipi(&mask);
 }
 
 void cpu_disable_common(void);
@@ -138,7 +138,7 @@ void native_cpu_die(unsigned int cpu);
 void native_play_dead(void);
 void play_dead_common(void);
 
-void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
 
 extern void prefill_possible_map(void);
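arch_send_call_function_ipi() still receives a cpumask_t by value from the generic code at this point in the series, so the inline above simply takes the address of its argument to reach the now pointer-based smp_ops hook; a transitional shim until the generic caller is converted. A sketch of the resulting call chain:

	/* generic kernel/smp.c, not yet converted: passes cpumask_t by value */
	arch_send_call_function_ipi(mask);
	/* x86 shim above: converts to a pointer for the new interface */
	smp_ops.send_call_func_ipi(&mask);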
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
index 9b3070f1c2ac..99327d1be49f 100644
--- a/arch/x86/include/asm/summit/apic.h
+++ b/arch/x86/include/asm/summit/apic.h
@@ -14,13 +14,13 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 	/* CPU_MASK_ALL (0xff) has undefined behaviour with
 	 * dest_LowestPrio mode logical clustered apic interrupt routing
 	 * Just start on cpu 0.  IRQ balancing will spread load
 	 */
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 }
 
 #define INT_DELIVERY_MODE (dest_LowestPrio)
@@ -137,14 +137,14 @@ static inline void enable_apic_mode(void)
 {
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return (int) 0xFF;
@@ -152,10 +152,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster.  If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpu_isset(cpu, *cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -170,6 +170,49 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
+						  const struct cpumask *andmask)
+{
+	int num_bits_set;
+	int cpus_found = 0;
+	int cpu;
+	int apicid = 0xFF;
+	cpumask_var_t cpumask;
+
+	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+		return (int) 0xFF;
+
+	cpumask_and(cpumask, inmask, andmask);
+	cpumask_and(cpumask, cpumask, cpu_online_mask);
+
+	num_bits_set = cpumask_weight(cpumask);
+	/* Return id to all */
+	if (num_bits_set == nr_cpu_ids)
+		goto exit;
+	/*
+	 * The cpus in the mask must all be on the apic cluster.  If are not
+	 * on the same apicid cluster return default value of TARGET_CPUS.
+	 */
+	cpu = cpumask_first(cpumask);
+	apicid = cpu_to_logical_apicid(cpu);
+	while (cpus_found < num_bits_set) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
+			int new_apicid = cpu_to_logical_apicid(cpu);
+			if (apicid_cluster(apicid) !=
+					apicid_cluster(new_apicid)){
+				printk ("%s: Not a valid mask!\n", __func__);
+				return 0xFF;
+			}
+			apicid = apicid | new_apicid;
+			cpus_found++;
+		}
+		cpu++;
+	}
+exit:
+	free_cpumask_var(cpumask);
+	return apicid;
+}
+
 /* cpuid returns the value latched in the HW at reset, not the APIC ID
  * register's value.  For any box whose BIOS changes APIC IDs, like
  * clustered APIC systems, we must use hard_smp_processor_id.
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h
index 53bd1e7bd7b4..a8a2c24f50cc 100644
--- a/arch/x86/include/asm/summit/ipi.h
+++ b/arch/x86/include/asm/summit/ipi.h
@@ -1,9 +1,10 @@
 #ifndef __ASM_SUMMIT_IPI_H
 #define __ASM_SUMMIT_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
@@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector)
 	cpu_clear(smp_processor_id(), mask);
 
 	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+		send_IPI_mask(&mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(&cpu_online_map, vector);
 }
 
 #endif /* __ASM_SUMMIT_IPI_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index ff386ff50ed7..79e31e9dcdda 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -226,6 +226,8 @@ extern cpumask_t cpu_coregroup_map(int cpu);
 #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
 #define topology_core_siblings(cpu)		(per_cpu(cpu_core_map, cpu))
 #define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)		(&per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
 
 /* indicates that pointers to the topology cpumask_t maps are valid */
 #define arch_provides_topology_pointers		yes
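The new accessors return pointers, so they plug straight into the pointer-based cpumask iterators without copying a whole cpumask_t. A usage sketch (assuming a valid cpu number):

	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		pr_info("cpu %d shares a package with cpu %d\n", cpu, sibling);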
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index b5229affb953..6b7f824db160 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -119,8 +119,6 @@ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
 
 int first_system_vector = 0xfe;
 
-char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
-
 /*
  * Debug level, exported for io_apic.c
  */
@@ -142,7 +140,7 @@ static int lapic_next_event(unsigned long delta,
 			    struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
 			      struct clock_event_device *evt);
-static void lapic_timer_broadcast(cpumask_t mask);
+static void lapic_timer_broadcast(const cpumask_t *mask);
 static void apic_pm_activate(void);
 
 /*
@@ -455,7 +453,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(cpumask_t mask)
+static void lapic_timer_broadcast(const cpumask_t *mask)
 {
 #ifdef CONFIG_SMP
 	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
@@ -471,7 +469,7 @@ static void __cpuinit setup_APIC_timer(void)
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
 	memcpy(levt, &lapic_clockevent, sizeof(*levt));
-	levt->cpumask = cpumask_of_cpu(smp_processor_id());
+	levt->cpumask = cpumask_of(smp_processor_id());
 
 	clockevents_register_device(levt);
 }
@@ -1807,28 +1805,32 @@ void disconnect_bsp_APIC(int virt_wire_setup)
 void __cpuinit generic_processor_info(int apicid, int version)
 {
 	int cpu;
-	cpumask_t tmp_map;
 
 	/*
 	 * Validate version
 	 */
 	if (version == 0x0) {
 		pr_warning("BIOS bug, APIC version is 0 for CPU#%d! "
 			   "fixing up to 0x10. (tell your hw vendor)\n",
			   version);
 		version = 0x10;
 	}
 	apic_version[apicid] = version;
 
-	if (num_processors >= NR_CPUS) {
-		pr_warning("WARNING: NR_CPUS limit of %i reached."
-			   " Processor ignored.\n", NR_CPUS);
+	if (num_processors >= nr_cpu_ids) {
+		int max = nr_cpu_ids;
+		int thiscpu = max + disabled_cpus;
+
+		pr_warning(
+			"ACPI: NR_CPUS/possible_cpus limit of %i reached."
+			" Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
+
+		disabled_cpus++;
 		return;
 	}
 
 	num_processors++;
-	cpus_complement(tmp_map, cpu_present_map);
-	cpu = first_cpu(tmp_map);
+	cpu = cpumask_next_zero(-1, cpu_present_mask);
 
 	physid_set(apicid, phys_cpu_present_map);
 	if (apicid == boot_cpu_physical_apicid) {
@@ -1878,8 +1880,8 @@ void __cpuinit generic_processor_info(int apicid, int version)
 	}
 #endif
 
-	cpu_set(cpu, cpu_possible_map);
-	cpu_set(cpu, cpu_present_map);
+	set_cpu_possible(cpu, true);
+	set_cpu_present(cpu, true);
 }
 
 #ifdef CONFIG_X86_64
@@ -2081,7 +2083,7 @@ __cpuinit int apic_is_clustered_box(void)
 	bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
 	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		/* are we being called early in kernel startup? */
 		if (bios_cpu_apicid) {
 			id = bios_cpu_apicid[i];
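The cpumask_next_zero() conversion in generic_processor_info() avoids the on-stack tmp_map entirely: cpumask_next_zero(n, mask) returns the first clear bit after n, and starting from -1 scans from bit 0. A sketch of the before/after:

	/* old: complement the whole present map into a 512-byte temporary,
	 *      then take its first set bit;
	 * new: find the first zero bit of the present mask directly. */
	cpu = cpumask_next_zero(-1, cpu_present_mask);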
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 68b5d8681cbb..c6ecda64f5f1 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -534,31 +534,16 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	per_cpu(cpuid4_info, cpu) = NULL;
 }
 
-static int __cpuinit detect_cache_attributes(unsigned int cpu)
+static void get_cpu_leaves(void *_retval)
 {
-	struct _cpuid4_info	*this_leaf;
-	unsigned long		j;
-	int			retval;
-	cpumask_t		oldmask;
-
-	if (num_cache_leaves == 0)
-		return -ENOENT;
-
-	per_cpu(cpuid4_info, cpu) = kzalloc(
-	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-	if (per_cpu(cpuid4_info, cpu) == NULL)
-		return -ENOMEM;
-
-	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-	if (retval)
-		goto out;
+	int j, *retval = _retval, cpu = smp_processor_id();
 
 	/* Do cpuid and store the results */
 	for (j = 0; j < num_cache_leaves; j++) {
+		struct _cpuid4_info *this_leaf;
 		this_leaf = CPUID4_INFO_IDX(cpu, j);
-		retval = cpuid4_cache_lookup(j, this_leaf);
-		if (unlikely(retval < 0)) {
+		*retval = cpuid4_cache_lookup(j, this_leaf);
+		if (unlikely(*retval < 0)) {
 			int i;
 
 			for (i = 0; i < j; i++)
@@ -567,9 +552,21 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 		}
 		cache_shared_cpu_map_setup(cpu, j);
 	}
-	set_cpus_allowed_ptr(current, &oldmask);
+}
+
+static int __cpuinit detect_cache_attributes(unsigned int cpu)
+{
+	int retval;
+
+	if (num_cache_leaves == 0)
+		return -ENOENT;
+
+	per_cpu(cpuid4_info, cpu) = kzalloc(
+	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
+	if (per_cpu(cpuid4_info, cpu) == NULL)
+		return -ENOMEM;
 
-out:
+	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
 	if (retval) {
 		kfree(per_cpu(cpuid4_info, cpu));
 		per_cpu(cpuid4_info, cpu) = NULL;
@@ -626,8 +623,8 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 		cpumask_t *mask = &this_leaf->shared_cpu_map;
 
 		n = type?
-			cpulist_scnprintf(buf, len-2, *mask):
-			cpumask_scnprintf(buf, len-2, *mask);
+			cpulist_scnprintf(buf, len-2, mask) :
+			cpumask_scnprintf(buf, len-2, mask);
 		buf[n++] = '\n';
 		buf[n] = '\0';
 	}
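The detect_cache_attributes() rework replaces the "migrate current to the target cpu with set_cpus_allowed_ptr(), do the work, migrate back" dance with a function-call IPI that runs the work directly on the target cpu. The general pattern, sketched with an illustrative worker:

	static void do_on_cpu(void *info)
	{
		int *ret = info;

		*ret = 0;	/* work that must execute on the target cpu */
	}

	int ret;

	smp_call_function_single(cpu, do_on_cpu, &ret, 1);	/* 1 == wait */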
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 748c8f9e7a05..a5a5e0530370 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -83,34 +83,41 @@ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
  * CPU Initialization
  */
 
+struct thresh_restart {
+	struct threshold_block *b;
+	int reset;
+	u16 old_limit;
+};
+
 /* must be called with correct cpu affinity */
-static void threshold_restart_bank(struct threshold_block *b,
-				   int reset, u16 old_limit)
+static long threshold_restart_bank(void *_tr)
 {
+	struct thresh_restart *tr = _tr;
 	u32 mci_misc_hi, mci_misc_lo;
 
-	rdmsr(b->address, mci_misc_lo, mci_misc_hi);
+	rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
 
-	if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
-		reset = 1;	/* limit cannot be lower than err count */
+	if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
+		tr->reset = 1;	/* limit cannot be lower than err count */
 
-	if (reset) {		/* reset err count and overflow bit */
+	if (tr->reset) {	/* reset err count and overflow bit */
 		mci_misc_hi =
 			(mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
-			(THRESHOLD_MAX - b->threshold_limit);
-	} else if (old_limit) {	/* change limit w/o reset */
+			(THRESHOLD_MAX - tr->b->threshold_limit);
+	} else if (tr->old_limit) {	/* change limit w/o reset */
 		int new_count = (mci_misc_hi & THRESHOLD_MAX) +
-			(old_limit - b->threshold_limit);
+			(tr->old_limit - tr->b->threshold_limit);
 		mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
 			(new_count & THRESHOLD_MAX);
 	}
 
-	b->interrupt_enable ?
+	tr->b->interrupt_enable ?
 	    (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
 	    (mci_misc_hi &= ~MASK_INT_TYPE_HI);
 
 	mci_misc_hi |= MASK_COUNT_EN_HI;
-	wrmsr(b->address, mci_misc_lo, mci_misc_hi);
+	wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
+	return 0;
 }
 
 /* cpu init entry point, called from mce.c with preempt off */
@@ -120,6 +127,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
 	unsigned int cpu = smp_processor_id();
 	u8 lvt_off;
 	u32 low = 0, high = 0, address = 0;
+	struct thresh_restart tr;
 
 	for (bank = 0; bank < NR_BANKS; ++bank) {
 		for (block = 0; block < NR_BLOCKS; ++block) {
@@ -162,7 +170,10 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
 			wrmsr(address, low, high);
 
 			threshold_defaults.address = address;
165 | threshold_restart_bank(&threshold_defaults, 0, 0); | 173 | tr.b = &threshold_defaults; |
174 | tr.reset = 0; | ||
175 | tr.old_limit = 0; | ||
176 | threshold_restart_bank(&tr); | ||
166 | } | 177 | } |
167 | } | 178 | } |
168 | } | 179 | } |
@@ -251,20 +262,6 @@ struct threshold_attr { | |||
251 | ssize_t(*store) (struct threshold_block *, const char *, size_t count); | 262 | ssize_t(*store) (struct threshold_block *, const char *, size_t count); |
252 | }; | 263 | }; |
253 | 264 | ||
254 | static void affinity_set(unsigned int cpu, cpumask_t *oldmask, | ||
255 | cpumask_t *newmask) | ||
256 | { | ||
257 | *oldmask = current->cpus_allowed; | ||
258 | cpus_clear(*newmask); | ||
259 | cpu_set(cpu, *newmask); | ||
260 | set_cpus_allowed_ptr(current, newmask); | ||
261 | } | ||
262 | |||
263 | static void affinity_restore(const cpumask_t *oldmask) | ||
264 | { | ||
265 | set_cpus_allowed_ptr(current, oldmask); | ||
266 | } | ||
267 | |||
268 | #define SHOW_FIELDS(name) \ | 265 | #define SHOW_FIELDS(name) \ |
269 | static ssize_t show_ ## name(struct threshold_block * b, char *buf) \ | 266 | static ssize_t show_ ## name(struct threshold_block * b, char *buf) \ |
270 | { \ | 267 | { \ |
@@ -277,15 +274,16 @@ static ssize_t store_interrupt_enable(struct threshold_block *b, | |||
277 | const char *buf, size_t count) | 274 | const char *buf, size_t count) |
278 | { | 275 | { |
279 | char *end; | 276 | char *end; |
280 | cpumask_t oldmask, newmask; | 277 | struct thresh_restart tr; |
281 | unsigned long new = simple_strtoul(buf, &end, 0); | 278 | unsigned long new = simple_strtoul(buf, &end, 0); |
282 | if (end == buf) | 279 | if (end == buf) |
283 | return -EINVAL; | 280 | return -EINVAL; |
284 | b->interrupt_enable = !!new; | 281 | b->interrupt_enable = !!new; |
285 | 282 | ||
286 | affinity_set(b->cpu, &oldmask, &newmask); | 283 | tr.b = b; |
287 | threshold_restart_bank(b, 0, 0); | 284 | tr.reset = 0; |
288 | affinity_restore(&oldmask); | 285 | tr.old_limit = 0; |
286 | work_on_cpu(b->cpu, threshold_restart_bank, &tr); | ||
289 | 287 | ||
290 | return end - buf; | 288 | return end - buf; |
291 | } | 289 | } |
@@ -294,8 +292,7 @@ static ssize_t store_threshold_limit(struct threshold_block *b, | |||
294 | const char *buf, size_t count) | 292 | const char *buf, size_t count) |
295 | { | 293 | { |
296 | char *end; | 294 | char *end; |
297 | cpumask_t oldmask, newmask; | 295 | struct thresh_restart tr; |
298 | u16 old; | ||
299 | unsigned long new = simple_strtoul(buf, &end, 0); | 296 | unsigned long new = simple_strtoul(buf, &end, 0); |
300 | if (end == buf) | 297 | if (end == buf) |
301 | return -EINVAL; | 298 | return -EINVAL; |
@@ -303,34 +300,36 @@ static ssize_t store_threshold_limit(struct threshold_block *b, | |||
303 | new = THRESHOLD_MAX; | 300 | new = THRESHOLD_MAX; |
304 | if (new < 1) | 301 | if (new < 1) |
305 | new = 1; | 302 | new = 1; |
306 | old = b->threshold_limit; | 303 | tr.old_limit = b->threshold_limit; |
307 | b->threshold_limit = new; | 304 | b->threshold_limit = new; |
305 | tr.b = b; | ||
306 | tr.reset = 0; | ||
308 | 307 | ||
309 | affinity_set(b->cpu, &oldmask, &newmask); | 308 | work_on_cpu(b->cpu, threshold_restart_bank, &tr); |
310 | threshold_restart_bank(b, 0, old); | ||
311 | affinity_restore(&oldmask); | ||
312 | 309 | ||
313 | return end - buf; | 310 | return end - buf; |
314 | } | 311 | } |
315 | 312 | ||
316 | static ssize_t show_error_count(struct threshold_block *b, char *buf) | 313 | static long local_error_count(void *_b) |
317 | { | 314 | { |
318 | u32 high, low; | 315 | struct threshold_block *b = _b; |
319 | cpumask_t oldmask, newmask; | 316 | u32 low, high; |
320 | affinity_set(b->cpu, &oldmask, &newmask); | 317 | |
321 | rdmsr(b->address, low, high); | 318 | rdmsr(b->address, low, high); |
322 | affinity_restore(&oldmask); | 319 | return (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit); |
323 | return sprintf(buf, "%x\n", | 320 | } |
324 | (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit)); | 321 | |
322 | static ssize_t show_error_count(struct threshold_block *b, char *buf) | ||
323 | { | ||
324 | return sprintf(buf, "%lx\n", work_on_cpu(b->cpu, local_error_count, b)); | ||
325 | } | 325 | } |
326 | 326 | ||
327 | static ssize_t store_error_count(struct threshold_block *b, | 327 | static ssize_t store_error_count(struct threshold_block *b, |
328 | const char *buf, size_t count) | 328 | const char *buf, size_t count) |
329 | { | 329 | { |
330 | cpumask_t oldmask, newmask; | 330 | struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 }; |
331 | affinity_set(b->cpu, &oldmask, &newmask); | 331 | |
332 | threshold_restart_bank(b, 1, 0); | 332 | work_on_cpu(b->cpu, threshold_restart_bank, &tr); |
333 | affinity_restore(&oldmask); | ||
334 | return 1; | 333 | return 1; |
335 | } | 334 | } |
336 | 335 | ||
@@ -463,12 +462,19 @@ out_free: | |||
463 | return err; | 462 | return err; |
464 | } | 463 | } |
465 | 464 | ||
465 | static long local_allocate_threshold_blocks(void *_bank) | ||
466 | { | ||
467 | unsigned int *bank = _bank; | ||
468 | |||
469 | return allocate_threshold_blocks(smp_processor_id(), *bank, 0, | ||
470 | MSR_IA32_MC0_MISC + *bank * 4); | ||
471 | } | ||
472 | |||
466 | /* symlinks sibling shared banks to first core. first core owns dir/files. */ | 473 | /* symlinks sibling shared banks to first core. first core owns dir/files. */ |
467 | static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | 474 | static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) |
468 | { | 475 | { |
469 | int i, err = 0; | 476 | int i, err = 0; |
470 | struct threshold_bank *b = NULL; | 477 | struct threshold_bank *b = NULL; |
471 | cpumask_t oldmask, newmask; | ||
472 | char name[32]; | 478 | char name[32]; |
473 | 479 | ||
474 | sprintf(name, "threshold_bank%i", bank); | 480 | sprintf(name, "threshold_bank%i", bank); |
@@ -519,11 +525,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
519 | 525 | ||
520 | per_cpu(threshold_banks, cpu)[bank] = b; | 526 | per_cpu(threshold_banks, cpu)[bank] = b; |
521 | 527 | ||
522 | affinity_set(cpu, &oldmask, &newmask); | 528 | err = work_on_cpu(cpu, local_allocate_threshold_blocks, &bank); |
523 | err = allocate_threshold_blocks(cpu, bank, 0, | ||
524 | MSR_IA32_MC0_MISC + bank * 4); | ||
525 | affinity_restore(&oldmask); | ||
526 | |||
527 | if (err) | 529 | if (err) |
528 | goto out_free; | 530 | goto out_free; |
529 | 531 | ||
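Same idea as the cacheinfo conversion, but via work_on_cpu(): the callback is queued as a work item on the target CPU and runs in process context, so it may sleep, and its long return value is propagated back to the caller. A sketch under those assumptions; struct thresh_args and do_restart() are illustrative stand-ins for thresh_restart/threshold_restart_bank():

	#include <linux/types.h>
	#include <linux/workqueue.h>

	struct thresh_args {		/* all inputs travel in one struct */
		int reset;
		u16 old_limit;
	};

	/* Runs on the chosen CPU; rdmsr()/wrmsr() hit that CPU's bank. */
	static long do_restart(void *_args)
	{
		struct thresh_args *args = _args;

		(void)args;		/* reprogram the threshold registers here */
		return 0;
	}

	static long restart_bank_on(unsigned int cpu)
	{
		struct thresh_args args = { .reset = 1, .old_limit = 0 };

		/* work_on_cpu() waits for completion, so &args may be on-stack */
		return work_on_cpu(cpu, do_restart, &args);
	}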
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index c0262791bda4..34185488e4fb 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
30 | return 1; | 30 | return 1; |
31 | } | 31 | } |
32 | 32 | ||
33 | static cpumask_t flat_target_cpus(void) | 33 | static const struct cpumask *flat_target_cpus(void) |
34 | { | 34 | { |
35 | return cpu_online_map; | 35 | return cpu_online_mask; |
36 | } | 36 | } |
37 | 37 | ||
38 | static cpumask_t flat_vector_allocation_domain(int cpu) | 38 | static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask) |
39 | { | 39 | { |
40 | /* Careful. Some cpus do not strictly honor the set of cpus | 40 | /* Careful. Some cpus do not strictly honor the set of cpus |
41 | * specified in the interrupt destination when using lowest | 41 | * specified in the interrupt destination when using lowest |
@@ -45,8 +45,8 @@ static cpumask_t flat_vector_allocation_domain(int cpu) | |||
45 | * deliver interrupts to the wrong hyperthread when only one | 45 | * deliver interrupts to the wrong hyperthread when only one |
46 | * hyperthread was specified in the interrupt destination. | 46 | * hyperthread was specified in the interrupt destination. |
47 | */ | 47 | */ |
48 | cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; | 48 | cpumask_clear(retmask); |
49 | return domain; | 49 | cpumask_bits(retmask)[0] = APIC_ALL_CPUS; |
50 | } | 50 | } |
51 | 51 | ||
52 | /* | 52 | /* |
@@ -69,9 +69,8 @@ static void flat_init_apic_ldr(void) | |||
69 | apic_write(APIC_LDR, val); | 69 | apic_write(APIC_LDR, val); |
70 | } | 70 | } |
71 | 71 | ||
72 | static void flat_send_IPI_mask(cpumask_t cpumask, int vector) | 72 | static inline void _flat_send_IPI_mask(unsigned long mask, int vector) |
73 | { | 73 | { |
74 | unsigned long mask = cpus_addr(cpumask)[0]; | ||
75 | unsigned long flags; | 74 | unsigned long flags; |
76 | 75 | ||
77 | local_irq_save(flags); | 76 | local_irq_save(flags); |
@@ -79,20 +78,41 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector) | |||
79 | local_irq_restore(flags); | 78 | local_irq_restore(flags); |
80 | } | 79 | } |
81 | 80 | ||
81 | static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) | ||
82 | { | ||
83 | unsigned long mask = cpumask_bits(cpumask)[0]; | ||
84 | |||
85 | _flat_send_IPI_mask(mask, vector); | ||
86 | } | ||
87 | |||
88 | static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, | ||
89 | int vector) | ||
90 | { | ||
91 | unsigned long mask = cpumask_bits(cpumask)[0]; | ||
92 | int cpu = smp_processor_id(); | ||
93 | |||
94 | if (cpu < BITS_PER_LONG) | ||
95 | clear_bit(cpu, &mask); | ||
96 | _flat_send_IPI_mask(mask, vector); | ||
97 | } | ||
98 | |||
82 | static void flat_send_IPI_allbutself(int vector) | 99 | static void flat_send_IPI_allbutself(int vector) |
83 | { | 100 | { |
101 | int cpu = smp_processor_id(); | ||
84 | #ifdef CONFIG_HOTPLUG_CPU | 102 | #ifdef CONFIG_HOTPLUG_CPU |
85 | int hotplug = 1; | 103 | int hotplug = 1; |
86 | #else | 104 | #else |
87 | int hotplug = 0; | 105 | int hotplug = 0; |
88 | #endif | 106 | #endif |
89 | if (hotplug || vector == NMI_VECTOR) { | 107 | if (hotplug || vector == NMI_VECTOR) { |
90 | cpumask_t allbutme = cpu_online_map; | 108 | if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) { |
109 | unsigned long mask = cpumask_bits(cpu_online_mask)[0]; | ||
91 | 110 | ||
92 | cpu_clear(smp_processor_id(), allbutme); | 111 | if (cpu < BITS_PER_LONG) |
112 | clear_bit(cpu, &mask); | ||
93 | 113 | ||
94 | if (!cpus_empty(allbutme)) | 114 | _flat_send_IPI_mask(mask, vector); |
95 | flat_send_IPI_mask(allbutme, vector); | 115 | } |
96 | } else if (num_online_cpus() > 1) { | 116 | } else if (num_online_cpus() > 1) { |
97 | __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL); | 117 | __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL); |
98 | } | 118 | } |
@@ -101,7 +121,7 @@ static void flat_send_IPI_allbutself(int vector) | |||
101 | static void flat_send_IPI_all(int vector) | 121 | static void flat_send_IPI_all(int vector) |
102 | { | 122 | { |
103 | if (vector == NMI_VECTOR) | 123 | if (vector == NMI_VECTOR) |
104 | flat_send_IPI_mask(cpu_online_map, vector); | 124 | flat_send_IPI_mask(cpu_online_mask, vector); |
105 | else | 125 | else |
106 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); | 126 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); |
107 | } | 127 | } |
@@ -135,9 +155,18 @@ static int flat_apic_id_registered(void) | |||
135 | return physid_isset(read_xapic_id(), phys_cpu_present_map); | 155 | return physid_isset(read_xapic_id(), phys_cpu_present_map); |
136 | } | 156 | } |
137 | 157 | ||
138 | static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) | 158 | static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask) |
159 | { | ||
160 | return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; | ||
161 | } | ||
162 | |||
163 | static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
164 | const struct cpumask *andmask) | ||
139 | { | 165 | { |
140 | return cpus_addr(cpumask)[0] & APIC_ALL_CPUS; | 166 | unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; |
167 | unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS; | ||
168 | |||
169 | return mask1 & mask2; | ||
141 | } | 170 | } |
142 | 171 | ||
143 | static unsigned int phys_pkg_id(int index_msb) | 172 | static unsigned int phys_pkg_id(int index_msb) |
@@ -157,8 +186,10 @@ struct genapic apic_flat = { | |||
157 | .send_IPI_all = flat_send_IPI_all, | 186 | .send_IPI_all = flat_send_IPI_all, |
158 | .send_IPI_allbutself = flat_send_IPI_allbutself, | 187 | .send_IPI_allbutself = flat_send_IPI_allbutself, |
159 | .send_IPI_mask = flat_send_IPI_mask, | 188 | .send_IPI_mask = flat_send_IPI_mask, |
189 | .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, | ||
160 | .send_IPI_self = apic_send_IPI_self, | 190 | .send_IPI_self = apic_send_IPI_self, |
161 | .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, | 191 | .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, |
192 | .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, | ||
162 | .phys_pkg_id = phys_pkg_id, | 193 | .phys_pkg_id = phys_pkg_id, |
163 | .get_apic_id = get_apic_id, | 194 | .get_apic_id = get_apic_id, |
164 | .set_apic_id = set_apic_id, | 195 | .set_apic_id = set_apic_id, |
@@ -188,35 +219,39 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
188 | return 0; | 219 | return 0; |
189 | } | 220 | } |
190 | 221 | ||
191 | static cpumask_t physflat_target_cpus(void) | 222 | static const struct cpumask *physflat_target_cpus(void) |
192 | { | 223 | { |
193 | return cpu_online_map; | 224 | return cpu_online_mask; |
194 | } | 225 | } |
195 | 226 | ||
196 | static cpumask_t physflat_vector_allocation_domain(int cpu) | 227 | static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask) |
197 | { | 228 | { |
198 | return cpumask_of_cpu(cpu); | 229 | cpumask_clear(retmask); |
230 | cpumask_set_cpu(cpu, retmask); | ||
199 | } | 231 | } |
200 | 232 | ||
201 | static void physflat_send_IPI_mask(cpumask_t cpumask, int vector) | 233 | static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) |
202 | { | 234 | { |
203 | send_IPI_mask_sequence(cpumask, vector); | 235 | send_IPI_mask_sequence(cpumask, vector); |
204 | } | 236 | } |
205 | 237 | ||
206 | static void physflat_send_IPI_allbutself(int vector) | 238 | static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask, |
239 | int vector) | ||
207 | { | 240 | { |
208 | cpumask_t allbutme = cpu_online_map; | 241 | send_IPI_mask_allbutself(cpumask, vector); |
242 | } | ||
209 | 243 | ||
210 | cpu_clear(smp_processor_id(), allbutme); | 244 | static void physflat_send_IPI_allbutself(int vector) |
211 | physflat_send_IPI_mask(allbutme, vector); | 245 | { |
246 | send_IPI_mask_allbutself(cpu_online_mask, vector); | ||
212 | } | 247 | } |
213 | 248 | ||
214 | static void physflat_send_IPI_all(int vector) | 249 | static void physflat_send_IPI_all(int vector) |
215 | { | 250 | { |
216 | physflat_send_IPI_mask(cpu_online_map, vector); | 251 | physflat_send_IPI_mask(cpu_online_mask, vector); |
217 | } | 252 | } |
218 | 253 | ||
219 | static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) | 254 | static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask) |
220 | { | 255 | { |
221 | int cpu; | 256 | int cpu; |
222 | 257 | ||
@@ -224,13 +259,31 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) | |||
224 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | 259 | * We're using fixed IRQ delivery, can only return one phys APIC ID. |
225 | * May as well be the first. | 260 | * May as well be the first. |
226 | */ | 261 | */ |
227 | cpu = first_cpu(cpumask); | 262 | cpu = cpumask_first(cpumask); |
228 | if ((unsigned)cpu < nr_cpu_ids) | 263 | if ((unsigned)cpu < nr_cpu_ids) |
229 | return per_cpu(x86_cpu_to_apicid, cpu); | 264 | return per_cpu(x86_cpu_to_apicid, cpu); |
230 | else | 265 | else |
231 | return BAD_APICID; | 266 | return BAD_APICID; |
232 | } | 267 | } |
233 | 268 | ||
269 | static unsigned int | ||
270 | physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
271 | const struct cpumask *andmask) | ||
272 | { | ||
273 | int cpu; | ||
274 | |||
275 | /* | ||
276 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
277 | * May as well be the first. | ||
278 | */ | ||
279 | for_each_cpu_and(cpu, cpumask, andmask) | ||
280 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | ||
281 | break; | ||
282 | if (cpu < nr_cpu_ids) | ||
283 | return per_cpu(x86_cpu_to_apicid, cpu); | ||
284 | return BAD_APICID; | ||
285 | } | ||
286 | |||
234 | struct genapic apic_physflat = { | 287 | struct genapic apic_physflat = { |
235 | .name = "physical flat", | 288 | .name = "physical flat", |
236 | .acpi_madt_oem_check = physflat_acpi_madt_oem_check, | 289 | .acpi_madt_oem_check = physflat_acpi_madt_oem_check, |
@@ -243,8 +296,10 @@ struct genapic apic_physflat = { | |||
243 | .send_IPI_all = physflat_send_IPI_all, | 296 | .send_IPI_all = physflat_send_IPI_all, |
244 | .send_IPI_allbutself = physflat_send_IPI_allbutself, | 297 | .send_IPI_allbutself = physflat_send_IPI_allbutself, |
245 | .send_IPI_mask = physflat_send_IPI_mask, | 298 | .send_IPI_mask = physflat_send_IPI_mask, |
299 | .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, | ||
246 | .send_IPI_self = apic_send_IPI_self, | 300 | .send_IPI_self = apic_send_IPI_self, |
247 | .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, | 301 | .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, |
302 | .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and, | ||
248 | .phys_pkg_id = phys_pkg_id, | 303 | .phys_pkg_id = phys_pkg_id, |
249 | .get_apic_id = get_apic_id, | 304 | .get_apic_id = get_apic_id, |
250 | .set_apic_id = set_apic_id, | 305 | .set_apic_id = set_apic_id, |
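flat mode can keep working on one destination word because logical flat addressing covers at most BITS_PER_LONG CPUs; cpumask_bits() exposes the raw bitmap so word 0 is read without copying a possibly 4096-bit mask. A sketch of the two helpers this file now leans on, assuming the caller has preemption disabled around smp_processor_id():

	#include <linux/bitops.h>
	#include <linux/cpumask.h>
	#include <linux/smp.h>

	/* Word 0 of the bitmap is all flat mode can address anyway. */
	static unsigned long flat_low_word(const struct cpumask *mask)
	{
		return cpumask_bits(mask)[0];
	}

	/* Same word, minus the calling CPU: no temporary cpumask needed. */
	static unsigned long flat_low_word_but_self(const struct cpumask *mask)
	{
		unsigned long w = cpumask_bits(mask)[0];
		int cpu = smp_processor_id();

		if (cpu < BITS_PER_LONG)
			clear_bit(cpu, &w);
		return w;
	}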
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index f6a2c8eb48a6..6ce497cc372d 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -22,19 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
22 | 22 | ||
23 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | 23 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ |
24 | 24 | ||
25 | static cpumask_t x2apic_target_cpus(void) | 25 | static const struct cpumask *x2apic_target_cpus(void) |
26 | { | 26 | { |
27 | return cpumask_of_cpu(0); | 27 | return cpumask_of(0); |
28 | } | 28 | } |
29 | 29 | ||
30 | /* | 30 | /* |
31 | * for now each logical cpu is in its own vector allocation domain. | 31 | * for now each logical cpu is in its own vector allocation domain. |
32 | */ | 32 | */ |
33 | static cpumask_t x2apic_vector_allocation_domain(int cpu) | 33 | static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) |
34 | { | 34 | { |
35 | cpumask_t domain = CPU_MASK_NONE; | 35 | cpumask_clear(retmask); |
36 | cpu_set(cpu, domain); | 36 | cpumask_set_cpu(cpu, retmask); |
37 | return domain; | ||
38 | } | 37 | } |
39 | 38 | ||
40 | static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, | 39 | static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, |
@@ -56,32 +55,53 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, | |||
56 | * at once. We have 16 cpus in a cluster. This will minimize IPI register | 55 | * at once. We have 16 cpus in a cluster. This will minimize IPI register |
57 | * writes. | 56 | * writes. |
58 | */ | 57 | */ |
59 | static void x2apic_send_IPI_mask(cpumask_t mask, int vector) | 58 | static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) |
60 | { | 59 | { |
61 | unsigned long flags; | 60 | unsigned long flags; |
62 | unsigned long query_cpu; | 61 | unsigned long query_cpu; |
63 | 62 | ||
64 | local_irq_save(flags); | 63 | local_irq_save(flags); |
65 | for_each_cpu_mask(query_cpu, mask) { | 64 | for_each_cpu(query_cpu, mask) |
66 | __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu), | 65 | __x2apic_send_IPI_dest( |
67 | vector, APIC_DEST_LOGICAL); | 66 | per_cpu(x86_cpu_to_logical_apicid, query_cpu), |
68 | } | 67 | vector, APIC_DEST_LOGICAL); |
69 | local_irq_restore(flags); | 68 | local_irq_restore(flags); |
70 | } | 69 | } |
71 | 70 | ||
72 | static void x2apic_send_IPI_allbutself(int vector) | 71 | static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, |
72 | int vector) | ||
73 | { | 73 | { |
74 | cpumask_t mask = cpu_online_map; | 74 | unsigned long flags; |
75 | unsigned long query_cpu; | ||
76 | unsigned long this_cpu = smp_processor_id(); | ||
75 | 77 | ||
76 | cpu_clear(smp_processor_id(), mask); | 78 | local_irq_save(flags); |
79 | for_each_cpu(query_cpu, mask) | ||
80 | if (query_cpu != this_cpu) | ||
81 | __x2apic_send_IPI_dest( | ||
82 | per_cpu(x86_cpu_to_logical_apicid, query_cpu), | ||
83 | vector, APIC_DEST_LOGICAL); | ||
84 | local_irq_restore(flags); | ||
85 | } | ||
86 | |||
87 | static void x2apic_send_IPI_allbutself(int vector) | ||
88 | { | ||
89 | unsigned long flags; | ||
90 | unsigned long query_cpu; | ||
91 | unsigned long this_cpu = smp_processor_id(); | ||
77 | 92 | ||
78 | if (!cpus_empty(mask)) | 93 | local_irq_save(flags); |
79 | x2apic_send_IPI_mask(mask, vector); | 94 | for_each_online_cpu(query_cpu) |
95 | if (query_cpu != this_cpu) | ||
96 | __x2apic_send_IPI_dest( | ||
97 | per_cpu(x86_cpu_to_logical_apicid, query_cpu), | ||
98 | vector, APIC_DEST_LOGICAL); | ||
99 | local_irq_restore(flags); | ||
80 | } | 100 | } |
81 | 101 | ||
82 | static void x2apic_send_IPI_all(int vector) | 102 | static void x2apic_send_IPI_all(int vector) |
83 | { | 103 | { |
84 | x2apic_send_IPI_mask(cpu_online_map, vector); | 104 | x2apic_send_IPI_mask(cpu_online_mask, vector); |
85 | } | 105 | } |
86 | 106 | ||
87 | static int x2apic_apic_id_registered(void) | 107 | static int x2apic_apic_id_registered(void) |
@@ -89,21 +109,38 @@ static int x2apic_apic_id_registered(void) | |||
89 | return 1; | 109 | return 1; |
90 | } | 110 | } |
91 | 111 | ||
92 | static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) | 112 | static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) |
93 | { | 113 | { |
94 | int cpu; | 114 | int cpu; |
95 | 115 | ||
96 | /* | 116 | /* |
97 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | 117 | * We're using fixed IRQ delivery, can only return one logical APIC ID. |
98 | * May as well be the first. | 118 | * May as well be the first. |
99 | */ | 119 | */ |
100 | cpu = first_cpu(cpumask); | 120 | cpu = cpumask_first(cpumask); |
101 | if ((unsigned)cpu < NR_CPUS) | 121 | if ((unsigned)cpu < nr_cpu_ids) |
102 | return per_cpu(x86_cpu_to_logical_apicid, cpu); | 122 | return per_cpu(x86_cpu_to_logical_apicid, cpu); |
103 | else | 123 | else |
104 | return BAD_APICID; | 124 | return BAD_APICID; |
105 | } | 125 | } |
106 | 126 | ||
127 | static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
128 | const struct cpumask *andmask) | ||
129 | { | ||
130 | int cpu; | ||
131 | |||
132 | /* | ||
133 | * We're using fixed IRQ delivery, can only return one logical APIC ID. | ||
134 | * May as well be the first. | ||
135 | */ | ||
136 | for_each_cpu_and(cpu, cpumask, andmask) | ||
137 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | ||
138 | break; | ||
139 | if (cpu < nr_cpu_ids) | ||
140 | return per_cpu(x86_cpu_to_logical_apicid, cpu); | ||
141 | return BAD_APICID; | ||
142 | } | ||
143 | |||
107 | static unsigned int get_apic_id(unsigned long x) | 144 | static unsigned int get_apic_id(unsigned long x) |
108 | { | 145 | { |
109 | unsigned int id; | 146 | unsigned int id; |
@@ -150,8 +187,10 @@ struct genapic apic_x2apic_cluster = { | |||
150 | .send_IPI_all = x2apic_send_IPI_all, | 187 | .send_IPI_all = x2apic_send_IPI_all, |
151 | .send_IPI_allbutself = x2apic_send_IPI_allbutself, | 188 | .send_IPI_allbutself = x2apic_send_IPI_allbutself, |
152 | .send_IPI_mask = x2apic_send_IPI_mask, | 189 | .send_IPI_mask = x2apic_send_IPI_mask, |
190 | .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, | ||
153 | .send_IPI_self = x2apic_send_IPI_self, | 191 | .send_IPI_self = x2apic_send_IPI_self, |
154 | .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, | 192 | .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, |
193 | .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, | ||
155 | .phys_pkg_id = phys_pkg_id, | 194 | .phys_pkg_id = phys_pkg_id, |
156 | .get_apic_id = get_apic_id, | 195 | .get_apic_id = get_apic_id, |
157 | .set_apic_id = set_apic_id, | 196 | .set_apic_id = set_apic_id, |
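The old allbutself paths copied cpu_online_map onto the stack just to clear one bit; with NR_CPUS=4096 that copy alone is 512 bytes. Iterating and skipping the caller needs no temporary at all. Condensed sketch (send_one() is a hypothetical per-CPU sender):

	#include <linux/smp.h>

	/* Hypothetical per-CPU sender; a real driver writes the ICR here. */
	static void send_one(unsigned int cpu, int vector)
	{
		(void)cpu;
		(void)vector;
	}

	static void send_allbutself(int vector)
	{
		unsigned int this_cpu = smp_processor_id();
		unsigned int cpu;

		for_each_online_cpu(cpu)
			if (cpu != this_cpu)
				send_one(cpu, vector);
	}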
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c
index d042211768b7..62895cf315ff 100644
--- a/arch/x86/kernel/genx2apic_phys.c
+++ b/arch/x86/kernel/genx2apic_phys.c
@@ -29,16 +29,15 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
29 | 29 | ||
30 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | 30 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ |
31 | 31 | ||
32 | static cpumask_t x2apic_target_cpus(void) | 32 | static const struct cpumask *x2apic_target_cpus(void) |
33 | { | 33 | { |
34 | return cpumask_of_cpu(0); | 34 | return cpumask_of(0); |
35 | } | 35 | } |
36 | 36 | ||
37 | static cpumask_t x2apic_vector_allocation_domain(int cpu) | 37 | static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) |
38 | { | 38 | { |
39 | cpumask_t domain = CPU_MASK_NONE; | 39 | cpumask_clear(retmask); |
40 | cpu_set(cpu, domain); | 40 | cpumask_set_cpu(cpu, retmask); |
41 | return domain; | ||
42 | } | 41 | } |
43 | 42 | ||
44 | static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, | 43 | static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, |
@@ -54,32 +53,54 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, | |||
54 | x2apic_icr_write(cfg, apicid); | 53 | x2apic_icr_write(cfg, apicid); |
55 | } | 54 | } |
56 | 55 | ||
57 | static void x2apic_send_IPI_mask(cpumask_t mask, int vector) | 56 | static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) |
58 | { | 57 | { |
59 | unsigned long flags; | 58 | unsigned long flags; |
60 | unsigned long query_cpu; | 59 | unsigned long query_cpu; |
61 | 60 | ||
62 | local_irq_save(flags); | 61 | local_irq_save(flags); |
63 | for_each_cpu_mask(query_cpu, mask) { | 62 | for_each_cpu(query_cpu, mask) { |
64 | __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), | 63 | __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), |
65 | vector, APIC_DEST_PHYSICAL); | 64 | vector, APIC_DEST_PHYSICAL); |
66 | } | 65 | } |
67 | local_irq_restore(flags); | 66 | local_irq_restore(flags); |
68 | } | 67 | } |
69 | 68 | ||
70 | static void x2apic_send_IPI_allbutself(int vector) | 69 | static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, |
70 | int vector) | ||
71 | { | 71 | { |
72 | cpumask_t mask = cpu_online_map; | 72 | unsigned long flags; |
73 | unsigned long query_cpu; | ||
74 | unsigned long this_cpu = smp_processor_id(); | ||
75 | |||
76 | local_irq_save(flags); | ||
77 | for_each_cpu(query_cpu, mask) { | ||
78 | if (query_cpu != this_cpu) | ||
79 | __x2apic_send_IPI_dest( | ||
80 | per_cpu(x86_cpu_to_apicid, query_cpu), | ||
81 | vector, APIC_DEST_PHYSICAL); | ||
82 | } | ||
83 | local_irq_restore(flags); | ||
84 | } | ||
73 | 85 | ||
74 | cpu_clear(smp_processor_id(), mask); | 86 | static void x2apic_send_IPI_allbutself(int vector) |
87 | { | ||
88 | unsigned long flags; | ||
89 | unsigned long query_cpu; | ||
90 | unsigned long this_cpu = smp_processor_id(); | ||
75 | 91 | ||
76 | if (!cpus_empty(mask)) | 92 | local_irq_save(flags); |
77 | x2apic_send_IPI_mask(mask, vector); | 93 | for_each_online_cpu(query_cpu) |
94 | if (query_cpu != this_cpu) | ||
95 | __x2apic_send_IPI_dest( | ||
96 | per_cpu(x86_cpu_to_apicid, query_cpu), | ||
97 | vector, APIC_DEST_PHYSICAL); | ||
98 | local_irq_restore(flags); | ||
78 | } | 99 | } |
79 | 100 | ||
80 | static void x2apic_send_IPI_all(int vector) | 101 | static void x2apic_send_IPI_all(int vector) |
81 | { | 102 | { |
82 | x2apic_send_IPI_mask(cpu_online_map, vector); | 103 | x2apic_send_IPI_mask(cpu_online_mask, vector); |
83 | } | 104 | } |
84 | 105 | ||
85 | static int x2apic_apic_id_registered(void) | 106 | static int x2apic_apic_id_registered(void) |
@@ -87,7 +108,7 @@ static int x2apic_apic_id_registered(void) | |||
87 | return 1; | 108 | return 1; |
88 | } | 109 | } |
89 | 110 | ||
90 | static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) | 111 | static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) |
91 | { | 112 | { |
92 | int cpu; | 113 | int cpu; |
93 | 114 | ||
@@ -95,13 +116,30 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) | |||
95 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | 116 | * We're using fixed IRQ delivery, can only return one phys APIC ID. |
96 | * May as well be the first. | 117 | * May as well be the first. |
97 | */ | 118 | */ |
98 | cpu = first_cpu(cpumask); | 119 | cpu = cpumask_first(cpumask); |
99 | if ((unsigned)cpu < NR_CPUS) | 120 | if ((unsigned)cpu < nr_cpu_ids) |
100 | return per_cpu(x86_cpu_to_apicid, cpu); | 121 | return per_cpu(x86_cpu_to_apicid, cpu); |
101 | else | 122 | else |
102 | return BAD_APICID; | 123 | return BAD_APICID; |
103 | } | 124 | } |
104 | 125 | ||
126 | static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
127 | const struct cpumask *andmask) | ||
128 | { | ||
129 | int cpu; | ||
130 | |||
131 | /* | ||
132 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
133 | * May as well be the first. | ||
134 | */ | ||
135 | for_each_cpu_and(cpu, cpumask, andmask) | ||
136 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | ||
137 | break; | ||
138 | if (cpu < nr_cpu_ids) | ||
139 | return per_cpu(x86_cpu_to_apicid, cpu); | ||
140 | return BAD_APICID; | ||
141 | } | ||
142 | |||
105 | static unsigned int get_apic_id(unsigned long x) | 143 | static unsigned int get_apic_id(unsigned long x) |
106 | { | 144 | { |
107 | unsigned int id; | 145 | unsigned int id; |
@@ -145,8 +183,10 @@ struct genapic apic_x2apic_phys = { | |||
145 | .send_IPI_all = x2apic_send_IPI_all, | 183 | .send_IPI_all = x2apic_send_IPI_all, |
146 | .send_IPI_allbutself = x2apic_send_IPI_allbutself, | 184 | .send_IPI_allbutself = x2apic_send_IPI_allbutself, |
147 | .send_IPI_mask = x2apic_send_IPI_mask, | 185 | .send_IPI_mask = x2apic_send_IPI_mask, |
186 | .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, | ||
148 | .send_IPI_self = x2apic_send_IPI_self, | 187 | .send_IPI_self = x2apic_send_IPI_self, |
149 | .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, | 188 | .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, |
189 | .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, | ||
150 | .phys_pkg_id = phys_pkg_id, | 190 | .phys_pkg_id = phys_pkg_id, |
151 | .get_apic_id = get_apic_id, | 191 | .get_apic_id = get_apic_id, |
152 | .set_apic_id = set_apic_id, | 192 | .set_apic_id = set_apic_id, |
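cpu_mask_to_apicid_and() is the new hook for "mask AND allowed" lookups: for_each_cpu_and() walks the intersection lazily, so nothing is allocated and nothing lands on the stack. A minimal sketch of that search:

	#include <linux/cpumask.h>

	/* First online CPU in (a & b); >= nr_cpu_ids when there is none. */
	static int first_online_and(const struct cpumask *a,
				    const struct cpumask *b)
	{
		int cpu;

		for_each_cpu_and(cpu, a, b)
			if (cpumask_test_cpu(cpu, cpu_online_mask))
				break;
		return cpu;
	}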
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index dece17289731..b193e082f6ce 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -79,16 +79,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); | |||
79 | 79 | ||
80 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | 80 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ |
81 | 81 | ||
82 | static cpumask_t uv_target_cpus(void) | 82 | static const struct cpumask *uv_target_cpus(void) |
83 | { | 83 | { |
84 | return cpumask_of_cpu(0); | 84 | return cpumask_of(0); |
85 | } | 85 | } |
86 | 86 | ||
87 | static cpumask_t uv_vector_allocation_domain(int cpu) | 87 | static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask) |
88 | { | 88 | { |
89 | cpumask_t domain = CPU_MASK_NONE; | 89 | cpumask_clear(retmask); |
90 | cpu_set(cpu, domain); | 90 | cpumask_set_cpu(cpu, retmask); |
91 | return domain; | ||
92 | } | 91 | } |
93 | 92 | ||
94 | int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) | 93 | int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) |
@@ -127,28 +126,37 @@ static void uv_send_IPI_one(int cpu, int vector) | |||
127 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); | 126 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); |
128 | } | 127 | } |
129 | 128 | ||
130 | static void uv_send_IPI_mask(cpumask_t mask, int vector) | 129 | static void uv_send_IPI_mask(const struct cpumask *mask, int vector) |
131 | { | 130 | { |
132 | unsigned int cpu; | 131 | unsigned int cpu; |
133 | 132 | ||
134 | for_each_possible_cpu(cpu) | 133 | for_each_cpu(cpu, mask) |
135 | if (cpu_isset(cpu, mask)) | 134 | uv_send_IPI_one(cpu, vector); |
135 | } | ||
136 | |||
137 | static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) | ||
138 | { | ||
139 | unsigned int cpu; | ||
140 | unsigned int this_cpu = smp_processor_id(); | ||
141 | |||
142 | for_each_cpu(cpu, mask) | ||
143 | if (cpu != this_cpu) | ||
136 | uv_send_IPI_one(cpu, vector); | 144 | uv_send_IPI_one(cpu, vector); |
137 | } | 145 | } |
138 | 146 | ||
139 | static void uv_send_IPI_allbutself(int vector) | 147 | static void uv_send_IPI_allbutself(int vector) |
140 | { | 148 | { |
141 | cpumask_t mask = cpu_online_map; | 149 | unsigned int cpu; |
142 | 150 | unsigned int this_cpu = smp_processor_id(); | |
143 | cpu_clear(smp_processor_id(), mask); | ||
144 | 151 | ||
145 | if (!cpus_empty(mask)) | 152 | for_each_online_cpu(cpu) |
146 | uv_send_IPI_mask(mask, vector); | 153 | if (cpu != this_cpu) |
154 | uv_send_IPI_one(cpu, vector); | ||
147 | } | 155 | } |
148 | 156 | ||
149 | static void uv_send_IPI_all(int vector) | 157 | static void uv_send_IPI_all(int vector) |
150 | { | 158 | { |
151 | uv_send_IPI_mask(cpu_online_map, vector); | 159 | uv_send_IPI_mask(cpu_online_mask, vector); |
152 | } | 160 | } |
153 | 161 | ||
154 | static int uv_apic_id_registered(void) | 162 | static int uv_apic_id_registered(void) |
@@ -160,7 +168,7 @@ static void uv_init_apic_ldr(void) | |||
160 | { | 168 | { |
161 | } | 169 | } |
162 | 170 | ||
163 | static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) | 171 | static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask) |
164 | { | 172 | { |
165 | int cpu; | 173 | int cpu; |
166 | 174 | ||
@@ -168,13 +176,30 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) | |||
168 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | 176 | * We're using fixed IRQ delivery, can only return one phys APIC ID. |
169 | * May as well be the first. | 177 | * May as well be the first. |
170 | */ | 178 | */ |
171 | cpu = first_cpu(cpumask); | 179 | cpu = cpumask_first(cpumask); |
172 | if ((unsigned)cpu < nr_cpu_ids) | 180 | if ((unsigned)cpu < nr_cpu_ids) |
173 | return per_cpu(x86_cpu_to_apicid, cpu); | 181 | return per_cpu(x86_cpu_to_apicid, cpu); |
174 | else | 182 | else |
175 | return BAD_APICID; | 183 | return BAD_APICID; |
176 | } | 184 | } |
177 | 185 | ||
186 | static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
187 | const struct cpumask *andmask) | ||
188 | { | ||
189 | int cpu; | ||
190 | |||
191 | /* | ||
192 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
193 | * May as well be the first. | ||
194 | */ | ||
195 | for_each_cpu_and(cpu, cpumask, andmask) | ||
196 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | ||
197 | break; | ||
198 | if (cpu < nr_cpu_ids) | ||
199 | return per_cpu(x86_cpu_to_apicid, cpu); | ||
200 | return BAD_APICID; | ||
201 | } | ||
202 | |||
178 | static unsigned int get_apic_id(unsigned long x) | 203 | static unsigned int get_apic_id(unsigned long x) |
179 | { | 204 | { |
180 | unsigned int id; | 205 | unsigned int id; |
@@ -222,8 +247,10 @@ struct genapic apic_x2apic_uv_x = { | |||
222 | .send_IPI_all = uv_send_IPI_all, | 247 | .send_IPI_all = uv_send_IPI_all, |
223 | .send_IPI_allbutself = uv_send_IPI_allbutself, | 248 | .send_IPI_allbutself = uv_send_IPI_allbutself, |
224 | .send_IPI_mask = uv_send_IPI_mask, | 249 | .send_IPI_mask = uv_send_IPI_mask, |
250 | .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself, | ||
225 | .send_IPI_self = uv_send_IPI_self, | 251 | .send_IPI_self = uv_send_IPI_self, |
226 | .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, | 252 | .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, |
253 | .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and, | ||
227 | .phys_pkg_id = phys_pkg_id, | 254 | .phys_pkg_id = phys_pkg_id, |
228 | .get_apic_id = get_apic_id, | 255 | .get_apic_id = get_apic_id, |
229 | .set_apic_id = set_apic_id, | 256 | .set_apic_id = set_apic_id, |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 845ea097383e..cd759ad90690 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -248,7 +248,7 @@ static void hpet_legacy_clockevent_register(void) | |||
248 | * Start hpet with the boot cpu mask and make it | 248 | * Start hpet with the boot cpu mask and make it |
249 | * global after the IO_APIC has been initialized. | 249 | * global after the IO_APIC has been initialized. |
250 | */ | 250 | */ |
251 | hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); | 251 | hpet_clockevent.cpumask = cpumask_of(smp_processor_id()); |
252 | clockevents_register_device(&hpet_clockevent); | 252 | clockevents_register_device(&hpet_clockevent); |
253 | global_clock_event = &hpet_clockevent; | 253 | global_clock_event = &hpet_clockevent; |
254 | printk(KERN_DEBUG "hpet clockevent registered\n"); | 254 | printk(KERN_DEBUG "hpet clockevent registered\n"); |
@@ -303,7 +303,7 @@ static void hpet_set_mode(enum clock_event_mode mode, | |||
303 | struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); | 303 | struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); |
304 | hpet_setup_msi_irq(hdev->irq); | 304 | hpet_setup_msi_irq(hdev->irq); |
305 | disable_irq(hdev->irq); | 305 | disable_irq(hdev->irq); |
306 | irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu)); | 306 | irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); |
307 | enable_irq(hdev->irq); | 307 | enable_irq(hdev->irq); |
308 | } | 308 | } |
309 | break; | 309 | break; |
@@ -451,7 +451,7 @@ static int hpet_setup_irq(struct hpet_dev *dev) | |||
451 | return -1; | 451 | return -1; |
452 | 452 | ||
453 | disable_irq(dev->irq); | 453 | disable_irq(dev->irq); |
454 | irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu)); | 454 | irq_set_affinity(dev->irq, cpumask_of(dev->cpu)); |
455 | enable_irq(dev->irq); | 455 | enable_irq(dev->irq); |
456 | 456 | ||
457 | printk(KERN_DEBUG "hpet: %s irq %d for MSI\n", | 457 | printk(KERN_DEBUG "hpet: %s irq %d for MSI\n", |
@@ -502,7 +502,7 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu) | |||
502 | /* 5 usec minimum reprogramming delta. */ | 502 | /* 5 usec minimum reprogramming delta. */ |
503 | evt->min_delta_ns = 5000; | 503 | evt->min_delta_ns = 5000; |
504 | 504 | ||
505 | evt->cpumask = cpumask_of_cpu(hdev->cpu); | 505 | evt->cpumask = cpumask_of(hdev->cpu); |
506 | clockevents_register_device(evt); | 506 | clockevents_register_device(evt); |
507 | } | 507 | } |
508 | 508 | ||
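cpumask_of_cpu() manufactured a whole cpumask_t and returned it by value; cpumask_of() hands back a const pointer into a precomputed table of single-bit masks, which is why the clockevent and irq-affinity consumers above now store a pointer instead of copying up to NR_CPUS bits. Sketch:

	#include <linux/cpumask.h>

	/* No copy, no allocation: points into a static table of one-bit masks. */
	static const struct cpumask *single_cpu_mask(int cpu)
	{
		return cpumask_of(cpu);
	}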
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index c1b5e3ece1f2..10f92fb532f3 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -114,7 +114,7 @@ void __init setup_pit_timer(void) | |||
114 | * Start pit with the boot cpu mask and make it global after the | 114 | * Start pit with the boot cpu mask and make it global after the |
115 | * IO_APIC has been initialized. | 115 | * IO_APIC has been initialized. |
116 | */ | 116 | */ |
117 | pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); | 117 | pit_clockevent.cpumask = cpumask_of(smp_processor_id()); |
118 | pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, | 118 | pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, |
119 | pit_clockevent.shift); | 119 | pit_clockevent.shift); |
120 | pit_clockevent.max_delta_ns = | 120 | pit_clockevent.max_delta_ns = |
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 74917658b004..62ecfc991e1e 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -136,8 +136,8 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int cpu) | |||
136 | 136 | ||
137 | struct irq_cfg { | 137 | struct irq_cfg { |
138 | struct irq_pin_list *irq_2_pin; | 138 | struct irq_pin_list *irq_2_pin; |
139 | cpumask_t domain; | 139 | cpumask_var_t domain; |
140 | cpumask_t old_domain; | 140 | cpumask_var_t old_domain; |
141 | unsigned move_cleanup_count; | 141 | unsigned move_cleanup_count; |
142 | u8 vector; | 142 | u8 vector; |
143 | u8 move_in_progress : 1; | 143 | u8 move_in_progress : 1; |
@@ -152,22 +152,22 @@ static struct irq_cfg irq_cfgx[] = { | |||
152 | #else | 152 | #else |
153 | static struct irq_cfg irq_cfgx[NR_IRQS] = { | 153 | static struct irq_cfg irq_cfgx[NR_IRQS] = { |
154 | #endif | 154 | #endif |
155 | [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, | 155 | [0] = { .vector = IRQ0_VECTOR, }, |
156 | [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, | 156 | [1] = { .vector = IRQ1_VECTOR, }, |
157 | [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, | 157 | [2] = { .vector = IRQ2_VECTOR, }, |
158 | [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, | 158 | [3] = { .vector = IRQ3_VECTOR, }, |
159 | [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, | 159 | [4] = { .vector = IRQ4_VECTOR, }, |
160 | [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, | 160 | [5] = { .vector = IRQ5_VECTOR, }, |
161 | [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, | 161 | [6] = { .vector = IRQ6_VECTOR, }, |
162 | [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, | 162 | [7] = { .vector = IRQ7_VECTOR, }, |
163 | [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, | 163 | [8] = { .vector = IRQ8_VECTOR, }, |
164 | [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, | 164 | [9] = { .vector = IRQ9_VECTOR, }, |
165 | [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, | 165 | [10] = { .vector = IRQ10_VECTOR, }, |
166 | [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, | 166 | [11] = { .vector = IRQ11_VECTOR, }, |
167 | [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, | 167 | [12] = { .vector = IRQ12_VECTOR, }, |
168 | [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, | 168 | [13] = { .vector = IRQ13_VECTOR, }, |
169 | [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, | 169 | [14] = { .vector = IRQ14_VECTOR, }, |
170 | [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, | 170 | [15] = { .vector = IRQ15_VECTOR, }, |
171 | }; | 171 | }; |
172 | 172 | ||
173 | int __init arch_early_irq_init(void) | 173 | int __init arch_early_irq_init(void) |
@@ -183,6 +183,10 @@ int __init arch_early_irq_init(void) | |||
183 | for (i = 0; i < count; i++) { | 183 | for (i = 0; i < count; i++) { |
184 | desc = irq_to_desc(i); | 184 | desc = irq_to_desc(i); |
185 | desc->chip_data = &cfg[i]; | 185 | desc->chip_data = &cfg[i]; |
186 | alloc_bootmem_cpumask_var(&cfg[i].domain); | ||
187 | alloc_bootmem_cpumask_var(&cfg[i].old_domain); | ||
188 | if (i < NR_IRQS_LEGACY) | ||
189 | cpumask_setall(cfg[i].domain); | ||
186 | } | 190 | } |
187 | 191 | ||
188 | return 0; | 192 | return 0; |
@@ -209,6 +213,20 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu) | |||
209 | node = cpu_to_node(cpu); | 213 | node = cpu_to_node(cpu); |
210 | 214 | ||
211 | cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); | 215 | cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); |
216 | if (cfg) { | ||
217 | /* FIXME: needs alloc_cpumask_var_node() */ | ||
218 | if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) { | ||
219 | kfree(cfg); | ||
220 | cfg = NULL; | ||
221 | } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) { | ||
222 | free_cpumask_var(cfg->domain); | ||
223 | kfree(cfg); | ||
224 | cfg = NULL; | ||
225 | } else { | ||
226 | cpumask_clear(cfg->domain); | ||
227 | cpumask_clear(cfg->old_domain); | ||
228 | } | ||
229 | } | ||
212 | printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node); | 230 | printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node); |
213 | 231 | ||
214 | return cfg; | 232 | return cfg; |
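With CONFIG_CPUMASK_OFFSTACK (selected by MAXSMP), cpumask_var_t is a real pointer that alloc_cpumask_var() must fill in; without it the type is an ordinary array and the allocation degenerates to "return true". Note also that alloc_cpumask_var() does not zero the bits, hence the explicit cpumask_clear() calls above. The unwind idiom, sketched with an illustrative struct two_masks:

	#include <linux/cpumask.h>
	#include <linux/slab.h>

	struct two_masks {
		cpumask_var_t a;
		cpumask_var_t b;
	};

	static struct two_masks *two_masks_alloc(gfp_t gfp)
	{
		struct two_masks *t = kzalloc(sizeof(*t), gfp);

		if (!t)
			return NULL;
		if (!alloc_cpumask_var(&t->a, gfp))
			goto free_t;
		if (!alloc_cpumask_var(&t->b, gfp))
			goto free_a;
		cpumask_clear(t->a);
		cpumask_clear(t->b);
		return t;

	free_a:
		free_cpumask_var(t->a);
	free_t:
		kfree(t);
		return NULL;
	}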
@@ -333,13 +351,14 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) | |||
333 | } | 351 | } |
334 | } | 352 | } |
335 | 353 | ||
336 | static void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask) | 354 | static void |
355 | set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) | ||
337 | { | 356 | { |
338 | struct irq_cfg *cfg = desc->chip_data; | 357 | struct irq_cfg *cfg = desc->chip_data; |
339 | 358 | ||
340 | if (!cfg->move_in_progress) { | 359 | if (!cfg->move_in_progress) { |
341 | /* it means that domain is not changed */ | 360 | /* it means that domain is not changed */ |
342 | if (!cpus_intersects(desc->affinity, mask)) | 361 | if (!cpumask_intersects(&desc->affinity, mask)) |
343 | cfg->move_desc_pending = 1; | 362 | cfg->move_desc_pending = 1; |
344 | } | 363 | } |
345 | } | 364 | } |
@@ -354,7 +373,8 @@ static struct irq_cfg *irq_cfg(unsigned int irq) | |||
354 | #endif | 373 | #endif |
355 | 374 | ||
356 | #ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC | 375 | #ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC |
357 | static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask) | 376 | static inline void |
377 | set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) | ||
358 | { | 378 | { |
359 | } | 379 | } |
360 | #endif | 380 | #endif |
@@ -485,6 +505,26 @@ static void ioapic_mask_entry(int apic, int pin) | |||
485 | } | 505 | } |
486 | 506 | ||
487 | #ifdef CONFIG_SMP | 507 | #ifdef CONFIG_SMP |
508 | static void send_cleanup_vector(struct irq_cfg *cfg) | ||
509 | { | ||
510 | cpumask_var_t cleanup_mask; | ||
511 | |||
512 | if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { | ||
513 | unsigned int i; | ||
514 | cfg->move_cleanup_count = 0; | ||
515 | for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) | ||
516 | cfg->move_cleanup_count++; | ||
517 | for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) | ||
518 | send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); | ||
519 | } else { | ||
520 | cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); | ||
521 | cfg->move_cleanup_count = cpumask_weight(cleanup_mask); | ||
522 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
523 | free_cpumask_var(cleanup_mask); | ||
524 | } | ||
525 | cfg->move_in_progress = 0; | ||
526 | } | ||
527 | |||
488 | static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) | 528 | static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) |
489 | { | 529 | { |
490 | int apic, pin; | 530 | int apic, pin; |
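send_cleanup_vector() above also shows the agreed fallback when a GFP_ATOMIC cpumask allocation fails: redo the computation by walking the intersection instead of materializing it, so the atomic path degrades gracefully rather than erroring out. Reduced sketch (count_online_in() is a hypothetical name):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	/* Count online CPUs in *src; prefers a scratch mask but never fails. */
	static unsigned int count_online_in(const struct cpumask *src)
	{
		cpumask_var_t tmp;
		unsigned int count = 0;

		if (likely(alloc_cpumask_var(&tmp, GFP_ATOMIC))) {
			cpumask_and(tmp, src, cpu_online_mask);
			count = cpumask_weight(tmp);
			free_cpumask_var(tmp);
		} else {
			int cpu;

			for_each_cpu_and(cpu, src, cpu_online_mask)
				count++;
		}
		return count;
	}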
@@ -520,41 +560,55 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
520 | } | 560 | } |
521 | } | 561 | } |
522 | 562 | ||
523 | static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask); | 563 | static int |
564 | assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); | ||
524 | 565 | ||
525 | static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask) | 566 | /* |
567 | * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid | ||
568 | * of that, or returns BAD_APICID and leaves desc->affinity untouched. | ||
569 | */ | ||
570 | static unsigned int | ||
571 | set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) | ||
526 | { | 572 | { |
527 | struct irq_cfg *cfg; | 573 | struct irq_cfg *cfg; |
528 | unsigned long flags; | ||
529 | unsigned int dest; | ||
530 | cpumask_t tmp; | ||
531 | unsigned int irq; | 574 | unsigned int irq; |
532 | 575 | ||
533 | cpus_and(tmp, mask, cpu_online_map); | 576 | if (!cpumask_intersects(mask, cpu_online_mask)) |
534 | if (cpus_empty(tmp)) | 577 | return BAD_APICID; |
535 | return; | ||
536 | 578 | ||
537 | irq = desc->irq; | 579 | irq = desc->irq; |
538 | cfg = desc->chip_data; | 580 | cfg = desc->chip_data; |
539 | if (assign_irq_vector(irq, cfg, mask)) | 581 | if (assign_irq_vector(irq, cfg, mask)) |
540 | return; | 582 | return BAD_APICID; |
541 | 583 | ||
584 | cpumask_and(&desc->affinity, cfg->domain, mask); | ||
542 | set_extra_move_desc(desc, mask); | 585 | set_extra_move_desc(desc, mask); |
586 | return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask); | ||
587 | } | ||
543 | 588 | ||
544 | cpus_and(tmp, cfg->domain, mask); | 589 | static void |
545 | dest = cpu_mask_to_apicid(tmp); | 590 | set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) |
546 | /* | 591 | { |
547 | * Only the high 8 bits are valid. | 592 | struct irq_cfg *cfg; |
548 | */ | 593 | unsigned long flags; |
549 | dest = SET_APIC_LOGICAL_ID(dest); | 594 | unsigned int dest; |
595 | unsigned int irq; | ||
596 | |||
597 | irq = desc->irq; | ||
598 | cfg = desc->chip_data; | ||
550 | 599 | ||
551 | spin_lock_irqsave(&ioapic_lock, flags); | 600 | spin_lock_irqsave(&ioapic_lock, flags); |
552 | __target_IO_APIC_irq(irq, dest, cfg); | 601 | dest = set_desc_affinity(desc, mask); |
553 | desc->affinity = mask; | 602 | if (dest != BAD_APICID) { |
603 | /* Only the high 8 bits are valid. */ | ||
604 | dest = SET_APIC_LOGICAL_ID(dest); | ||
605 | __target_IO_APIC_irq(irq, dest, cfg); | ||
606 | } | ||
554 | spin_unlock_irqrestore(&ioapic_lock, flags); | 607 | spin_unlock_irqrestore(&ioapic_lock, flags); |
555 | } | 608 | } |
556 | 609 | ||
557 | static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | 610 | static void |
611 | set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) | ||
558 | { | 612 | { |
559 | struct irq_desc *desc; | 613 | struct irq_desc *desc; |
560 | 614 | ||
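Factoring out set_desc_affinity() gives every ->set_affinity method one contract: on success desc->affinity is updated and a destination ID comes back; on failure BAD_APICID is returned and the descriptor is untouched, so callers can keep holding their own lock and simply skip the reprogramming. A generic sketch of that sentinel contract (BAD_DEST_ID and the first-CPU lookup are illustrative stand-ins):

	#include <linux/cpumask.h>

	#define BAD_DEST_ID 0xFFFFu	/* illustrative sentinel, like BAD_APICID */

	/* Update *affinity only if the request reaches an online CPU;
	 * otherwise leave it untouched and signal failure via the sentinel. */
	static unsigned int try_set_affinity(struct cpumask *affinity,
					     const struct cpumask *request)
	{
		if (!cpumask_intersects(request, cpu_online_mask))
			return BAD_DEST_ID;

		cpumask_and(affinity, request, cpu_online_mask);
		return cpumask_first(affinity);	/* stand-in for the APIC-ID lookup */
	}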
@@ -1222,7 +1276,8 @@ void unlock_vector_lock(void) | |||
1222 | spin_unlock(&vector_lock); | 1276 | spin_unlock(&vector_lock); |
1223 | } | 1277 | } |
1224 | 1278 | ||
1225 | static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) | 1279 | static int |
1280 | __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | ||
1226 | { | 1281 | { |
1227 | /* | 1282 | /* |
1228 | * NOTE! The local APIC isn't very good at handling | 1283 | * NOTE! The local APIC isn't very good at handling |
@@ -1237,49 +1292,49 @@ static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) | |||
1237 | */ | 1292 | */ |
1238 | static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; | 1293 | static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; |
1239 | unsigned int old_vector; | 1294 | unsigned int old_vector; |
1240 | int cpu; | 1295 | int cpu, err; |
1296 | cpumask_var_t tmp_mask; | ||
1241 | 1297 | ||
1242 | if ((cfg->move_in_progress) || cfg->move_cleanup_count) | 1298 | if ((cfg->move_in_progress) || cfg->move_cleanup_count) |
1243 | return -EBUSY; | 1299 | return -EBUSY; |
1244 | 1300 | ||
1245 | /* Only try and allocate irqs on cpus that are present */ | 1301 | if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) |
1246 | cpus_and(mask, mask, cpu_online_map); | 1302 | return -ENOMEM; |
1247 | 1303 | ||
1248 | old_vector = cfg->vector; | 1304 | old_vector = cfg->vector; |
1249 | if (old_vector) { | 1305 | if (old_vector) { |
1250 | cpumask_t tmp; | 1306 | cpumask_and(tmp_mask, mask, cpu_online_mask); |
1251 | cpus_and(tmp, cfg->domain, mask); | 1307 | cpumask_and(tmp_mask, cfg->domain, tmp_mask); |
1252 | if (!cpus_empty(tmp)) | 1308 | if (!cpumask_empty(tmp_mask)) { |
1309 | free_cpumask_var(tmp_mask); | ||
1253 | return 0; | 1310 | return 0; |
1311 | } | ||
1254 | } | 1312 | } |
1255 | 1313 | ||
1256 | for_each_cpu_mask_nr(cpu, mask) { | 1314 | /* Only try and allocate irqs on cpus that are present */ |
1257 | cpumask_t domain, new_mask; | 1315 | err = -ENOSPC; |
1316 | for_each_cpu_and(cpu, mask, cpu_online_mask) { | ||
1258 | int new_cpu; | 1317 | int new_cpu; |
1259 | int vector, offset; | 1318 | int vector, offset; |
1260 | 1319 | ||
1261 | domain = vector_allocation_domain(cpu); | 1320 | vector_allocation_domain(cpu, tmp_mask); |
1262 | cpus_and(new_mask, domain, cpu_online_map); | ||
1263 | 1321 | ||
1264 | vector = current_vector; | 1322 | vector = current_vector; |
1265 | offset = current_offset; | 1323 | offset = current_offset; |
1266 | next: | 1324 | next: |
1267 | vector += 8; | 1325 | vector += 8; |
1268 | if (vector >= first_system_vector) { | 1326 | if (vector >= first_system_vector) { |
1269 | /* If we run out of vectors on large boxen, must share them. */ | 1327 | /* If out of vectors on large boxen, must share them. */ |
1270 | offset = (offset + 1) % 8; | 1328 | offset = (offset + 1) % 8; |
1271 | vector = FIRST_DEVICE_VECTOR + offset; | 1329 | vector = FIRST_DEVICE_VECTOR + offset; |
1272 | } | 1330 | } |
1273 | if (unlikely(current_vector == vector)) | 1331 | if (unlikely(current_vector == vector)) |
1274 | continue; | 1332 | continue; |
1275 | #ifdef CONFIG_X86_64 | 1333 | |
1276 | if (vector == IA32_SYSCALL_VECTOR) | 1334 | if (test_bit(vector, used_vectors)) |
1277 | goto next; | ||
1278 | #else | ||
1279 | if (vector == SYSCALL_VECTOR) | ||
1280 | goto next; | 1335 | goto next; |
1281 | #endif | 1336 | |
1282 | for_each_cpu_mask_nr(new_cpu, new_mask) | 1337 | for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) |
1283 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) | 1338 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) |
1284 | goto next; | 1339 | goto next; |
1285 | /* Found one! */ | 1340 | /* Found one! */ |
@@ -1287,18 +1342,21 @@ next: | |||
1287 | current_offset = offset; | 1342 | current_offset = offset; |
1288 | if (old_vector) { | 1343 | if (old_vector) { |
1289 | cfg->move_in_progress = 1; | 1344 | cfg->move_in_progress = 1; |
1290 | cfg->old_domain = cfg->domain; | 1345 | cpumask_copy(cfg->old_domain, cfg->domain); |
1291 | } | 1346 | } |
1292 | for_each_cpu_mask_nr(new_cpu, new_mask) | 1347 | for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) |
1293 | per_cpu(vector_irq, new_cpu)[vector] = irq; | 1348 | per_cpu(vector_irq, new_cpu)[vector] = irq; |
1294 | cfg->vector = vector; | 1349 | cfg->vector = vector; |
1295 | cfg->domain = domain; | 1350 | cpumask_copy(cfg->domain, tmp_mask); |
1296 | return 0; | 1351 | err = 0; |
1352 | break; | ||
1297 | } | 1353 | } |
1298 | return -ENOSPC; | 1354 | free_cpumask_var(tmp_mask); |
1355 | return err; | ||
1299 | } | 1356 | } |
1300 | 1357 | ||
1301 | static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) | 1358 | static int |
1359 | assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | ||
1302 | { | 1360 | { |
1303 | int err; | 1361 | int err; |
1304 | unsigned long flags; | 1362 | unsigned long flags; |
@@ -1311,23 +1369,20 @@ static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) | |||
1311 | 1369 | ||
1312 | static void __clear_irq_vector(int irq, struct irq_cfg *cfg) | 1370 | static void __clear_irq_vector(int irq, struct irq_cfg *cfg) |
1313 | { | 1371 | { |
1314 | cpumask_t mask; | ||
1315 | int cpu, vector; | 1372 | int cpu, vector; |
1316 | 1373 | ||
1317 | BUG_ON(!cfg->vector); | 1374 | BUG_ON(!cfg->vector); |
1318 | 1375 | ||
1319 | vector = cfg->vector; | 1376 | vector = cfg->vector; |
1320 | cpus_and(mask, cfg->domain, cpu_online_map); | 1377 | for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) |
1321 | for_each_cpu_mask_nr(cpu, mask) | ||
1322 | per_cpu(vector_irq, cpu)[vector] = -1; | 1378 | per_cpu(vector_irq, cpu)[vector] = -1; |
1323 | 1379 | ||
1324 | cfg->vector = 0; | 1380 | cfg->vector = 0; |
1325 | cpus_clear(cfg->domain); | 1381 | cpumask_clear(cfg->domain); |
1326 | 1382 | ||
1327 | if (likely(!cfg->move_in_progress)) | 1383 | if (likely(!cfg->move_in_progress)) |
1328 | return; | 1384 | return; |
1329 | cpus_and(mask, cfg->old_domain, cpu_online_map); | 1385 | for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { |
1330 | for_each_cpu_mask_nr(cpu, mask) { | ||
1331 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; | 1386 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; |
1332 | vector++) { | 1387 | vector++) { |
1333 | if (per_cpu(vector_irq, cpu)[vector] != irq) | 1388 | if (per_cpu(vector_irq, cpu)[vector] != irq) |
@@ -1350,7 +1405,7 @@ void __setup_vector_irq(int cpu) | |||
1350 | /* Mark the inuse vectors */ | 1405 | /* Mark the inuse vectors */ |
1351 | for_each_irq_desc(irq, desc) { | 1406 | for_each_irq_desc(irq, desc) { |
1352 | cfg = desc->chip_data; | 1407 | cfg = desc->chip_data; |
1353 | if (!cpu_isset(cpu, cfg->domain)) | 1408 | if (!cpumask_test_cpu(cpu, cfg->domain)) |
1354 | continue; | 1409 | continue; |
1355 | vector = cfg->vector; | 1410 | vector = cfg->vector; |
1356 | per_cpu(vector_irq, cpu)[vector] = irq; | 1411 | per_cpu(vector_irq, cpu)[vector] = irq; |
@@ -1362,7 +1417,7 @@ void __setup_vector_irq(int cpu) | |||
1362 | continue; | 1417 | continue; |
1363 | 1418 | ||
1364 | cfg = irq_cfg(irq); | 1419 | cfg = irq_cfg(irq); |
1365 | if (!cpu_isset(cpu, cfg->domain)) | 1420 | if (!cpumask_test_cpu(cpu, cfg->domain)) |
1366 | per_cpu(vector_irq, cpu)[vector] = -1; | 1421 | per_cpu(vector_irq, cpu)[vector] = -1; |
1367 | } | 1422 | } |
1368 | } | 1423 | } |
@@ -1498,18 +1553,17 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de | |||
1498 | { | 1553 | { |
1499 | struct irq_cfg *cfg; | 1554 | struct irq_cfg *cfg; |
1500 | struct IO_APIC_route_entry entry; | 1555 | struct IO_APIC_route_entry entry; |
1501 | cpumask_t mask; | 1556 | unsigned int dest; |
1502 | 1557 | ||
1503 | if (!IO_APIC_IRQ(irq)) | 1558 | if (!IO_APIC_IRQ(irq)) |
1504 | return; | 1559 | return; |
1505 | 1560 | ||
1506 | cfg = desc->chip_data; | 1561 | cfg = desc->chip_data; |
1507 | 1562 | ||
1508 | mask = TARGET_CPUS; | 1563 | if (assign_irq_vector(irq, cfg, TARGET_CPUS)) |
1509 | if (assign_irq_vector(irq, cfg, mask)) | ||
1510 | return; | 1564 | return; |
1511 | 1565 | ||
1512 | cpus_and(mask, cfg->domain, mask); | 1566 | dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); |
1513 | 1567 | ||
1514 | apic_printk(APIC_VERBOSE,KERN_DEBUG | 1568 | apic_printk(APIC_VERBOSE,KERN_DEBUG |
1515 | "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " | 1569 | "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " |
@@ -1519,8 +1573,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de | |||
1519 | 1573 | ||
1520 | 1574 | ||
1521 | if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, | 1575 | if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, |
1522 | cpu_mask_to_apicid(mask), trigger, polarity, | 1576 | dest, trigger, polarity, cfg->vector)) { |
1523 | cfg->vector)) { | ||
1524 | printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", | 1577 | printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", |
1525 | mp_ioapics[apic].mp_apicid, pin); | 1578 | mp_ioapics[apic].mp_apicid, pin); |
1526 | __clear_irq_vector(irq, cfg); | 1579 | __clear_irq_vector(irq, cfg); |
@@ -2240,7 +2293,7 @@ static int ioapic_retrigger_irq(unsigned int irq) | |||
2240 | unsigned long flags; | 2293 | unsigned long flags; |
2241 | 2294 | ||
2242 | spin_lock_irqsave(&vector_lock, flags); | 2295 | spin_lock_irqsave(&vector_lock, flags); |
2243 | send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); | 2296 | send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); |
2244 | spin_unlock_irqrestore(&vector_lock, flags); | 2297 | spin_unlock_irqrestore(&vector_lock, flags); |
2245 | 2298 | ||
2246 | return 1; | 2299 | return 1; |
@@ -2289,18 +2342,17 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration); | |||
2289 | * as simple as edge triggered migration and we can do the irq migration | 2342 | * as simple as edge triggered migration and we can do the irq migration |
2290 | * with a simple atomic update to IO-APIC RTE. | 2343 | * with a simple atomic update to IO-APIC RTE. |
2291 | */ | 2344 | */ |
2292 | static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) | 2345 | static void |
2346 | migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | ||
2293 | { | 2347 | { |
2294 | struct irq_cfg *cfg; | 2348 | struct irq_cfg *cfg; |
2295 | cpumask_t tmp, cleanup_mask; | ||
2296 | struct irte irte; | 2349 | struct irte irte; |
2297 | int modify_ioapic_rte; | 2350 | int modify_ioapic_rte; |
2298 | unsigned int dest; | 2351 | unsigned int dest; |
2299 | unsigned long flags; | 2352 | unsigned long flags; |
2300 | unsigned int irq; | 2353 | unsigned int irq; |
2301 | 2354 | ||
2302 | cpus_and(tmp, mask, cpu_online_map); | 2355 | if (!cpumask_intersects(mask, cpu_online_mask)) |
2303 | if (cpus_empty(tmp)) | ||
2304 | return; | 2356 | return; |
2305 | 2357 | ||
2306 | irq = desc->irq; | 2358 | irq = desc->irq; |
@@ -2313,8 +2365,7 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) | |||
2313 | 2365 | ||
2314 | set_extra_move_desc(desc, mask); | 2366 | set_extra_move_desc(desc, mask); |
2315 | 2367 | ||
2316 | cpus_and(tmp, cfg->domain, mask); | 2368 | dest = cpu_mask_to_apicid_and(cfg->domain, mask); |
2317 | dest = cpu_mask_to_apicid(tmp); | ||
2318 | 2369 | ||
2319 | modify_ioapic_rte = desc->status & IRQ_LEVEL; | 2370 | modify_ioapic_rte = desc->status & IRQ_LEVEL; |
2320 | if (modify_ioapic_rte) { | 2371 | if (modify_ioapic_rte) { |
@@ -2331,14 +2382,10 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) | |||
2331 | */ | 2382 | */ |
2332 | modify_irte(irq, &irte); | 2383 | modify_irte(irq, &irte); |
2333 | 2384 | ||
2334 | if (cfg->move_in_progress) { | 2385 | if (cfg->move_in_progress) |
2335 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | 2386 | send_cleanup_vector(cfg); |
2336 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | ||
2337 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
2338 | cfg->move_in_progress = 0; | ||
2339 | } | ||
2340 | 2387 | ||
2341 | desc->affinity = mask; | 2388 | cpumask_copy(&desc->affinity, mask); |
2342 | } | 2389 | } |
2343 | 2390 | ||
2344 | static int migrate_irq_remapped_level_desc(struct irq_desc *desc) | 2391 | static int migrate_irq_remapped_level_desc(struct irq_desc *desc) |
@@ -2360,11 +2407,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc) | |||
2360 | } | 2407 | } |
2361 | 2408 | ||
2362 | /* everything is clear. we have right of way */ | 2409 | /* everything is clear. we have right of way */ |
2363 | migrate_ioapic_irq_desc(desc, desc->pending_mask); | 2410 | migrate_ioapic_irq_desc(desc, &desc->pending_mask); |
2364 | 2411 | ||
2365 | ret = 0; | 2412 | ret = 0; |
2366 | desc->status &= ~IRQ_MOVE_PENDING; | 2413 | desc->status &= ~IRQ_MOVE_PENDING; |
2367 | cpus_clear(desc->pending_mask); | 2414 | cpumask_clear(&desc->pending_mask); |
2368 | 2415 | ||
2369 | unmask: | 2416 | unmask: |
2370 | unmask_IO_APIC_irq_desc(desc); | 2417 | unmask_IO_APIC_irq_desc(desc); |
@@ -2389,7 +2436,7 @@ static void ir_irq_migration(struct work_struct *work) | |||
2389 | continue; | 2436 | continue; |
2390 | } | 2437 | } |
2391 | 2438 | ||
2392 | desc->chip->set_affinity(irq, desc->pending_mask); | 2439 | desc->chip->set_affinity(irq, &desc->pending_mask); |
2393 | spin_unlock_irqrestore(&desc->lock, flags); | 2440 | spin_unlock_irqrestore(&desc->lock, flags); |
2394 | } | 2441 | } |
2395 | } | 2442 | } |
@@ -2398,18 +2445,20 @@ static void ir_irq_migration(struct work_struct *work) | |||
2398 | /* | 2445 | /* |
2399 | * Migrates the IRQ destination in the process context. | 2446 | * Migrates the IRQ destination in the process context. |
2400 | */ | 2447 | */ |
2401 | static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask) | 2448 | static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, |
2449 | const struct cpumask *mask) | ||
2402 | { | 2450 | { |
2403 | if (desc->status & IRQ_LEVEL) { | 2451 | if (desc->status & IRQ_LEVEL) { |
2404 | desc->status |= IRQ_MOVE_PENDING; | 2452 | desc->status |= IRQ_MOVE_PENDING; |
2405 | desc->pending_mask = mask; | 2453 | cpumask_copy(&desc->pending_mask, mask); |
2406 | migrate_irq_remapped_level_desc(desc); | 2454 | migrate_irq_remapped_level_desc(desc); |
2407 | return; | 2455 | return; |
2408 | } | 2456 | } |
2409 | 2457 | ||
2410 | migrate_ioapic_irq_desc(desc, mask); | 2458 | migrate_ioapic_irq_desc(desc, mask); |
2411 | } | 2459 | } |
2412 | static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | 2460 | static void set_ir_ioapic_affinity_irq(unsigned int irq, |
2461 | const struct cpumask *mask) | ||
2413 | { | 2462 | { |
2414 | struct irq_desc *desc = irq_to_desc(irq); | 2463 | struct irq_desc *desc = irq_to_desc(irq); |
2415 | 2464 | ||
@@ -2444,7 +2493,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
2444 | if (!cfg->move_cleanup_count) | 2493 | if (!cfg->move_cleanup_count) |
2445 | goto unlock; | 2494 | goto unlock; |
2446 | 2495 | ||
2447 | if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) | 2496 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) |
2448 | goto unlock; | 2497 | goto unlock; |
2449 | 2498 | ||
2450 | __get_cpu_var(vector_irq)[vector] = -1; | 2499 | __get_cpu_var(vector_irq)[vector] = -1; |
@@ -2481,20 +2530,14 @@ static void irq_complete_move(struct irq_desc **descp) | |||
2481 | 2530 | ||
2482 | vector = ~get_irq_regs()->orig_ax; | 2531 | vector = ~get_irq_regs()->orig_ax; |
2483 | me = smp_processor_id(); | 2532 | me = smp_processor_id(); |
2484 | if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) { | ||
2485 | cpumask_t cleanup_mask; | ||
2486 | |||
2487 | #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC | 2533 | #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC |
2488 | *descp = desc = move_irq_desc(desc, me); | 2534 | *descp = desc = move_irq_desc(desc, me); |
2489 | /* get the new one */ | 2535 | /* get the new one */ |
2490 | cfg = desc->chip_data; | 2536 | cfg = desc->chip_data; |
2491 | #endif | 2537 | #endif |
2492 | 2538 | ||
2493 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | 2539 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) |
2494 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | 2540 | send_cleanup_vector(cfg); |
2495 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
2496 | cfg->move_in_progress = 0; | ||
2497 | } | ||
2498 | } | 2541 | } |
2499 | #else | 2542 | #else |
2500 | static inline void irq_complete_move(struct irq_desc **descp) {} | 2543 | static inline void irq_complete_move(struct irq_desc **descp) {} |
@@ -3216,16 +3259,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms | |||
3216 | struct irq_cfg *cfg; | 3259 | struct irq_cfg *cfg; |
3217 | int err; | 3260 | int err; |
3218 | unsigned dest; | 3261 | unsigned dest; |
3219 | cpumask_t tmp; | ||
3220 | 3262 | ||
3221 | cfg = irq_cfg(irq); | 3263 | cfg = irq_cfg(irq); |
3222 | tmp = TARGET_CPUS; | 3264 | err = assign_irq_vector(irq, cfg, TARGET_CPUS); |
3223 | err = assign_irq_vector(irq, cfg, tmp); | ||
3224 | if (err) | 3265 | if (err) |
3225 | return err; | 3266 | return err; |
3226 | 3267 | ||
3227 | cpus_and(tmp, cfg->domain, tmp); | 3268 | dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); |
3228 | dest = cpu_mask_to_apicid(tmp); | ||
3229 | 3269 | ||
3230 | #ifdef CONFIG_INTR_REMAP | 3270 | #ifdef CONFIG_INTR_REMAP |
3231 | if (irq_remapped(irq)) { | 3271 | if (irq_remapped(irq)) { |
@@ -3279,26 +3319,18 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms | |||
3279 | } | 3319 | } |
3280 | 3320 | ||
3281 | #ifdef CONFIG_SMP | 3321 | #ifdef CONFIG_SMP |
3282 | static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | 3322 | static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) |
3283 | { | 3323 | { |
3284 | struct irq_desc *desc = irq_to_desc(irq); | 3324 | struct irq_desc *desc = irq_to_desc(irq); |
3285 | struct irq_cfg *cfg; | 3325 | struct irq_cfg *cfg; |
3286 | struct msi_msg msg; | 3326 | struct msi_msg msg; |
3287 | unsigned int dest; | 3327 | unsigned int dest; |
3288 | cpumask_t tmp; | ||
3289 | 3328 | ||
3290 | cpus_and(tmp, mask, cpu_online_map); | 3329 | dest = set_desc_affinity(desc, mask); |
3291 | if (cpus_empty(tmp)) | 3330 | if (dest == BAD_APICID) |
3292 | return; | 3331 | return; |
3293 | 3332 | ||
3294 | cfg = desc->chip_data; | 3333 | cfg = desc->chip_data; |
3295 | if (assign_irq_vector(irq, cfg, mask)) | ||
3296 | return; | ||
3297 | |||
3298 | set_extra_move_desc(desc, mask); | ||
3299 | |||
3300 | cpus_and(tmp, cfg->domain, mask); | ||
3301 | dest = cpu_mask_to_apicid(tmp); | ||
3302 | 3334 | ||
3303 | read_msi_msg_desc(desc, &msg); | 3335 | read_msi_msg_desc(desc, &msg); |
3304 | 3336 | ||
@@ -3308,37 +3340,27 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
3308 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3340 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
3309 | 3341 | ||
3310 | write_msi_msg_desc(desc, &msg); | 3342 | write_msi_msg_desc(desc, &msg); |
3311 | desc->affinity = mask; | ||
3312 | } | 3343 | } |
3313 | #ifdef CONFIG_INTR_REMAP | 3344 | #ifdef CONFIG_INTR_REMAP |
3314 | /* | 3345 | /* |
3315 | * Migrate the MSI irq to another cpumask. This migration is | 3346 | * Migrate the MSI irq to another cpumask. This migration is |
3316 | * done in the process context using interrupt-remapping hardware. | 3347 | * done in the process context using interrupt-remapping hardware. |
3317 | */ | 3348 | */ |
3318 | static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | 3349 | static void |
3350 | ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | ||
3319 | { | 3351 | { |
3320 | struct irq_desc *desc = irq_to_desc(irq); | 3352 | struct irq_desc *desc = irq_to_desc(irq); |
3321 | struct irq_cfg *cfg; | 3353 | struct irq_cfg *cfg = desc->chip_data; |
3322 | unsigned int dest; | 3354 | unsigned int dest; |
3323 | cpumask_t tmp, cleanup_mask; | ||
3324 | struct irte irte; | 3355 | struct irte irte; |
3325 | 3356 | ||
3326 | cpus_and(tmp, mask, cpu_online_map); | ||
3327 | if (cpus_empty(tmp)) | ||
3328 | return; | ||
3329 | |||
3330 | if (get_irte(irq, &irte)) | 3357 | if (get_irte(irq, &irte)) |
3331 | return; | 3358 | return; |
3332 | 3359 | ||
3333 | cfg = desc->chip_data; | 3360 | dest = set_desc_affinity(desc, mask); |
3334 | if (assign_irq_vector(irq, cfg, mask)) | 3361 | if (dest == BAD_APICID) |
3335 | return; | 3362 | return; |
3336 | 3363 | ||
3337 | set_extra_move_desc(desc, mask); | ||
3338 | |||
3339 | cpus_and(tmp, cfg->domain, mask); | ||
3340 | dest = cpu_mask_to_apicid(tmp); | ||
3341 | |||
3342 | irte.vector = cfg->vector; | 3364 | irte.vector = cfg->vector; |
3343 | irte.dest_id = IRTE_DEST(dest); | 3365 | irte.dest_id = IRTE_DEST(dest); |
3344 | 3366 | ||
@@ -3352,14 +3374,8 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
3352 | * at the new destination. So, time to cleanup the previous | 3374 | * at the new destination. So, time to cleanup the previous |
3353 | * vector allocation. | 3375 | * vector allocation. |
3354 | */ | 3376 | */ |
3355 | if (cfg->move_in_progress) { | 3377 | if (cfg->move_in_progress) |
3356 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | 3378 | send_cleanup_vector(cfg); |
3357 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | ||
3358 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
3359 | cfg->move_in_progress = 0; | ||
3360 | } | ||
3361 | |||
3362 | desc->affinity = mask; | ||
3363 | } | 3379 | } |
3364 | 3380 | ||
3365 | #endif | 3381 | #endif |
@@ -3550,26 +3566,18 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
3550 | 3566 | ||
3551 | #ifdef CONFIG_DMAR | 3567 | #ifdef CONFIG_DMAR |
3552 | #ifdef CONFIG_SMP | 3568 | #ifdef CONFIG_SMP |
3553 | static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | 3569 | static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) |
3554 | { | 3570 | { |
3555 | struct irq_desc *desc = irq_to_desc(irq); | 3571 | struct irq_desc *desc = irq_to_desc(irq); |
3556 | struct irq_cfg *cfg; | 3572 | struct irq_cfg *cfg; |
3557 | struct msi_msg msg; | 3573 | struct msi_msg msg; |
3558 | unsigned int dest; | 3574 | unsigned int dest; |
3559 | cpumask_t tmp; | ||
3560 | 3575 | ||
3561 | cpus_and(tmp, mask, cpu_online_map); | 3576 | dest = set_desc_affinity(desc, mask); |
3562 | if (cpus_empty(tmp)) | 3577 | if (dest == BAD_APICID) |
3563 | return; | 3578 | return; |
3564 | 3579 | ||
3565 | cfg = desc->chip_data; | 3580 | cfg = desc->chip_data; |
3566 | if (assign_irq_vector(irq, cfg, mask)) | ||
3567 | return; | ||
3568 | |||
3569 | set_extra_move_desc(desc, mask); | ||
3570 | |||
3571 | cpus_and(tmp, cfg->domain, mask); | ||
3572 | dest = cpu_mask_to_apicid(tmp); | ||
3573 | 3581 | ||
3574 | dmar_msi_read(irq, &msg); | 3582 | dmar_msi_read(irq, &msg); |
3575 | 3583 | ||
@@ -3579,7 +3587,6 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | |||
3579 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3587 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
3580 | 3588 | ||
3581 | dmar_msi_write(irq, &msg); | 3589 | dmar_msi_write(irq, &msg); |
3582 | desc->affinity = mask; | ||
3583 | } | 3590 | } |
3584 | 3591 | ||
3585 | #endif /* CONFIG_SMP */ | 3592 | #endif /* CONFIG_SMP */ |
@@ -3613,26 +3620,18 @@ int arch_setup_dmar_msi(unsigned int irq) | |||
3613 | #ifdef CONFIG_HPET_TIMER | 3620 | #ifdef CONFIG_HPET_TIMER |
3614 | 3621 | ||
3615 | #ifdef CONFIG_SMP | 3622 | #ifdef CONFIG_SMP |
3616 | static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask) | 3623 | static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) |
3617 | { | 3624 | { |
3618 | struct irq_desc *desc = irq_to_desc(irq); | 3625 | struct irq_desc *desc = irq_to_desc(irq); |
3619 | struct irq_cfg *cfg; | 3626 | struct irq_cfg *cfg; |
3620 | struct msi_msg msg; | 3627 | struct msi_msg msg; |
3621 | unsigned int dest; | 3628 | unsigned int dest; |
3622 | cpumask_t tmp; | ||
3623 | 3629 | ||
3624 | cpus_and(tmp, mask, cpu_online_map); | 3630 | dest = set_desc_affinity(desc, mask); |
3625 | if (cpus_empty(tmp)) | 3631 | if (dest == BAD_APICID) |
3626 | return; | 3632 | return; |
3627 | 3633 | ||
3628 | cfg = desc->chip_data; | 3634 | cfg = desc->chip_data; |
3629 | if (assign_irq_vector(irq, cfg, mask)) | ||
3630 | return; | ||
3631 | |||
3632 | set_extra_move_desc(desc, mask); | ||
3633 | |||
3634 | cpus_and(tmp, cfg->domain, mask); | ||
3635 | dest = cpu_mask_to_apicid(tmp); | ||
3636 | 3635 | ||
3637 | hpet_msi_read(irq, &msg); | 3636 | hpet_msi_read(irq, &msg); |
3638 | 3637 | ||
@@ -3642,7 +3641,6 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask) | |||
3642 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3641 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
3643 | 3642 | ||
3644 | hpet_msi_write(irq, &msg); | 3643 | hpet_msi_write(irq, &msg); |
3645 | desc->affinity = mask; | ||
3646 | } | 3644 | } |
3647 | 3645 | ||
3648 | #endif /* CONFIG_SMP */ | 3646 | #endif /* CONFIG_SMP */ |
@@ -3697,28 +3695,19 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) | |||
3697 | write_ht_irq_msg(irq, &msg); | 3695 | write_ht_irq_msg(irq, &msg); |
3698 | } | 3696 | } |
3699 | 3697 | ||
3700 | static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) | 3698 | static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) |
3701 | { | 3699 | { |
3702 | struct irq_desc *desc = irq_to_desc(irq); | 3700 | struct irq_desc *desc = irq_to_desc(irq); |
3703 | struct irq_cfg *cfg; | 3701 | struct irq_cfg *cfg; |
3704 | unsigned int dest; | 3702 | unsigned int dest; |
3705 | cpumask_t tmp; | ||
3706 | 3703 | ||
3707 | cpus_and(tmp, mask, cpu_online_map); | 3704 | dest = set_desc_affinity(desc, mask); |
3708 | if (cpus_empty(tmp)) | 3705 | if (dest == BAD_APICID) |
3709 | return; | 3706 | return; |
3710 | 3707 | ||
3711 | cfg = desc->chip_data; | 3708 | cfg = desc->chip_data; |
3712 | if (assign_irq_vector(irq, cfg, mask)) | ||
3713 | return; | ||
3714 | |||
3715 | set_extra_move_desc(desc, mask); | ||
3716 | |||
3717 | cpus_and(tmp, cfg->domain, mask); | ||
3718 | dest = cpu_mask_to_apicid(tmp); | ||
3719 | 3709 | ||
3720 | target_ht_irq(irq, dest, cfg->vector); | 3710 | target_ht_irq(irq, dest, cfg->vector); |
3721 | desc->affinity = mask; | ||
3722 | } | 3711 | } |
3723 | 3712 | ||
3724 | #endif | 3713 | #endif |
@@ -3738,17 +3727,14 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
3738 | { | 3727 | { |
3739 | struct irq_cfg *cfg; | 3728 | struct irq_cfg *cfg; |
3740 | int err; | 3729 | int err; |
3741 | cpumask_t tmp; | ||
3742 | 3730 | ||
3743 | cfg = irq_cfg(irq); | 3731 | cfg = irq_cfg(irq); |
3744 | tmp = TARGET_CPUS; | 3732 | err = assign_irq_vector(irq, cfg, TARGET_CPUS); |
3745 | err = assign_irq_vector(irq, cfg, tmp); | ||
3746 | if (!err) { | 3733 | if (!err) { |
3747 | struct ht_irq_msg msg; | 3734 | struct ht_irq_msg msg; |
3748 | unsigned dest; | 3735 | unsigned dest; |
3749 | 3736 | ||
3750 | cpus_and(tmp, cfg->domain, tmp); | 3737 | dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); |
3751 | dest = cpu_mask_to_apicid(tmp); | ||
3752 | 3738 | ||
3753 | msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); | 3739 | msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); |
3754 | 3740 | ||
@@ -3784,7 +3770,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
3784 | int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | 3770 | int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, |
3785 | unsigned long mmr_offset) | 3771 | unsigned long mmr_offset) |
3786 | { | 3772 | { |
3787 | const cpumask_t *eligible_cpu = get_cpu_mask(cpu); | 3773 | const struct cpumask *eligible_cpu = cpumask_of(cpu); |
3788 | struct irq_cfg *cfg; | 3774 | struct irq_cfg *cfg; |
3789 | int mmr_pnode; | 3775 | int mmr_pnode; |
3790 | unsigned long mmr_value; | 3776 | unsigned long mmr_value; |
@@ -3794,7 +3780,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
3794 | 3780 | ||
3795 | cfg = irq_cfg(irq); | 3781 | cfg = irq_cfg(irq); |
3796 | 3782 | ||
3797 | err = assign_irq_vector(irq, cfg, *eligible_cpu); | 3783 | err = assign_irq_vector(irq, cfg, eligible_cpu); |
3798 | if (err != 0) | 3784 | if (err != 0) |
3799 | return err; | 3785 | return err; |
3800 | 3786 | ||
@@ -3813,7 +3799,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
3813 | entry->polarity = 0; | 3799 | entry->polarity = 0; |
3814 | entry->trigger = 0; | 3800 | entry->trigger = 0; |
3815 | entry->mask = 0; | 3801 | entry->mask = 0; |
3816 | entry->dest = cpu_mask_to_apicid(*eligible_cpu); | 3802 | entry->dest = cpu_mask_to_apicid(eligible_cpu); |
3817 | 3803 | ||
3818 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | 3804 | mmr_pnode = uv_blade_to_pnode(mmr_blade); |
3819 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | 3805 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); |
@@ -4024,7 +4010,7 @@ void __init setup_ioapic_dest(void) | |||
4024 | int pin, ioapic, irq, irq_entry; | 4010 | int pin, ioapic, irq, irq_entry; |
4025 | struct irq_desc *desc; | 4011 | struct irq_desc *desc; |
4026 | struct irq_cfg *cfg; | 4012 | struct irq_cfg *cfg; |
4027 | cpumask_t mask; | 4013 | const struct cpumask *mask; |
4028 | 4014 | ||
4029 | if (skip_ioapic_setup == 1) | 4015 | if (skip_ioapic_setup == 1) |
4030 | return; | 4016 | return; |
@@ -4055,7 +4041,7 @@ void __init setup_ioapic_dest(void) | |||
4055 | */ | 4041 | */ |
4056 | if (desc->status & | 4042 | if (desc->status & |
4057 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) | 4043 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) |
4058 | mask = desc->affinity; | 4044 | mask = &desc->affinity; |
4059 | else | 4045 | else |
4060 | mask = TARGET_CPUS; | 4046 | mask = TARGET_CPUS; |
4061 | 4047 | ||
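The allocation pattern that the reworked __assign_irq_vector() adopts above is worth isolating: every on-stack cpumask_t becomes one scratch mask that is allocated once, reused across the search loop, and freed on all exit paths. A minimal sketch of the idiom, assuming only the generic cpumask API (example_pick_online() is a made-up name, not kernel code):

    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    static int example_pick_online(const struct cpumask *mask)
    {
            cpumask_var_t tmp_mask;
            int err = -ENOSPC;

            /* With CONFIG_CPUMASK_OFFSTACK=y this really allocates;
             * otherwise cpumask_var_t is an on-stack array and the
             * alloc/free pair compile down to no-ops. */
            if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
                    return -ENOMEM;

            cpumask_and(tmp_mask, mask, cpu_online_mask);
            if (!cpumask_empty(tmp_mask))
                    err = 0;        /* an online target exists */

            free_cpumask_var(tmp_mask);
            return err;
    }

This is what lets MAXSMP select CPUMASK_OFFSTACK in the Kconfig hunk: with NR_CPUS=4096 a single cpumask_t is 512 bytes, far too large to keep copying through kernel stacks.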
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index f1c688e46f35..285bbf8831fa 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c | |||
@@ -116,18 +116,18 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector) | |||
116 | /* | 116 | /* |
117 | * This is only used on smaller machines. | 117 | * This is only used on smaller machines. |
118 | */ | 118 | */ |
119 | void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) | 119 | void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector) |
120 | { | 120 | { |
121 | unsigned long mask = cpus_addr(cpumask)[0]; | 121 | unsigned long mask = cpumask_bits(cpumask)[0]; |
122 | unsigned long flags; | 122 | unsigned long flags; |
123 | 123 | ||
124 | local_irq_save(flags); | 124 | local_irq_save(flags); |
125 | WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); | 125 | WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); |
126 | __send_IPI_dest_field(mask, vector); | 126 | __send_IPI_dest_field(mask, vector); |
127 | local_irq_restore(flags); | 127 | local_irq_restore(flags); |
128 | } | 128 | } |
129 | 129 | ||
130 | void send_IPI_mask_sequence(cpumask_t mask, int vector) | 130 | void send_IPI_mask_sequence(const struct cpumask *mask, int vector) |
131 | { | 131 | { |
132 | unsigned long flags; | 132 | unsigned long flags; |
133 | unsigned int query_cpu; | 133 | unsigned int query_cpu; |
@@ -139,12 +139,24 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector) | |||
139 | */ | 139 | */ |
140 | 140 | ||
141 | local_irq_save(flags); | 141 | local_irq_save(flags); |
142 | for_each_possible_cpu(query_cpu) { | 142 | for_each_cpu(query_cpu, mask) |
143 | if (cpu_isset(query_cpu, mask)) { | 143 | __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector); |
144 | local_irq_restore(flags); | ||
145 | } | ||
146 | |||
147 | void send_IPI_mask_allbutself(const struct cpumask *mask, int vector) | ||
148 | { | ||
149 | unsigned long flags; | ||
150 | unsigned int query_cpu; | ||
151 | unsigned int this_cpu = smp_processor_id(); | ||
152 | |||
153 | /* See Hack comment above */ | ||
154 | |||
155 | local_irq_save(flags); | ||
156 | for_each_cpu(query_cpu, mask) | ||
157 | if (query_cpu != this_cpu) | ||
144 | __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), | 158 | __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), |
145 | vector); | 159 | vector); |
146 | } | ||
147 | } | ||
148 | local_irq_restore(flags); | 160 | local_irq_restore(flags); |
149 | } | 161 | } |
150 | 162 | ||
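The rewritten send_IPI_mask_sequence() and the new send_IPI_mask_allbutself() share one idiom: for_each_cpu() walks only the bits actually set in the mask, where the old loop scanned every possible CPU and tested membership with cpu_isset(). A hedged sketch of the shape, with send_one() as a hypothetical stand-in for the __send_IPI_dest_field(cpu_to_logical_apicid(...), vector) pair:

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    /* Hypothetical stand-in for the per-CPU send; not a real kernel symbol. */
    static void send_one(unsigned int cpu, int vector) { }

    static void example_ipi_allbutself(const struct cpumask *mask, int vector)
    {
            unsigned int query_cpu;
            unsigned int this_cpu = smp_processor_id();

            /* Visits only set bits, so cost scales with the mask's
             * weight rather than with NR_CPUS. */
            for_each_cpu(query_cpu, mask)
                    if (query_cpu != this_cpu)
                            send_one(query_cpu, vector);
    }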
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 3f1d9d18df67..bce53e1352a0 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <asm/apic.h> | 9 | #include <asm/apic.h> |
10 | #include <asm/io_apic.h> | 10 | #include <asm/io_apic.h> |
11 | #include <asm/smp.h> | 11 | #include <asm/smp.h> |
12 | #include <asm/irq.h> | ||
12 | 13 | ||
13 | atomic_t irq_err_count; | 14 | atomic_t irq_err_count; |
14 | 15 | ||
@@ -190,3 +191,5 @@ u64 arch_irq_stat(void) | |||
190 | #endif | 191 | #endif |
191 | return sum; | 192 | return sum; |
192 | } | 193 | } |
194 | |||
195 | EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); | ||
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 119fc9c8ff7f..9dc5588f336a 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -233,27 +233,28 @@ unsigned int do_IRQ(struct pt_regs *regs) | |||
233 | #ifdef CONFIG_HOTPLUG_CPU | 233 | #ifdef CONFIG_HOTPLUG_CPU |
234 | #include <mach_apic.h> | 234 | #include <mach_apic.h> |
235 | 235 | ||
236 | void fixup_irqs(cpumask_t map) | 236 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ |
237 | void fixup_irqs(void) | ||
237 | { | 238 | { |
238 | unsigned int irq; | 239 | unsigned int irq; |
239 | static int warned; | 240 | static int warned; |
240 | struct irq_desc *desc; | 241 | struct irq_desc *desc; |
241 | 242 | ||
242 | for_each_irq_desc(irq, desc) { | 243 | for_each_irq_desc(irq, desc) { |
243 | cpumask_t mask; | 244 | const struct cpumask *affinity; |
244 | 245 | ||
245 | if (!desc) | 246 | if (!desc) |
246 | continue; | 247 | continue; |
247 | if (irq == 2) | 248 | if (irq == 2) |
248 | continue; | 249 | continue; |
249 | 250 | ||
250 | cpus_and(mask, desc->affinity, map); | 251 | affinity = &desc->affinity; |
251 | if (any_online_cpu(mask) == NR_CPUS) { | 252 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { |
252 | printk("Breaking affinity for irq %i\n", irq); | 253 | printk("Breaking affinity for irq %i\n", irq); |
253 | mask = map; | 254 | affinity = cpu_all_mask; |
254 | } | 255 | } |
255 | if (desc->chip->set_affinity) | 256 | if (desc->chip->set_affinity) |
256 | desc->chip->set_affinity(irq, mask); | 257 | desc->chip->set_affinity(irq, affinity); |
257 | else if (desc->action && !(warned++)) | 258 | else if (desc->action && !(warned++)) |
258 | printk("Cannot set affinity for irq %i\n", irq); | 259 | printk("Cannot set affinity for irq %i\n", irq); |
259 | } | 260 | } |
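The retargeting test in the new fixup_irqs() relies on a cpumask convention used throughout this series: search helpers return nr_cpu_ids (or greater) to mean "no CPU found". A sketch of just that predicate, assuming nothing beyond the cpumask API:

    #include <linux/cpumask.h>
    #include <linux/types.h>

    /* True when the affinity mask no longer intersects the online map,
     * i.e. the irq must be rerouted (here, to cpu_all_mask). */
    static bool example_affinity_broken(const struct cpumask *affinity)
    {
            return cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids;
    }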
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index a174a217eb1a..6383d50f82ea 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c | |||
@@ -80,16 +80,17 @@ asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs) | |||
80 | } | 80 | } |
81 | 81 | ||
82 | #ifdef CONFIG_HOTPLUG_CPU | 82 | #ifdef CONFIG_HOTPLUG_CPU |
83 | void fixup_irqs(cpumask_t map) | 83 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ |
84 | void fixup_irqs(void) | ||
84 | { | 85 | { |
85 | unsigned int irq; | 86 | unsigned int irq; |
86 | static int warned; | 87 | static int warned; |
87 | struct irq_desc *desc; | 88 | struct irq_desc *desc; |
88 | 89 | ||
89 | for_each_irq_desc(irq, desc) { | 90 | for_each_irq_desc(irq, desc) { |
90 | cpumask_t mask; | ||
91 | int break_affinity = 0; | 91 | int break_affinity = 0; |
92 | int set_affinity = 1; | 92 | int set_affinity = 1; |
93 | const struct cpumask *affinity; | ||
93 | 94 | ||
94 | if (!desc) | 95 | if (!desc) |
95 | continue; | 96 | continue; |
@@ -99,23 +100,23 @@ void fixup_irqs(cpumask_t map) | |||
99 | /* interrupts are disabled at this point */ | 100 | /* interrupts are disabled at this point */ |
100 | spin_lock(&desc->lock); | 101 | spin_lock(&desc->lock); |
101 | 102 | ||
103 | affinity = &desc->affinity; | ||
102 | if (!irq_has_action(irq) || | 104 | if (!irq_has_action(irq) || |
103 | cpus_equal(desc->affinity, map)) { | 105 | cpumask_equal(affinity, cpu_online_mask)) { |
104 | spin_unlock(&desc->lock); | 106 | spin_unlock(&desc->lock); |
105 | continue; | 107 | continue; |
106 | } | 108 | } |
107 | 109 | ||
108 | cpus_and(mask, desc->affinity, map); | 110 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { |
109 | if (cpus_empty(mask)) { | ||
110 | break_affinity = 1; | 111 | break_affinity = 1; |
111 | mask = map; | 112 | affinity = cpu_all_mask; |
112 | } | 113 | } |
113 | 114 | ||
114 | if (desc->chip->mask) | 115 | if (desc->chip->mask) |
115 | desc->chip->mask(irq); | 116 | desc->chip->mask(irq); |
116 | 117 | ||
117 | if (desc->chip->set_affinity) | 118 | if (desc->chip->set_affinity) |
118 | desc->chip->set_affinity(irq, mask); | 119 | desc->chip->set_affinity(irq, affinity); |
119 | else if (!(warned++)) | 120 | else if (!(warned++)) |
120 | set_affinity = 0; | 121 | set_affinity = 0; |
121 | 122 | ||
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index 203384ed2b5d..84723295f88a 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c | |||
@@ -110,6 +110,18 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = { | |||
110 | [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 | 110 | [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 |
111 | }; | 111 | }; |
112 | 112 | ||
113 | int vector_used_by_percpu_irq(unsigned int vector) | ||
114 | { | ||
115 | int cpu; | ||
116 | |||
117 | for_each_online_cpu(cpu) { | ||
118 | if (per_cpu(vector_irq, cpu)[vector] != -1) | ||
119 | return 1; | ||
120 | } | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | |||
113 | /* Overridden in paravirt.c */ | 125 | /* Overridden in paravirt.c */ |
114 | void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); | 126 | void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); |
115 | 127 | ||
@@ -146,10 +158,12 @@ void __init native_init_IRQ(void) | |||
146 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | 158 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); |
147 | 159 | ||
148 | /* IPI for single call function */ | 160 | /* IPI for single call function */ |
149 | set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt); | 161 | alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, |
162 | call_function_single_interrupt); | ||
150 | 163 | ||
151 | /* Low priority IPI to cleanup after moving an irq */ | 164 | /* Low priority IPI to cleanup after moving an irq */ |
152 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | 165 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); |
166 | set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); | ||
153 | #endif | 167 | #endif |
154 | 168 | ||
155 | #ifdef CONFIG_X86_LOCAL_APIC | 169 | #ifdef CONFIG_X86_LOCAL_APIC |
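vector_used_by_percpu_irq(), added here and mirrored in the 64-bit init file below, answers "does any online CPU still route an irq through this vector?". A hypothetical caller sketch (example_claim_vector() is invented for illustration; the lguest used_vectors fix in this series presumably performs a check along these lines before reusing a vector):

    static int example_claim_vector(unsigned int vector)
    {
            /* Reject vectors reserved at boot or still bound per-cpu. */
            if (test_bit(vector, used_vectors) ||
                vector_used_by_percpu_irq(vector))
                    return -EBUSY;

            set_bit(vector, used_vectors);
            return 0;
    }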
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index 6190e6ef546c..31ebfe38e96c 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c | |||
@@ -69,6 +69,18 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = { | |||
69 | [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 | 69 | [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 |
70 | }; | 70 | }; |
71 | 71 | ||
72 | int vector_used_by_percpu_irq(unsigned int vector) | ||
73 | { | ||
74 | int cpu; | ||
75 | |||
76 | for_each_online_cpu(cpu) { | ||
77 | if (per_cpu(vector_irq, cpu)[vector] != -1) | ||
78 | return 1; | ||
79 | } | ||
80 | |||
81 | return 0; | ||
82 | } | ||
83 | |||
72 | void __init init_ISA_irqs(void) | 84 | void __init init_ISA_irqs(void) |
73 | { | 85 | { |
74 | int i; | 86 | int i; |
@@ -121,6 +133,7 @@ static void __init smp_intr_init(void) | |||
121 | 133 | ||
122 | /* Low priority IPI to cleanup after moving an irq */ | 134 | /* Low priority IPI to cleanup after moving an irq */ |
123 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | 135 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); |
136 | set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); | ||
124 | #endif | 137 | #endif |
125 | } | 138 | } |
126 | 139 | ||
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c index 3b599518c322..c12314c9e86f 100644 --- a/arch/x86/kernel/mfgpt_32.c +++ b/arch/x86/kernel/mfgpt_32.c | |||
@@ -287,7 +287,7 @@ static struct clock_event_device mfgpt_clockevent = { | |||
287 | .set_mode = mfgpt_set_mode, | 287 | .set_mode = mfgpt_set_mode, |
288 | .set_next_event = mfgpt_next_event, | 288 | .set_next_event = mfgpt_next_event, |
289 | .rating = 250, | 289 | .rating = 250, |
290 | .cpumask = CPU_MASK_ALL, | 290 | .cpumask = cpu_all_mask, |
291 | .shift = 32 | 291 | .shift = 32 |
292 | }; | 292 | }; |
293 | 293 | ||
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 72e0e4e712d6..39643b1df061 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -650,10 +650,7 @@ static int crash_nmi_callback(struct notifier_block *self, | |||
650 | 650 | ||
651 | static void smp_send_nmi_allbutself(void) | 651 | static void smp_send_nmi_allbutself(void) |
652 | { | 652 | { |
653 | cpumask_t mask = cpu_online_map; | 653 | send_IPI_allbutself(NMI_VECTOR); |
654 | cpu_clear(safe_smp_processor_id(), mask); | ||
655 | if (!cpus_empty(mask)) | ||
656 | send_IPI_mask(mask, NMI_VECTOR); | ||
657 | } | 654 | } |
658 | 655 | ||
659 | static struct notifier_block crash_nmi_nb = { | 656 | static struct notifier_block crash_nmi_nb = { |
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index ae0c0d3bb770..0b63b08e7530 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -152,6 +152,11 @@ void __init setup_per_cpu_areas(void) | |||
152 | old_size = PERCPU_ENOUGH_ROOM; | 152 | old_size = PERCPU_ENOUGH_ROOM; |
153 | align = max_t(unsigned long, PAGE_SIZE, align); | 153 | align = max_t(unsigned long, PAGE_SIZE, align); |
154 | size = roundup(old_size, align); | 154 | size = roundup(old_size, align); |
155 | |||
156 | printk(KERN_INFO | ||
157 | "NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", | ||
158 | NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); | ||
159 | |||
155 | printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", | 160 | printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", |
156 | size); | 161 | size); |
157 | 162 | ||
@@ -168,24 +173,24 @@ void __init setup_per_cpu_areas(void) | |||
168 | "cpu %d has no node %d or node-local memory\n", | 173 | "cpu %d has no node %d or node-local memory\n", |
169 | cpu, node); | 174 | cpu, node); |
170 | if (ptr) | 175 | if (ptr) |
171 | printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n", | 176 | printk(KERN_DEBUG |
177 | "per cpu data for cpu%d at %016lx\n", | ||
172 | cpu, __pa(ptr)); | 178 | cpu, __pa(ptr)); |
173 | } | 179 | } |
174 | else { | 180 | else { |
175 | ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, | 181 | ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, |
176 | __pa(MAX_DMA_ADDRESS)); | 182 | __pa(MAX_DMA_ADDRESS)); |
177 | if (ptr) | 183 | if (ptr) |
178 | printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n", | 184 | printk(KERN_DEBUG |
179 | cpu, node, __pa(ptr)); | 185 | "per cpu data for cpu%d on node%d " |
186 | "at %016lx\n", | ||
187 | cpu, node, __pa(ptr)); | ||
180 | } | 188 | } |
181 | #endif | 189 | #endif |
182 | per_cpu_offset(cpu) = ptr - __per_cpu_start; | 190 | per_cpu_offset(cpu) = ptr - __per_cpu_start; |
183 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | 191 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); |
184 | } | 192 | } |
185 | 193 | ||
186 | printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n", | ||
187 | NR_CPUS, nr_cpu_ids, nr_node_ids); | ||
188 | |||
189 | /* Setup percpu data maps */ | 194 | /* Setup percpu data maps */ |
190 | setup_per_cpu_maps(); | 195 | setup_per_cpu_maps(); |
191 | 196 | ||
@@ -282,7 +287,7 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable) | |||
282 | else | 287 | else |
283 | cpu_clear(cpu, *mask); | 288 | cpu_clear(cpu, *mask); |
284 | 289 | ||
285 | cpulist_scnprintf(buf, sizeof(buf), *mask); | 290 | cpulist_scnprintf(buf, sizeof(buf), mask); |
286 | printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", | 291 | printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", |
287 | enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf); | 292 | enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf); |
288 | } | 293 | } |
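cpulist_scnprintf() is another by-value-to-pointer conversion: its source argument is now a struct cpumask pointer, so the old *mask dereference had to go. A sketch of the corrected call, assuming a node mask pointer as in numa_set_cpumask():

    char buf[64];

    /* Pass the pointer itself; dereferencing would copy NR_CPUS bits
     * and would no longer match the prototype. */
    cpulist_scnprintf(buf, sizeof(buf), mask);
    printk(KERN_DEBUG "mask now %s\n", buf);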
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 7e558db362c1..beea2649a240 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c | |||
@@ -118,22 +118,22 @@ static void native_smp_send_reschedule(int cpu) | |||
118 | WARN_ON(1); | 118 | WARN_ON(1); |
119 | return; | 119 | return; |
120 | } | 120 | } |
121 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); | 121 | send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); |
122 | } | 122 | } |
123 | 123 | ||
124 | void native_send_call_func_single_ipi(int cpu) | 124 | void native_send_call_func_single_ipi(int cpu) |
125 | { | 125 | { |
126 | send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); | 126 | send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR); |
127 | } | 127 | } |
128 | 128 | ||
129 | void native_send_call_func_ipi(cpumask_t mask) | 129 | void native_send_call_func_ipi(const struct cpumask *mask) |
130 | { | 130 | { |
131 | cpumask_t allbutself; | 131 | cpumask_t allbutself; |
132 | 132 | ||
133 | allbutself = cpu_online_map; | 133 | allbutself = cpu_online_map; |
134 | cpu_clear(smp_processor_id(), allbutself); | 134 | cpu_clear(smp_processor_id(), allbutself); |
135 | 135 | ||
136 | if (cpus_equal(mask, allbutself) && | 136 | if (cpus_equal(*mask, allbutself) && |
137 | cpus_equal(cpu_online_map, cpu_callout_map)) | 137 | cpus_equal(cpu_online_map, cpu_callout_map)) |
138 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | 138 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); |
139 | else | 139 | else |
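cpumask_of(cpu) returns a const pointer into a static table of single-bit masks, so the reschedule and call-function-single IPIs above no longer materialize a full cpumask_t per call the way cpumask_of_cpu() did. A sketch of the new shape (example_kick() is a made-up wrapper):

    static void example_kick(int cpu, int vector)
    {
            /* No struct copy: cpumask_of() hands back a pointer to a
             * preformatted one-bit mask for this cpu. */
            send_IPI_mask(cpumask_of(cpu), vector);
    }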
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f8500c969442..31869bf5fabd 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -102,14 +102,8 @@ EXPORT_SYMBOL(smp_num_siblings); | |||
102 | /* Last level cache ID of each logical CPU */ | 102 | /* Last level cache ID of each logical CPU */ |
103 | DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; | 103 | DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; |
104 | 104 | ||
105 | /* bitmap of online cpus */ | ||
106 | cpumask_t cpu_online_map __read_mostly; | ||
107 | EXPORT_SYMBOL(cpu_online_map); | ||
108 | |||
109 | cpumask_t cpu_callin_map; | 105 | cpumask_t cpu_callin_map; |
110 | cpumask_t cpu_callout_map; | 106 | cpumask_t cpu_callout_map; |
111 | cpumask_t cpu_possible_map; | ||
112 | EXPORT_SYMBOL(cpu_possible_map); | ||
113 | 107 | ||
114 | /* representing HT siblings of each logical CPU */ | 108 | /* representing HT siblings of each logical CPU */ |
115 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); | 109 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); |
@@ -1260,6 +1254,15 @@ void __init native_smp_cpus_done(unsigned int max_cpus) | |||
1260 | check_nmi_watchdog(); | 1254 | check_nmi_watchdog(); |
1261 | } | 1255 | } |
1262 | 1256 | ||
1257 | static int __initdata setup_possible_cpus = -1; | ||
1258 | static int __init _setup_possible_cpus(char *str) | ||
1259 | { | ||
1260 | get_option(&str, &setup_possible_cpus); | ||
1261 | return 0; | ||
1262 | } | ||
1263 | early_param("possible_cpus", _setup_possible_cpus); | ||
1264 | |||
1265 | |||
1263 | /* | 1266 | /* |
1264 | * cpu_possible_map should be static, it cannot change as cpu's | 1267 | * cpu_possible_map should be static, it cannot change as cpu's |
1265 | * are onlined, or offlined. The reason is per-cpu data-structures | 1268 | * are onlined, or offlined. The reason is per-cpu data-structures |
@@ -1272,7 +1275,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus) | |||
1272 | * | 1275 | * |
1273 | * Three ways to find out the number of additional hotplug CPUs: | 1276 | * Three ways to find out the number of additional hotplug CPUs: |
1274 | * - If the BIOS specified disabled CPUs in ACPI/mptables use that. | 1277 | * - If the BIOS specified disabled CPUs in ACPI/mptables use that. |
1275 | * - The user can overwrite it with additional_cpus=NUM | 1278 | * - The user can overwrite it with possible_cpus=NUM |
1276 | * - Otherwise don't reserve additional CPUs. | 1279 | * - Otherwise don't reserve additional CPUs. |
1277 | * We do this because additional CPUs waste a lot of memory. | 1280 | * We do this because additional CPUs waste a lot of memory. |
1278 | * -AK | 1281 | * -AK |
@@ -1285,9 +1288,17 @@ __init void prefill_possible_map(void) | |||
1285 | if (!num_processors) | 1288 | if (!num_processors) |
1286 | num_processors = 1; | 1289 | num_processors = 1; |
1287 | 1290 | ||
1288 | possible = num_processors + disabled_cpus; | 1291 | if (setup_possible_cpus == -1) |
1289 | if (possible > NR_CPUS) | 1292 | possible = num_processors + disabled_cpus; |
1290 | possible = NR_CPUS; | 1293 | else |
1294 | possible = setup_possible_cpus; | ||
1295 | |||
1296 | if (possible > CONFIG_NR_CPUS) { | ||
1297 | printk(KERN_WARNING | ||
1298 | "%d Processors exceeds NR_CPUS limit of %d\n", | ||
1299 | possible, CONFIG_NR_CPUS); | ||
1300 | possible = CONFIG_NR_CPUS; | ||
1301 | } | ||
1291 | 1302 | ||
1292 | printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", | 1303 | printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", |
1293 | possible, max_t(int, possible - num_processors, 0)); | 1304 | possible, max_t(int, possible - num_processors, 0)); |
@@ -1352,7 +1363,7 @@ void cpu_disable_common(void) | |||
1352 | lock_vector_lock(); | 1363 | lock_vector_lock(); |
1353 | remove_cpu_from_maps(cpu); | 1364 | remove_cpu_from_maps(cpu); |
1354 | unlock_vector_lock(); | 1365 | unlock_vector_lock(); |
1355 | fixup_irqs(cpu_online_map); | 1366 | fixup_irqs(); |
1356 | } | 1367 | } |
1357 | 1368 | ||
1358 | int native_cpu_disable(void) | 1369 | int native_cpu_disable(void) |
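The new possible_cpus= handling in prefill_possible_map() reduces to a two-step decision: take the override if one was given, then clamp to the build-time limit. A worked sketch with concrete numbers, assuming CONFIG_NR_CPUS=32 and a command line containing possible_cpus=64:

    int possible;

    /* -1 means possible_cpus= was absent from the command line:
     * fall back to what the BIOS/ACPI tables reported. */
    if (setup_possible_cpus == -1)
            possible = num_processors + disabled_cpus;
    else
            possible = setup_possible_cpus;         /* 64 in this example */

    /* Per-cpu areas can never be sized past the build limit, so 64
     * is clamped to 32 here and a warning is printed. */
    if (possible > CONFIG_NR_CPUS)
            possible = CONFIG_NR_CPUS;

Conversely, booting a 2-CPU box with possible_cpus=8 reserves six hotplug slots without paying per-cpu memory for all of NR_CPUS.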
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c index 8da059f949be..ce5054642247 100644 --- a/arch/x86/kernel/tlb_32.c +++ b/arch/x86/kernel/tlb_32.c | |||
@@ -163,7 +163,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, | |||
163 | * We have to send the IPI only to | 163 | * We have to send the IPI only to |
164 | * CPUs affected. | 164 | * CPUs affected. |
165 | */ | 165 | */ |
166 | send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR); | 166 | send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR); |
167 | 167 | ||
168 | while (!cpus_empty(flush_cpumask)) | 168 | while (!cpus_empty(flush_cpumask)) |
169 | /* nothing. lockup detection does not belong here */ | 169 | /* nothing. lockup detection does not belong here */ |
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index 29887d7081a9..f8be6f1d2e48 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c | |||
@@ -191,7 +191,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, | |||
191 | * We have to send the IPI only to | 191 | * We have to send the IPI only to |
192 | * CPUs affected. | 192 | * CPUs affected. |
193 | */ | 193 | */ |
194 | send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender); | 194 | send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender); |
195 | 195 | ||
196 | while (!cpus_empty(f->flush_cpumask)) | 196 | while (!cpus_empty(f->flush_cpumask)) |
197 | cpu_relax(); | 197 | cpu_relax(); |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 141907ab6e22..2d1f4c7e4052 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -72,9 +72,6 @@ | |||
72 | 72 | ||
73 | #include "cpu/mcheck/mce.h" | 73 | #include "cpu/mcheck/mce.h" |
74 | 74 | ||
75 | DECLARE_BITMAP(used_vectors, NR_VECTORS); | ||
76 | EXPORT_SYMBOL_GPL(used_vectors); | ||
77 | |||
78 | asmlinkage int system_call(void); | 75 | asmlinkage int system_call(void); |
79 | 76 | ||
80 | /* Do we ignore FPU interrupts ? */ | 77 | /* Do we ignore FPU interrupts ? */ |
@@ -89,6 +86,9 @@ gate_desc idt_table[256] | |||
89 | __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; | 86 | __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; |
90 | #endif | 87 | #endif |
91 | 88 | ||
89 | DECLARE_BITMAP(used_vectors, NR_VECTORS); | ||
90 | EXPORT_SYMBOL_GPL(used_vectors); | ||
91 | |||
92 | static int ignore_nmis; | 92 | static int ignore_nmis; |
93 | 93 | ||
94 | static inline void conditional_sti(struct pt_regs *regs) | 94 | static inline void conditional_sti(struct pt_regs *regs) |
@@ -941,9 +941,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) | |||
941 | 941 | ||
942 | void __init trap_init(void) | 942 | void __init trap_init(void) |
943 | { | 943 | { |
944 | #ifdef CONFIG_X86_32 | ||
945 | int i; | 944 | int i; |
946 | #endif | ||
947 | 945 | ||
948 | #ifdef CONFIG_EISA | 946 | #ifdef CONFIG_EISA |
949 | void __iomem *p = early_ioremap(0x0FFFD9, 4); | 947 | void __iomem *p = early_ioremap(0x0FFFD9, 4); |
@@ -1000,11 +998,15 @@ void __init trap_init(void) | |||
1000 | } | 998 | } |
1001 | 999 | ||
1002 | set_system_trap_gate(SYSCALL_VECTOR, &system_call); | 1000 | set_system_trap_gate(SYSCALL_VECTOR, &system_call); |
1001 | #endif | ||
1003 | 1002 | ||
1004 | /* Reserve all the builtin and the syscall vector: */ | 1003 | /* Reserve all the builtin and the syscall vector: */ |
1005 | for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) | 1004 | for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) |
1006 | set_bit(i, used_vectors); | 1005 | set_bit(i, used_vectors); |
1007 | 1006 | ||
1007 | #ifdef CONFIG_X86_64 | ||
1008 | set_bit(IA32_SYSCALL_VECTOR, used_vectors); | ||
1009 | #else | ||
1008 | set_bit(SYSCALL_VECTOR, used_vectors); | 1010 | set_bit(SYSCALL_VECTOR, used_vectors); |
1009 | #endif | 1011 | #endif |
1010 | /* | 1012 | /* |
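Moving the used_vectors reservation out of the CONFIG_X86_32 block is what allowed __assign_irq_vector() (in the io_apic.c hunk earlier) to replace its #ifdef'ed syscall-vector checks with a single bitmap test. A consolidated sketch of both sides, paraphrasing rather than quoting the exact code:

    DECLARE_BITMAP(used_vectors, NR_VECTORS);

    /* Producer, in trap_init(): reserve every builtin exception
     * vector plus the arch's syscall vector, on 32- and 64-bit alike. */
    for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
            set_bit(i, used_vectors);

    /* Consumer, in the vector-allocation loop: one uniform test
     * replaces the old IA32_SYSCALL_VECTOR/SYSCALL_VECTOR #ifdef. */
    if (test_bit(vector, used_vectors))
            goto next;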
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index 254ee07f8635..c4c1f9e09402 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c | |||
@@ -226,7 +226,7 @@ static void __devinit vmi_time_init_clockevent(void) | |||
226 | /* Upper bound is clockevent's use of ulong for cycle deltas. */ | 226 | /* Upper bound is clockevent's use of ulong for cycle deltas. */ |
227 | evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt); | 227 | evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt); |
228 | evt->min_delta_ns = clockevent_delta2ns(1, evt); | 228 | evt->min_delta_ns = clockevent_delta2ns(1, evt); |
229 | evt->cpumask = cpumask_of_cpu(cpu); | 229 | evt->cpumask = cpumask_of(cpu); |
230 | 230 | ||
231 | printk(KERN_WARNING "vmi: registering clock event %s. mult=%lu shift=%u\n", | 231 | printk(KERN_WARNING "vmi: registering clock event %s. mult=%lu shift=%u\n", |
232 | evt->name, evt->mult, evt->shift); | 232 | evt->name, evt->mult, evt->shift); |
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 50a779264bb1..a7ed208f81e3 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -738,7 +738,7 @@ static void lguest_time_init(void) | |||
738 | 738 | ||
739 | /* We can't set cpumask in the initializer: damn C limitations! Set it | 739 | /* We can't set cpumask in the initializer: damn C limitations! Set it |
740 | * here and register our timer device. */ | 740 | * here and register our timer device. */ |
741 | lguest_clockevent.cpumask = cpumask_of_cpu(0); | 741 | lguest_clockevent.cpumask = cpumask_of(0); |
742 | clockevents_register_device(&lguest_clockevent); | 742 | clockevents_register_device(&lguest_clockevent); |
743 | 743 | ||
744 | /* Finally, we unblock the timer interrupt. */ | 744 | /* Finally, we unblock the timer interrupt. */ |
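clock_event_device.cpumask is converted to a pointer as well, which is why both vmiclock and lguest switch from cpumask_of_cpu() to cpumask_of(). A one-line sketch, assuming a device pinned to the boot CPU:

    /* Pointer to a static one-bit mask; valid for the device's lifetime. */
    evt->cpumask = cpumask_of(0);
    clockevents_register_device(evt);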
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 3624a364b7f3..bc4c7840b2a8 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c | |||
@@ -42,9 +42,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = { | |||
42 | { } | 42 | { } |
43 | }; | 43 | }; |
44 | 44 | ||
45 | static cpumask_t vector_allocation_domain(int cpu) | 45 | static void vector_allocation_domain(int cpu, cpumask_t *retmask) |
46 | { | 46 | { |
47 | return cpumask_of_cpu(cpu); | 47 | cpus_clear(*retmask); |
48 | cpu_set(cpu, *retmask); | ||
48 | } | 49 | } |
49 | 50 | ||
50 | static int probe_bigsmp(void) | 51 | static int probe_bigsmp(void) |
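vector_allocation_domain() in each sub-architecture now fills caller-supplied storage instead of returning a cpumask_t by value; with MAXSMP that return would have been a 512-byte struct copy on every probe of the allocation loop. A caller-side sketch matching the __assign_irq_vector() hunk above, assuming the same tmp_mask scratch variable:

    cpumask_var_t tmp_mask;

    if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
            return -ENOMEM;

    /* The domain lands directly in tmp_mask; nothing wide travels
     * through the return value or the caller's stack. */
    vector_allocation_domain(cpu, tmp_mask);
    /* ... vector search using tmp_mask ... */
    free_cpumask_var(tmp_mask);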
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 7b4e6d0d1690..4ba5ccaa1584 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c | |||
@@ -87,7 +87,7 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
87 | } | 87 | } |
88 | #endif | 88 | #endif |
89 | 89 | ||
90 | static cpumask_t vector_allocation_domain(int cpu) | 90 | static void vector_allocation_domain(int cpu, cpumask_t *retmask) |
91 | { | 91 | { |
92 | /* Careful. Some cpus do not strictly honor the set of cpus | 92 | /* Careful. Some cpus do not strictly honor the set of cpus |
93 | * specified in the interrupt destination when using lowest | 93 | * specified in the interrupt destination when using lowest |
@@ -97,8 +97,7 @@ static cpumask_t vector_allocation_domain(int cpu) | |||
97 | * deliver interrupts to the wrong hyperthread when only one | 97 | * deliver interrupts to the wrong hyperthread when only one |
98 | * hyperthread was specified in the interrupt destination. | 98 | * hyperthread was specified in the interrupt destination. |
99 | */ | 99 | */ |
100 | cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; | 100 | *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; |
101 | return domain; | ||
102 | } | 101 | } |
103 | 102 | ||
104 | struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000); | 103 | struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000); |
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 71a309b122e6..511d7941364f 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c | |||
@@ -38,7 +38,7 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
38 | return 0; | 38 | return 0; |
39 | } | 39 | } |
40 | 40 | ||
41 | static cpumask_t vector_allocation_domain(int cpu) | 41 | static void vector_allocation_domain(int cpu, cpumask_t *retmask) |
42 | { | 42 | { |
43 | /* Careful. Some cpus do not strictly honor the set of cpus | 43 | /* Careful. Some cpus do not strictly honor the set of cpus |
44 | * specified in the interrupt destination when using lowest | 44 | * specified in the interrupt destination when using lowest |
@@ -48,8 +48,7 @@ static cpumask_t vector_allocation_domain(int cpu) | |||
48 | * deliver interrupts to the wrong hyperthread when only one | 48 | * deliver interrupts to the wrong hyperthread when only one |
49 | * hyperthread was specified in the interrupt destination. | 49 | * hyperthread was specified in the interrupt destination. |
50 | */ | 50 | */ |
51 | cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; | 51 | *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; |
52 | return domain; | ||
53 | } | 52 | } |
54 | 53 | ||
55 | struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq); | 54 | struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq); |
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c
index 2c6d234e0009..2821ffc188b5 100644
--- a/arch/x86/mach-generic/summit.c
+++ b/arch/x86/mach-generic/summit.c
@@ -24,7 +24,7 @@ static int probe_summit(void)
 	return 0;
 }
 
-static cpumask_t vector_allocation_domain(int cpu)
+static void vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -34,8 +34,7 @@ static cpumask_t vector_allocation_domain(int cpu)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
 }
 
 struct genapic apic_summit = APIC_INIT("summit", probe_summit);
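In the three lowest-priority-delivery variants above (es7000, numaq, summit) the temporary "domain" variable plus return statement collapses into a single C99 compound-literal assignment. Because unnamed elements of a compound literal are zero-initialized, "*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };" clears the whole mask and sets word 0 in one statement. A standalone illustration of that idiom, with a stand-in type and a hypothetical ALL_CPUS value:

/* Sketch of the compound-literal idiom (not kernel source):
 * the assignment zeroes every word not named by a designator. */
#include <stdio.h>

#define ALL_CPUS 0xFFul		/* stand-in for APIC_ALL_CPUS */

typedef struct { unsigned long bits[4]; } mask_t;

int main(void)
{
	mask_t m = { { 0x1, 0x2, 0x3, 0x4 } };	/* stale contents */

	m = (mask_t){ .bits = { [0] = ALL_CPUS } };

	for (int i = 0; i < 4; i++)		/* bits[0]=0xff, rest 0 */
		printf("bits[%d] = %#lx\n", i, m.bits[i]);
	return 0;
}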
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 52145007bd7e..a5bc05492b1e 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -63,11 +63,6 @@ static int voyager_extended_cpus = 1;
 /* Used for the invalidate map that's also checked in the spinlock */
 static volatile unsigned long smp_invalidate_needed;
 
-/* Bitmask of currently online CPUs - used by setup.c for
-   /proc/cpuinfo, visible externally but still physical */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
-
 /* Bitmask of CPUs present in the system - exported by i386_syms.c, used
  * by scheduler but indexed physically */
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
@@ -218,8 +213,6 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 /* This is for the new dynamic CPU boot code */
 cpumask_t cpu_callin_map = CPU_MASK_NONE;
 cpumask_t cpu_callout_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_possible_map);
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -679,7 +672,7 @@ void __init smp_boot_cpus(void)
 
 	/* loop over all the extended VIC CPUs and boot them. The
 	 * Quad CPUs must be bootstrapped by their extended VIC cpu */
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
 			continue;
 		do_boot_cpu(i);
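The NR_CPUS -> nr_cpu_ids loop-bound changes here (and in numa_64.c, srat_64.c, and xen/smp.c below) are the scalability half of the series: NR_CPUS is the compile-time ceiling (4096 under MAXSMP), while nr_cpu_ids is fixed at boot to one past the highest possible CPU, so per-CPU loops stop scanning thousands of slots that can never be populated. A rough userspace sketch of the idea, with a hypothetical detect_cpus() standing in for boot-time discovery:

/* Sketch: bound per-CPU loops by the runtime count, not the
 * compile-time maximum (hypothetical names, not kernel source). */
#include <stdio.h>

#define NR_CPUS 4096		/* compile-time maximum */

static int nr_cpu_ids;		/* set once at "boot" */

static void detect_cpus(void)
{
	nr_cpu_ids = 8;		/* pretend the machine has 8 CPUs */
}

int main(void)
{
	int visited = 0;

	detect_cpus();
	/* for (i = 0; i < NR_CPUS; i++) would spin 4096 times;
	 * bounding by nr_cpu_ids touches only slots that can exist */
	for (int i = 0; i < nr_cpu_ids; i++)
		visited++;

	printf("visited %d of %d slots\n", visited, NR_CPUS);
	return 0;
}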
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index cebcbf152d46..71a14f89f89e 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -278,7 +278,7 @@ void __init numa_init_array(void)
 	int rr, i;
 
 	rr = first_node(node_online_map);
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		if (early_cpu_to_node(i) != NUMA_NO_NODE)
 			continue;
 		numa_set_node(i, rr);
@@ -549,7 +549,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn)
 	memnodemap[0] = 0;
 	node_set_online(0);
 	node_set(0, node_possible_map);
-	for (i = 0; i < NR_CPUS; i++)
+	for (i = 0; i < nr_cpu_ids; i++)
 		numa_set_node(i, 0);
 	e820_register_active_regions(0, start_pfn, last_pfn);
 	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 51c0a2fc14fe..09737c8af074 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -382,7 +382,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 		if (!node_online(i))
 			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		int node = early_cpu_to_node(i);
 
 		if (node == NUMA_NO_NODE)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 773d68d3e912..503c240e26c7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1082,7 +1082,7 @@ static void drop_other_mm_ref(void *info)
 
 static void xen_drop_mm_ref(struct mm_struct *mm)
 {
-	cpumask_t mask;
+	cpumask_var_t mask;
 	unsigned cpu;
 
 	if (current->active_mm == mm) {
@@ -1094,7 +1094,16 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
 	}
 
 	/* Get the "official" set of cpus referring to our pagetable. */
-	mask = mm->cpu_vm_mask;
+	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
+		for_each_online_cpu(cpu) {
+			if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
+			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
+				continue;
+			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
+		}
+		return;
+	}
+	cpumask_copy(mask, &mm->cpu_vm_mask);
 
 	/* It's possible that a vcpu may have a stale reference to our
 	   cr3, because its in lazy mode, and it hasn't yet flushed
@@ -1103,11 +1112,12 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
 	   if needed. */
 	for_each_online_cpu(cpu) {
 		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
-			cpu_set(cpu, mask);
+			cpumask_set_cpu(cpu, mask);
 	}
 
-	if (!cpus_empty(mask))
-		smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
+	if (!cpumask_empty(mask))
+		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
+	free_cpumask_var(mask);
 }
 #else
 static void xen_drop_mm_ref(struct mm_struct *mm)
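xen_drop_mm_ref() shows the allocation discipline for off-stack masks in atomic context: try alloc_cpumask_var() with GFP_ATOMIC, and when that fails, degrade to a slower per-CPU path (smp_call_function_single() for each candidate) rather than putting a 512-byte mask on the stack. A hedged userspace sketch of that allocate-or-degrade shape, with stand-in names throughout:

/* Sketch of the allocate-or-degrade pattern (stand-in names, not
 * kernel source): batch work through a temporary mask when the
 * allocation succeeds, fall back to one-at-a-time when it fails. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NCPUS 8

static void notify_one(int cpu)     { printf("notify cpu %d\n", cpu); }
static bool cpu_needs_work(int cpu) { return cpu % 3 == 0; }

int main(void)
{
	unsigned char *mask = calloc(NCPUS, 1);	/* ~alloc_cpumask_var() */

	if (!mask) {
		/* fallback: no mask, handle each CPU individually */
		for (int cpu = 0; cpu < NCPUS; cpu++)
			if (cpu_needs_work(cpu))
				notify_one(cpu);
		return 0;
	}

	/* fast path: collect the targets, then act on the batch */
	for (int cpu = 0; cpu < NCPUS; cpu++)
		if (cpu_needs_work(cpu))
			mask[cpu] = 1;

	for (int cpu = 0; cpu < NCPUS; cpu++)
		if (mask[cpu])
			notify_one(cpu);	/* ~smp_call_function_many() */

	free(mask);				/* ~free_cpumask_var() */
	return 0;
}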
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index acd9b6705e02..c44e2069c7c7 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -33,7 +33,7 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
-cpumask_t xen_cpu_initialized_map;
+cpumask_var_t xen_cpu_initialized_map;
 
 static DEFINE_PER_CPU(int, resched_irq);
 static DEFINE_PER_CPU(int, callfunc_irq);
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void)
 {
 	int i, rc;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
 		if (rc >= 0) {
 			num_processors++;
@@ -192,11 +192,14 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	if (xen_smp_intr_init(0))
 		BUG();
 
-	xen_cpu_initialized_map = cpumask_of_cpu(0);
+	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
+		panic("could not allocate xen_cpu_initialized_map\n");
+
+	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
 
 	/* Restrict the possible_map according to max_cpus. */
 	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
-		for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
+		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
 			continue;
 		cpu_clear(cpu, cpu_possible_map);
 	}
@@ -221,7 +224,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	struct vcpu_guest_context *ctxt;
 	struct desc_struct *gdt;
 
-	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
+	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
 		return 0;
 
 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
@@ -408,24 +411,23 @@ static void xen_smp_send_reschedule(int cpu)
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
+static void xen_send_IPI_mask(const struct cpumask *mask,
+			      enum ipi_vector vector)
 {
 	unsigned cpu;
 
-	cpus_and(mask, mask, cpu_online_map);
-
-	for_each_cpu_mask_nr(cpu, mask)
+	for_each_cpu_and(cpu, mask, cpu_online_mask)
 		xen_send_IPI_one(cpu, vector);
 }
 
-static void xen_smp_send_call_function_ipi(cpumask_t mask)
+static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
 {
 	int cpu;
 
 	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
 	/* Make sure other vcpus get a chance to run if they need to. */
-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu(cpu, mask) {
 		if (xen_vcpu_stolen(cpu)) {
 			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
 			break;
@@ -435,7 +437,8 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask)
 
 static void xen_smp_send_call_function_single_ipi(int cpu)
 {
-	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
+	xen_send_IPI_mask(cpumask_of(cpu),
+			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
 }
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
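The xen/smp.c IPI paths illustrate the third leg of the conversion: mask parameters become const struct cpumask *, and since the callee may no longer scribble on the caller's mask, the in-place cpus_and(mask, mask, cpu_online_map) becomes for_each_cpu_and(cpu, mask, cpu_online_mask), which walks the intersection of two sets without ever materializing it. A small sketch of iterating an intersection instead of computing one, using flat 64-bit masks rather than the kernel's iterator:

/* Sketch: walk the intersection of two CPU sets without building a
 * temporary mask (64-bit stand-in masks, not kernel source). */
#include <stdio.h>

int main(void)
{
	unsigned long long requested = 0xF00FULL;	/* caller's (const) mask */
	unsigned long long online    = 0x00FFULL;	/* online CPUs */

	/* old shape: and the masks together, then iterate the copy;
	 * impossible once "requested" arrives as a const pointer */

	/* new shape, like for_each_cpu_and(): test both sets per step */
	for (int cpu = 0; cpu < 64; cpu++)
		if ((requested >> cpu) & (online >> cpu) & 1ULL)
			printf("send IPI to cpu %d\n", cpu);
	return 0;
}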
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 2a234db5949b..212ffe012b76 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -35,7 +35,8 @@ void xen_post_suspend(int suspend_cancelled)
 			pfn_to_mfn(xen_start_info->console.domU.mfn);
 	} else {
 #ifdef CONFIG_SMP
-		xen_cpu_initialized_map = cpu_online_map;
+		BUG_ON(xen_cpu_initialized_map == NULL);
+		cpumask_copy(xen_cpu_initialized_map, cpu_online_mask);
 #endif
 		xen_vcpu_restore();
 	}
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index c9f7cda48ed7..65d75a6be0ba 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -437,7 +437,7 @@ void xen_setup_timer(int cpu)
 	evt = &per_cpu(xen_clock_events, cpu);
 	memcpy(evt, xen_clockevent, sizeof(*evt));
 
-	evt->cpumask = cpumask_of_cpu(cpu);
+	evt->cpumask = cpumask_of(cpu);
 	evt->irq = irq;
 
 	setup_runstate_info(cpu);
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 9e1afae8461f..c1f8faf0a2c5 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -58,7 +58,7 @@ void __init xen_init_spinlocks(void);
 __cpuinit void xen_init_lock_cpu(int cpu);
 void xen_uninit_lock_cpu(int cpu);
 
-extern cpumask_t xen_cpu_initialized_map;
+extern cpumask_var_t xen_cpu_initialized_map;
 #else
 static inline void xen_smp_init(void) {}
 #endif
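The header change is what keeps the whole series cheap on small systems: cpumask_var_t has two definitions depending on CONFIG_CPUMASK_OFFSTACK. With it off, the type is a one-element array, so "allocation" costs nothing and cannot fail; with it on, the type is a real pointer that must be allocated and freed. A compressed userspace sketch of the two shapes, paraphrased rather than copied from the kernel headers (the real alloc_cpumask_var() also takes gfp flags, omitted here):

/* Sketch (paraphrased, not the actual kernel headers) of the two
 * shapes of cpumask_var_t selected by CONFIG_CPUMASK_OFFSTACK. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct cpumask { unsigned long bits[64]; };	/* 4096 bits */

#ifdef CPUMASK_OFFSTACK
/* big NR_CPUS: a real pointer, allocated on demand */
typedef struct cpumask *cpumask_var_t;

static bool alloc_cpumask_var(cpumask_var_t *mask)
{
	*mask = calloc(1, sizeof(struct cpumask));
	return *mask != NULL;
}

static void free_cpumask_var(cpumask_var_t mask) { free(mask); }
#else
/* small NR_CPUS: a one-element array, so the variable decays to a
 * pointer at call sites and "allocation" always succeeds */
typedef struct cpumask cpumask_var_t[1];

static bool alloc_cpumask_var(cpumask_var_t *mask) { (void)mask; return true; }
static void free_cpumask_var(cpumask_var_t mask)   { (void)mask; }
#endif

int main(void)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask))
		return 1;
	printf("mask ready, %zu-byte cpumask\n", sizeof(struct cpumask));
	free_cpumask_var(mask);
	return 0;
}

Either way, callers write the same alloc/use/free sequence, which is why the conversions above can be applied uniformly across xen/mmu.c, xen/smp.c, and xen/suspend.c.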