author		Mike Travis <travis@sgi.com>	2008-12-16 20:33:52 -0500
committer	Mike Travis <travis@sgi.com>	2008-12-16 20:40:56 -0500
commit		e7986739a76cde5079da08809d8bbc6878387ae0 (patch)
tree		dd99ed6af66d459fe164f75ded7f95262dc0fb0d /arch/x86
parent		36f5101a60de8f79c0d1ca06e50660bf5129e02c (diff)
x86 smp: modify send_IPI_mask interface to accept cpumask_t pointers
Impact: cleanup, change parameter passing
* Change genapic interfaces to accept cpumask_t pointers where possible.
* Modify external callers to use cpumask_t pointers in function calls.
* Create a new send_IPI_mask_allbutself which is the same as the
  send_IPI_mask functions but removes smp_processor_id() from the list.
  This removes another common need for a temporary cpumask_t variable;
  a sketch of the new helper follows this list.
* Functions that used a temp cpumask_t variable for:
cpumask_t allbutme = cpu_online_map;
cpu_clear(smp_processor_id(), allbutme);
if (!cpus_empty(allbutme))
...
become:
if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu)))
...
* Other minor code optimizations (like using cpus_clear instead of
CPU_MASK_NONE, etc.)
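As an illustration of the new interface, the generic physical-mode
helper added to arch/x86/include/asm/ipi.h (quoted from the diff below)
walks the caller's mask by pointer and skips the sending CPU, so call
sites no longer copy and edit a full cpumask_t on the stack:

	static inline void send_IPI_mask_allbutself(const cpumask_t *mask,
						    int vector)
	{
		unsigned long flags;
		unsigned int query_cpu;
		unsigned int this_cpu = smp_processor_id();

		local_irq_save(flags);
		for_each_cpu_mask_nr(query_cpu, *mask)
			if (query_cpu != this_cpu)
				__send_IPI_dest_field(
					per_cpu(x86_cpu_to_apicid, query_cpu),
					vector, APIC_DEST_PHYSICAL);
		local_irq_restore(flags);
	}

Each genapic flavor gains an equivalent *_allbutself method alongside
its existing send_IPI_mask, as the per-file hunks below show.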
Applies to linux-2.6.tip/master.
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
30 files changed, 380 insertions(+), 273 deletions(-)
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
index ce547f24a1cd..dc6225ca48ad 100644
--- a/arch/x86/include/asm/bigsmp/apic.h
+++ b/arch/x86/include/asm/bigsmp/apic.h
@@ -9,12 +9,12 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return &cpu_online_map;
 #else
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 #endif
 }
 
@@ -79,7 +79,7 @@ static inline int apicid_to_node(int logical_apicid)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS)
+	if (mps_cpu < nr_cpu_ids)
 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 
 	return BAD_APICID;
@@ -94,7 +94,7 @@ extern u8 cpu_2_logical_apicid[];
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return cpu_physical_id(cpu);
 }
@@ -119,12 +119,12 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 	int apicid;
 
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	return apicid;
 }
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
index 9404c535b7ec..63553e9f22b2 100644
--- a/arch/x86/include/asm/bigsmp/ipi.h
+++ b/arch/x86/include/asm/bigsmp/ipi.h
@@ -1,9 +1,10 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
@@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector)
 	cpu_clear(smp_processor_id(), mask);
 
 	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+		send_IPI_mask(&mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(&cpu_online_map, vector);
 }
 
 #endif /* __ASM_MACH_IPI_H */
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
index e24ef876915f..4cac0837bb40 100644
--- a/arch/x86/include/asm/es7000/apic.h
+++ b/arch/x86/include/asm/es7000/apic.h
@@ -9,14 +9,14 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-static inline cpumask_t target_cpus_cluster(void)
+static inline const cpumask_t *target_cpus_cluster(void)
 {
-	return CPU_MASK_ALL;
+	return &CPU_MASK_ALL;
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-	return cpumask_of_cpu(smp_processor_id());
+	return &cpumask_of_cpu(smp_processor_id());
 }
 
 #define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
@@ -80,9 +80,10 @@ extern int apic_version [MAX_APICS];
 static inline void setup_apic_routing(void)
 {
 	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
 	printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
-		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
+		"Physical Cluster" : "Logical Cluster",
+		nr_ioapics, cpus_addr(*target_cpus())[0]);
 }
 
 static inline int multi_timer_check(int apic, int irq)
@@ -100,7 +101,7 @@ static inline int cpu_present_to_apicid(int mps_cpu)
 {
 	if (!mps_cpu)
 		return boot_cpu_physical_apicid;
-	else if (mps_cpu < NR_CPUS)
+	else if (mps_cpu < nr_cpu_ids)
 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
@@ -120,9 +121,9 @@ extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 #else
 	return logical_smp_processor_id();
 #endif
@@ -146,14 +147,15 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid)
 	return (1);
 }
 
-static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
+static inline unsigned int
+cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpumask_weight(cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return 0xFF;
@@ -161,10 +163,10 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster. If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = cpumask_first(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -179,14 +181,14 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 	return apicid;
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return cpu_to_logical_apicid(0);
@@ -194,10 +196,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster. If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpu_isset(cpu, *cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
index 632a955fcc0a..1a8507265f91 100644
--- a/arch/x86/include/asm/es7000/ipi.h
+++ b/arch/x86/include/asm/es7000/ipi.h
@@ -1,9 +1,10 @@
 #ifndef __ASM_ES7000_IPI_H
 #define __ASM_ES7000_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
@@ -13,12 +14,12 @@ static inline void send_IPI_allbutself(int vector)
 	cpumask_t mask = cpu_online_map;
 	cpu_clear(smp_processor_id(), mask);
 	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+		send_IPI_mask(&mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(&cpu_online_map, vector);
 }
 
 #endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
index 0ac17d33a8c7..b21ed21c574d 100644
--- a/arch/x86/include/asm/genapic_32.h
+++ b/arch/x86/include/asm/genapic_32.h
@@ -24,7 +24,7 @@ struct genapic {
 	int (*probe)(void);
 
 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
+	const cpumask_t *(*target_cpus)(void);
 	int int_delivery_mode;
 	int int_dest_mode;
 	int ESR_DISABLE;
@@ -57,12 +57,13 @@ struct genapic {
 
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
-	cpumask_t (*vector_allocation_domain)(int cpu);
+	unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
+	void (*vector_allocation_domain)(int cpu, cpumask_t *retmask);
 
 #ifdef CONFIG_SMP
 	/* ipi */
-	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_mask)(const cpumask_t *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 #endif
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
index 2cae011668b7..a020e7d35a40 100644
--- a/arch/x86/include/asm/genapic_64.h
+++ b/arch/x86/include/asm/genapic_64.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_GENAPIC_64_H
 #define _ASM_X86_GENAPIC_64_H
 
+#include <linux/cpumask.h>
+
 /*
  * Copyright 2004 James Cleverdon, IBM.
  * Subject to the GNU Public License, v.2
@@ -18,16 +20,17 @@ struct genapic {
 	u32 int_delivery_mode;
 	u32 int_dest_mode;
 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
-	cpumask_t (*vector_allocation_domain)(int cpu);
+	const cpumask_t *(*target_cpus)(void);
+	void (*vector_allocation_domain)(int cpu, cpumask_t *retmask);
 	void (*init_apic_ldr)(void);
 	/* ipi */
-	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_mask)(const cpumask_t *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 	void (*send_IPI_self)(int vector);
 	/* */
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+	unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
 	unsigned int (*phys_pkg_id)(int index_msb);
 	unsigned int (*get_apic_id)(unsigned long x);
 	unsigned long (*set_apic_id)(unsigned int id);
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index f89dffb28aa9..24b6e613edfa 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -117,7 +117,7 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
 	native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
+static inline void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
@@ -128,11 +128,28 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 	 * - mbligh
 	 */
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, mask) {
+	for_each_cpu_mask_nr(query_cpu, *mask) {
 		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
 				      vector, APIC_DEST_PHYSICAL);
 	}
 	local_irq_restore(flags);
 }
 
+static inline void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+{
+	unsigned long flags;
+	unsigned int query_cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	/* See Hack comment above */
+
+	local_irq_save(flags);
+	for_each_cpu_mask_nr(query_cpu, *mask)
+		if (query_cpu != this_cpu)
+			__send_IPI_dest_field(
+				per_cpu(x86_cpu_to_apicid, query_cpu),
+				vector, APIC_DEST_PHYSICAL);
+	local_irq_restore(flags);
+}
+
 #endif /* _ASM_X86_IPI_H */
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
index 6cb3a467e067..c18896b0508c 100644
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ b/arch/x86/include/asm/mach-default/mach_apic.h
@@ -8,12 +8,12 @@
 
 #define APIC_DFR_VALUE (APIC_DFR_FLAT)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return &cpu_online_map;
 #else
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 #endif
 }
 
@@ -61,9 +61,9 @@ static inline int apic_id_registered(void)
 	return physid_isset(read_apic_id(), phys_cpu_present_map);
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
-	return cpus_addr(cpumask)[0];
+	return cpus_addr(*cpumask)[0];
 }
 
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
@@ -88,7 +88,7 @@ static inline int apicid_to_node(int logical_apicid)
 #endif
 }
 
-static inline cpumask_t vector_allocation_domain(int cpu)
+static inline void vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -98,8 +98,7 @@ static inline cpumask_t vector_allocation_domain(int cpu)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
 }
 #endif
 
@@ -131,7 +130,7 @@ static inline int cpu_to_logical_apicid(int cpu)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS && cpu_present(mps_cpu))
+	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
 		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
index fabca01ebacf..9353ab854a10 100644
--- a/arch/x86/include/asm/mach-default/mach_ipi.h
+++ b/arch/x86/include/asm/mach-default/mach_ipi.h
@@ -4,7 +4,8 @@
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
 
-void send_IPI_mask_bitmask(cpumask_t mask, int vector);
+void send_IPI_mask_bitmask(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 void __send_IPI_shortcut(unsigned int shortcut, int vector);
 
 extern int no_broadcast;
@@ -12,28 +13,27 @@ extern int no_broadcast;
 #ifdef CONFIG_X86_64
 #include <asm/genapic.h>
 #define send_IPI_mask (genapic->send_IPI_mask)
+#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
 #else
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	send_IPI_mask_bitmask(mask, vector);
 }
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 #endif
 
 static inline void __local_send_IPI_allbutself(int vector)
 {
-	if (no_broadcast || vector == NMI_VECTOR) {
-		cpumask_t mask = cpu_online_map;
-
-		cpu_clear(smp_processor_id(), mask);
-		send_IPI_mask(mask, vector);
-	} else
+	if (no_broadcast || vector == NMI_VECTOR)
+		send_IPI_mask_allbutself(&cpu_online_map, vector);
+	else
 		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
 }
 
 static inline void __local_send_IPI_all(int vector)
 {
 	if (no_broadcast || vector == NMI_VECTOR)
-		send_IPI_mask(cpu_online_map, vector);
+		send_IPI_mask(&cpu_online_map, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
 }
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
index 0bf2a06b7a4e..1df7ebe738e5 100644
--- a/arch/x86/include/asm/numaq/apic.h
+++ b/arch/x86/include/asm/numaq/apic.h
@@ -7,9 +7,9 @@
 
 #define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-	return CPU_MASK_ALL;
+	return &CPU_MASK_ALL;
 }
 
 #define NO_BALANCE_IRQ (1)
@@ -122,7 +122,7 @@ static inline void enable_apic_mode(void)
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	return (int) 0xF;
 }
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h
index 935588d286cf..c734d7acc430 100644
--- a/arch/x86/include/asm/numaq/ipi.h
+++ b/arch/x86/include/asm/numaq/ipi.h
@@ -1,9 +1,10 @@
 #ifndef __ASM_NUMAQ_IPI_H
 #define __ASM_NUMAQ_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t, int vector);
+void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
@@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector)
 	cpu_clear(smp_processor_id(), mask);
 
 	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+		send_IPI_mask(&mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(&cpu_online_map, vector);
 }
 
 #endif /* __ASM_NUMAQ_IPI_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index d12811ce51d9..c4a9aa52df6e 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -60,7 +60,7 @@ struct smp_ops {
 	void (*cpu_die)(unsigned int cpu);
 	void (*play_dead)(void);
 
-	void (*send_call_func_ipi)(cpumask_t mask);
+	void (*send_call_func_ipi)(const cpumask_t *mask);
 	void (*send_call_func_single_ipi)(int cpu);
 };
 
@@ -125,7 +125,7 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 
 static inline void arch_send_call_function_ipi(cpumask_t mask)
 {
-	smp_ops.send_call_func_ipi(mask);
+	smp_ops.send_call_func_ipi(&mask);
 }
 
 void cpu_disable_common(void);
@@ -138,7 +138,7 @@ void native_cpu_die(unsigned int cpu);
 void native_play_dead(void);
 void play_dead_common(void);
 
-void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_ipi(const cpumask_t *mask);
 void native_send_call_func_single_ipi(int cpu);
 
 extern void prefill_possible_map(void);
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
index 9b3070f1c2ac..437dc83725ca 100644
--- a/arch/x86/include/asm/summit/apic.h
+++ b/arch/x86/include/asm/summit/apic.h
@@ -14,13 +14,13 @@
 
 #define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 	/* CPU_MASK_ALL (0xff) has undefined behaviour with
 	 * dest_LowestPrio mode logical clustered apic interrupt routing
 	 * Just start on cpu 0. IRQ balancing will spread load
 	 */
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 }
 
 #define INT_DELIVERY_MODE (dest_LowestPrio)
@@ -137,14 +137,14 @@ static inline void enable_apic_mode(void)
 {
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return (int) 0xFF;
@@ -152,10 +152,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster. If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpu_isset(cpu, *cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h
index 53bd1e7bd7b4..a8a2c24f50cc 100644
--- a/arch/x86/include/asm/summit/ipi.h
+++ b/arch/x86/include/asm/summit/ipi.h
@@ -1,9 +1,10 @@
 #ifndef __ASM_SUMMIT_IPI_H
 #define __ASM_SUMMIT_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
@@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector)
 	cpu_clear(smp_processor_id(), mask);
 
 	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+		send_IPI_mask(&mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(&cpu_online_map, vector);
 }
 
 #endif /* __ASM_SUMMIT_IPI_H */
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index b2cef49f3085..a375791c08ca 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -141,7 +141,7 @@ static int lapic_next_event(unsigned long delta,
 			    struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
 			      struct clock_event_device *evt);
-static void lapic_timer_broadcast(const struct cpumask *mask);
+static void lapic_timer_broadcast(const cpumask_t *mask);
 static void apic_pm_activate(void);
 
 /*
@@ -453,10 +453,10 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(const struct cpumask *mask)
+static void lapic_timer_broadcast(const cpumask_t *mask)
 {
 #ifdef CONFIG_SMP
-	send_IPI_mask(*mask, LOCAL_TIMER_VECTOR);
+	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 #endif
 }
 
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 268553817909..81e01f7b1d12 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -77,10 +77,7 @@ static int crash_nmi_callback(struct notifier_block *self,
 
 static void smp_send_nmi_allbutself(void)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(safe_smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, NMI_VECTOR);
+	send_IPI_allbutself(NMI_VECTOR);
 }
 
 static struct notifier_block crash_nmi_nb = {
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index c0262791bda4..50eebd0328fe 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 1;
 }
 
-static cpumask_t flat_target_cpus(void)
+static const cpumask_t *flat_target_cpus(void)
 {
-	return cpu_online_map;
+	return &cpu_online_map;
 }
 
-static cpumask_t flat_vector_allocation_domain(int cpu)
+static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -45,8 +45,7 @@ static cpumask_t flat_vector_allocation_domain(int cpu)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t) { {[0] = APIC_ALL_CPUS, } };
 }
 
 /*
@@ -69,9 +68,8 @@ static void flat_init_apic_ldr(void)
 	apic_write(APIC_LDR, val);
 }
 
-static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
+static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
 {
-	unsigned long mask = cpus_addr(cpumask)[0];
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -79,20 +77,40 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
 	local_irq_restore(flags);
 }
 
+static void flat_send_IPI_mask(const cpumask_t *cpumask, int vector)
+{
+	unsigned long mask = cpus_addr(*cpumask)[0];
+
+	_flat_send_IPI_mask(mask, vector);
+}
+
+static void flat_send_IPI_mask_allbutself(const cpumask_t *cpumask, int vector)
+{
+	unsigned long mask = cpus_addr(*cpumask)[0];
+	int cpu = smp_processor_id();
+
+	if (cpu < BITS_PER_LONG)
+		clear_bit(cpu, &mask);
+	_flat_send_IPI_mask(mask, vector);
+}
+
 static void flat_send_IPI_allbutself(int vector)
 {
+	int cpu = smp_processor_id();
 #ifdef CONFIG_HOTPLUG_CPU
 	int hotplug = 1;
 #else
 	int hotplug = 0;
 #endif
 	if (hotplug || vector == NMI_VECTOR) {
-		cpumask_t allbutme = cpu_online_map;
+		if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu))) {
+			unsigned long mask = cpus_addr(cpu_online_map)[0];
 
-		cpu_clear(smp_processor_id(), allbutme);
+			if (cpu < BITS_PER_LONG)
+				clear_bit(cpu, &mask);
 
-		if (!cpus_empty(allbutme))
-			flat_send_IPI_mask(allbutme, vector);
+			_flat_send_IPI_mask(mask, vector);
+		}
 	} else if (num_online_cpus() > 1) {
 		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
 	}
@@ -101,7 +119,7 @@ static void flat_send_IPI_allbutself(int vector)
 static void flat_send_IPI_all(int vector)
 {
 	if (vector == NMI_VECTOR)
-		flat_send_IPI_mask(cpu_online_map, vector);
+		flat_send_IPI_mask(&cpu_online_map, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
 }
@@ -135,9 +153,9 @@ static int flat_apic_id_registered(void)
 	return physid_isset(read_xapic_id(), phys_cpu_present_map);
 }
 
-static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
-	return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
+	return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS;
 }
 
 static unsigned int phys_pkg_id(int index_msb)
@@ -157,6 +175,7 @@ struct genapic apic_flat = {
 	.send_IPI_all = flat_send_IPI_all,
 	.send_IPI_allbutself = flat_send_IPI_allbutself,
 	.send_IPI_mask = flat_send_IPI_mask,
+	.send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
 	.send_IPI_self = apic_send_IPI_self,
 	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
 	.phys_pkg_id = phys_pkg_id,
@@ -188,35 +207,39 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
-static cpumask_t physflat_target_cpus(void)
+static const cpumask_t *physflat_target_cpus(void)
 {
-	return cpu_online_map;
+	return &cpu_online_map;
 }
 
-static cpumask_t physflat_vector_allocation_domain(int cpu)
+static void physflat_vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
-	return cpumask_of_cpu(cpu);
+	cpus_clear(*retmask);
+	cpu_set(cpu, *retmask);
 }
 
-static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
+static void physflat_send_IPI_mask(const cpumask_t *cpumask, int vector)
 {
 	send_IPI_mask_sequence(cpumask, vector);
 }
 
-static void physflat_send_IPI_allbutself(int vector)
+static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask,
+					      int vector)
 {
-	cpumask_t allbutme = cpu_online_map;
+	send_IPI_mask_allbutself(cpumask, vector);
+}
 
-	cpu_clear(smp_processor_id(), allbutme);
-	physflat_send_IPI_mask(allbutme, vector);
+static void physflat_send_IPI_allbutself(int vector)
+{
+	send_IPI_mask_allbutself(&cpu_online_map, vector);
 }
 
 static void physflat_send_IPI_all(int vector)
 {
-	physflat_send_IPI_mask(cpu_online_map, vector);
+	physflat_send_IPI_mask(&cpu_online_map, vector);
 }
 
-static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 
@@ -224,7 +247,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
@@ -243,6 +266,7 @@ struct genapic apic_physflat = {
 	.send_IPI_all = physflat_send_IPI_all,
 	.send_IPI_allbutself = physflat_send_IPI_allbutself,
 	.send_IPI_mask = physflat_send_IPI_mask,
+	.send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
 	.send_IPI_self = apic_send_IPI_self,
 	.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
 	.phys_pkg_id = phys_pkg_id,
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index f6a2c8eb48a6..f5fa9a91ad38 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -22,19 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
 
-static cpumask_t x2apic_target_cpus(void)
+static const cpumask_t *x2apic_target_cpus(void)
 {
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 }
 
 /*
  * for now each logical cpu is in its own vector allocation domain.
 */
-static cpumask_t x2apic_vector_allocation_domain(int cpu)
+static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
-	cpumask_t domain = CPU_MASK_NONE;
-	cpu_set(cpu, domain);
-	return domain;
+	cpus_clear(*retmask);
+	cpu_set(cpu, *retmask);
 }
 
 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -56,32 +55,52 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
 * at once. We have 16 cpu's in a cluster. This will minimize IPI register
 * writes.
 */
-static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
+static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 
 	local_irq_save(flags);
-	for_each_cpu_mask(query_cpu, mask) {
-		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu),
-				       vector, APIC_DEST_LOGICAL);
-	}
+	for_each_cpu_mask_nr(query_cpu, *mask)
+		__x2apic_send_IPI_dest(
+			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+			vector, APIC_DEST_LOGICAL);
 	local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_allbutself(int vector)
+static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
 {
-	cpumask_t mask = cpu_online_map;
+	unsigned long flags;
+	unsigned long query_cpu;
+	unsigned long this_cpu = smp_processor_id();
 
-	cpu_clear(smp_processor_id(), mask);
+	local_irq_save(flags);
+	for_each_cpu_mask_nr(query_cpu, *mask)
+		if (query_cpu != this_cpu)
+			__x2apic_send_IPI_dest(
+				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+				vector, APIC_DEST_LOGICAL);
+	local_irq_restore(flags);
+}
 
-	if (!cpus_empty(mask))
-		x2apic_send_IPI_mask(mask, vector);
+static void x2apic_send_IPI_allbutself(int vector)
+{
+	unsigned long flags;
+	unsigned long query_cpu;
+	unsigned long this_cpu = smp_processor_id();
+
+	local_irq_save(flags);
+	for_each_online_cpu(query_cpu)
+		if (query_cpu != this_cpu)
+			__x2apic_send_IPI_dest(
+				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+				vector, APIC_DEST_LOGICAL);
+	local_irq_restore(flags);
 }
 
 static void x2apic_send_IPI_all(int vector)
 {
-	x2apic_send_IPI_mask(cpu_online_map, vector);
+	x2apic_send_IPI_mask(&cpu_online_map, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -89,7 +108,7 @@ static int x2apic_apic_id_registered(void)
 	return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 
@@ -97,8 +116,8 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(cpumask);
-	if ((unsigned)cpu < NR_CPUS)
+	cpu = first_cpu(*cpumask);
+	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_logical_apicid, cpu);
 	else
 		return BAD_APICID;
@@ -150,6 +169,7 @@ struct genapic apic_x2apic_cluster = {
 	.send_IPI_all = x2apic_send_IPI_all,
 	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
 	.send_IPI_mask = x2apic_send_IPI_mask,
+	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
 	.send_IPI_self = x2apic_send_IPI_self,
 	.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
 	.phys_pkg_id = phys_pkg_id,
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c
index d042211768b7..41c27b2f3d01 100644
--- a/arch/x86/kernel/genx2apic_phys.c
+++ b/arch/x86/kernel/genx2apic_phys.c
@@ -29,16 +29,15 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
 
-static cpumask_t x2apic_target_cpus(void)
+static const cpumask_t *x2apic_target_cpus(void)
 {
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 }
 
-static cpumask_t x2apic_vector_allocation_domain(int cpu)
+static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
-	cpumask_t domain = CPU_MASK_NONE;
-	cpu_set(cpu, domain);
-	return domain;
+	cpus_clear(*retmask);
+	cpu_set(cpu, *retmask);
 }
 
 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -54,32 +53,53 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
 	x2apic_icr_write(cfg, apicid);
 }
 
-static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
+static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
 
 	local_irq_save(flags);
-	for_each_cpu_mask(query_cpu, mask) {
+	for_each_cpu_mask_nr(query_cpu, *mask) {
 		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
 				       vector, APIC_DEST_PHYSICAL);
 	}
 	local_irq_restore(flags);
 }
 
-static void x2apic_send_IPI_allbutself(int vector)
+static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
 {
-	cpumask_t mask = cpu_online_map;
+	unsigned long flags;
+	unsigned long query_cpu;
+	unsigned long this_cpu = smp_processor_id();
+
+	local_irq_save(flags);
+	for_each_cpu_mask_nr(query_cpu, *mask) {
+		if (query_cpu != this_cpu)
+			__x2apic_send_IPI_dest(
+				per_cpu(x86_cpu_to_apicid, query_cpu),
+				vector, APIC_DEST_PHYSICAL);
+	}
+	local_irq_restore(flags);
+}
 
-	cpu_clear(smp_processor_id(), mask);
+static void x2apic_send_IPI_allbutself(int vector)
+{
+	unsigned long flags;
+	unsigned long query_cpu;
+	unsigned long this_cpu = smp_processor_id();
 
-	if (!cpus_empty(mask))
-		x2apic_send_IPI_mask(mask, vector);
+	local_irq_save(flags);
+	for_each_online_cpu(query_cpu)
+		if (query_cpu != this_cpu)
+			__x2apic_send_IPI_dest(
+				per_cpu(x86_cpu_to_apicid, query_cpu),
+				vector, APIC_DEST_PHYSICAL);
+	local_irq_restore(flags);
 }
 
 static void x2apic_send_IPI_all(int vector)
 {
-	x2apic_send_IPI_mask(cpu_online_map, vector);
+	x2apic_send_IPI_mask(&cpu_online_map, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -87,7 +107,7 @@ static int x2apic_apic_id_registered(void)
 	return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 
@@ -95,8 +115,8 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(cpumask);
-	if ((unsigned)cpu < NR_CPUS)
+	cpu = first_cpu(*cpumask);
+	if ((unsigned)cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
 		return BAD_APICID;
@@ -145,6 +165,7 @@ struct genapic apic_x2apic_phys = {
 	.send_IPI_all = x2apic_send_IPI_all,
 	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
 	.send_IPI_mask = x2apic_send_IPI_mask,
+	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
 	.send_IPI_self = x2apic_send_IPI_self,
 	.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
 	.phys_pkg_id = phys_pkg_id,
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 2c7dbdb98278..010659415ae4 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -75,16 +75,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
 
-static cpumask_t uv_target_cpus(void)
+static const cpumask_t *uv_target_cpus(void)
 {
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 }
 
-static cpumask_t uv_vector_allocation_domain(int cpu)
+static void uv_vector_allocation_domain(int cpu, cpumask_t *retmask)
 {
-	cpumask_t domain = CPU_MASK_NONE;
-	cpu_set(cpu, domain);
-	return domain;
+	cpus_clear(*retmask);
+	cpu_set(cpu, *retmask);
 }
 
 int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
@@ -123,28 +122,37 @@ static void uv_send_IPI_one(int cpu, int vector)
 	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
-static void uv_send_IPI_mask(cpumask_t mask, int vector)
+static void uv_send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	unsigned int cpu;
 
-	for_each_possible_cpu(cpu)
-		if (cpu_isset(cpu, mask))
+	for_each_cpu_mask_nr(cpu, *mask)
+		uv_send_IPI_one(cpu, vector);
+}
+
+static void uv_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+{
+	unsigned int cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	for_each_cpu_mask_nr(cpu, *mask)
+		if (cpu != this_cpu)
 			uv_send_IPI_one(cpu, vector);
 }
 
 static void uv_send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-
-	cpu_clear(smp_processor_id(), mask);
+	unsigned int cpu;
+	unsigned int this_cpu = smp_processor_id();
 
-	if (!cpus_empty(mask))
-		uv_send_IPI_mask(mask, vector);
+	for_each_online_cpu(cpu)
+		if (cpu != this_cpu)
+			uv_send_IPI_one(cpu, vector);
 }
 
 static void uv_send_IPI_all(int vector)
 {
-	uv_send_IPI_mask(cpu_online_map, vector);
+	uv_send_IPI_mask(&cpu_online_map, vector);
 }
 
 static int uv_apic_id_registered(void)
@@ -156,7 +164,7 @@ static void uv_init_apic_ldr(void)
 {
 }
 
-static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 
@@ -164,7 +172,7 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
168 | if ((unsigned)cpu < nr_cpu_ids) | 176 | if ((unsigned)cpu < nr_cpu_ids) |
169 | return per_cpu(x86_cpu_to_apicid, cpu); | 177 | return per_cpu(x86_cpu_to_apicid, cpu); |
170 | else | 178 | else |
@@ -218,6 +226,7 @@ struct genapic apic_x2apic_uv_x = { | |||
218 | .send_IPI_all = uv_send_IPI_all, | 226 | .send_IPI_all = uv_send_IPI_all, |
219 | .send_IPI_allbutself = uv_send_IPI_allbutself, | 227 | .send_IPI_allbutself = uv_send_IPI_allbutself, |
220 | .send_IPI_mask = uv_send_IPI_mask, | 228 | .send_IPI_mask = uv_send_IPI_mask, |
229 | .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself, | ||
221 | .send_IPI_self = uv_send_IPI_self, | 230 | .send_IPI_self = uv_send_IPI_self, |
222 | .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, | 231 | .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, |
223 | .phys_pkg_id = phys_pkg_id, | 232 | .phys_pkg_id = phys_pkg_id, |
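uv_vector_allocation_domain now fills a caller-provided mask instead of returning a cpumask_t by value, and the mach-generic probes further down get the identical treatment. A rough sketch of the two calling conventions, using a toy mask_model in place of cpumask_t:

    #include <string.h>

    #define NCPUS 64
    typedef struct { unsigned long bits[NCPUS / 64]; } mask_model;  /* toy cpumask */

    /* Old shape: the whole struct travels back through the return value. */
    static mask_model alloc_domain_byval(int cpu)
    {
        mask_model m;

        memset(&m, 0, sizeof(m));
        m.bits[cpu / 64] |= 1UL << (cpu % 64);
        return m;
    }

    /* New shape: the caller owns the storage; only a pointer crosses the call. */
    static void alloc_domain_byref(int cpu, mask_model *retmask)
    {
        memset(retmask, 0, sizeof(*retmask));
        retmask->bits[cpu / 64] |= 1UL << (cpu % 64);
    }

    int main(void)
    {
        mask_model a = alloc_domain_byval(3);
        mask_model b;

        alloc_domain_byref(3, &b);
        return memcmp(&a, &b, sizeof(a));   /* 0: identical contents */
    }

The by-value form copies the entire struct back through the return slot on every call; the by-reference form moves a single pointer, which is why the genapic hook grew the retmask parameter.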
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 3d7d0d55253f..7f23ce7f5518 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -231,7 +231,8 @@ static struct irq_cfg *irq_cfg(unsigned int irq) | |||
231 | 231 | ||
232 | #endif | 232 | #endif |
233 | 233 | ||
234 | static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask) | 234 | static inline void |
235 | set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) | ||
235 | { | 236 | { |
236 | } | 237 | } |
237 | 238 | ||
@@ -396,7 +397,8 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
396 | } | 397 | } |
397 | } | 398 | } |
398 | 399 | ||
399 | static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask); | 400 | static int |
401 | assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); | ||
400 | 402 | ||
401 | static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, | 403 | static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, |
402 | const struct cpumask *mask) | 404 | const struct cpumask *mask) |
@@ -412,13 +414,13 @@ static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, | |||
412 | 414 | ||
413 | irq = desc->irq; | 415 | irq = desc->irq; |
414 | cfg = desc->chip_data; | 416 | cfg = desc->chip_data; |
415 | if (assign_irq_vector(irq, cfg, *mask)) | 417 | if (assign_irq_vector(irq, cfg, mask)) |
416 | return; | 418 | return; |
417 | 419 | ||
418 | set_extra_move_desc(desc, *mask); | 420 | set_extra_move_desc(desc, mask); |
419 | 421 | ||
420 | cpumask_and(&tmp, &cfg->domain, mask); | 422 | cpumask_and(&tmp, &cfg->domain, mask); |
421 | dest = cpu_mask_to_apicid(tmp); | 423 | dest = cpu_mask_to_apicid(&tmp); |
422 | /* | 424 | /* |
423 | * Only the high 8 bits are valid. | 425 | * Only the high 8 bits are valid. |
424 | */ | 426 | */ |
@@ -1099,7 +1101,8 @@ void unlock_vector_lock(void) | |||
1099 | spin_unlock(&vector_lock); | 1101 | spin_unlock(&vector_lock); |
1100 | } | 1102 | } |
1101 | 1103 | ||
1102 | static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) | 1104 | static int |
1105 | __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | ||
1103 | { | 1106 | { |
1104 | /* | 1107 | /* |
1105 | * NOTE! The local APIC isn't very good at handling | 1108 | * NOTE! The local APIC isn't very good at handling |
@@ -1115,35 +1118,32 @@ static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) | |||
1115 | static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; | 1118 | static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; |
1116 | unsigned int old_vector; | 1119 | unsigned int old_vector; |
1117 | int cpu; | 1120 | int cpu; |
1121 | cpumask_t tmp_mask; | ||
1118 | 1122 | ||
1119 | if ((cfg->move_in_progress) || cfg->move_cleanup_count) | 1123 | if ((cfg->move_in_progress) || cfg->move_cleanup_count) |
1120 | return -EBUSY; | 1124 | return -EBUSY; |
1121 | 1125 | ||
1122 | /* Only try and allocate irqs on cpus that are present */ | ||
1123 | cpus_and(mask, mask, cpu_online_map); | ||
1124 | |||
1125 | old_vector = cfg->vector; | 1126 | old_vector = cfg->vector; |
1126 | if (old_vector) { | 1127 | if (old_vector) { |
1127 | cpumask_t tmp; | 1128 | cpus_and(tmp_mask, *mask, cpu_online_map); |
1128 | cpus_and(tmp, cfg->domain, mask); | 1129 | cpus_and(tmp_mask, cfg->domain, tmp_mask); |
1129 | if (!cpus_empty(tmp)) | 1130 | if (!cpus_empty(tmp_mask)) |
1130 | return 0; | 1131 | return 0; |
1131 | } | 1132 | } |
1132 | 1133 | ||
1133 | for_each_cpu_mask_nr(cpu, mask) { | 1134 | /* Only try and allocate irqs on cpus that are present */ |
1134 | cpumask_t domain, new_mask; | 1135 | for_each_cpu_and(cpu, mask, &cpu_online_map) { |
1135 | int new_cpu; | 1136 | int new_cpu; |
1136 | int vector, offset; | 1137 | int vector, offset; |
1137 | 1138 | ||
1138 | domain = vector_allocation_domain(cpu); | 1139 | vector_allocation_domain(cpu, &tmp_mask); |
1139 | cpus_and(new_mask, domain, cpu_online_map); | ||
1140 | 1140 | ||
1141 | vector = current_vector; | 1141 | vector = current_vector; |
1142 | offset = current_offset; | 1142 | offset = current_offset; |
1143 | next: | 1143 | next: |
1144 | vector += 8; | 1144 | vector += 8; |
1145 | if (vector >= first_system_vector) { | 1145 | if (vector >= first_system_vector) { |
1146 | /* If we run out of vectors on large boxen, must share them. */ | 1146 | /* If out of vectors on large boxen, must share them. */ |
1147 | offset = (offset + 1) % 8; | 1147 | offset = (offset + 1) % 8; |
1148 | vector = FIRST_DEVICE_VECTOR + offset; | 1148 | vector = FIRST_DEVICE_VECTOR + offset; |
1149 | } | 1149 | } |
@@ -1156,7 +1156,7 @@ next: | |||
1156 | if (vector == SYSCALL_VECTOR) | 1156 | if (vector == SYSCALL_VECTOR) |
1157 | goto next; | 1157 | goto next; |
1158 | #endif | 1158 | #endif |
1159 | for_each_cpu_mask_nr(new_cpu, new_mask) | 1159 | for_each_cpu_and(new_cpu, &tmp_mask, &cpu_online_map) |
1160 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) | 1160 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) |
1161 | goto next; | 1161 | goto next; |
1162 | /* Found one! */ | 1162 | /* Found one! */ |
@@ -1166,16 +1166,17 @@ next: | |||
1166 | cfg->move_in_progress = 1; | 1166 | cfg->move_in_progress = 1; |
1167 | cfg->old_domain = cfg->domain; | 1167 | cfg->old_domain = cfg->domain; |
1168 | } | 1168 | } |
1169 | for_each_cpu_mask_nr(new_cpu, new_mask) | 1169 | for_each_cpu_and(new_cpu, &tmp_mask, &cpu_online_map) |
1170 | per_cpu(vector_irq, new_cpu)[vector] = irq; | 1170 | per_cpu(vector_irq, new_cpu)[vector] = irq; |
1171 | cfg->vector = vector; | 1171 | cfg->vector = vector; |
1172 | cfg->domain = domain; | 1172 | cfg->domain = tmp_mask; |
1173 | return 0; | 1173 | return 0; |
1174 | } | 1174 | } |
1175 | return -ENOSPC; | 1175 | return -ENOSPC; |
1176 | } | 1176 | } |
1177 | 1177 | ||
1178 | static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) | 1178 | static int |
1179 | assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | ||
1179 | { | 1180 | { |
1180 | int err; | 1181 | int err; |
1181 | unsigned long flags; | 1182 | unsigned long flags; |
@@ -1384,8 +1385,8 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de | |||
1384 | 1385 | ||
1385 | cfg = desc->chip_data; | 1386 | cfg = desc->chip_data; |
1386 | 1387 | ||
1387 | mask = TARGET_CPUS; | 1388 | mask = *TARGET_CPUS; |
1388 | if (assign_irq_vector(irq, cfg, mask)) | 1389 | if (assign_irq_vector(irq, cfg, &mask)) |
1389 | return; | 1390 | return; |
1390 | 1391 | ||
1391 | cpus_and(mask, cfg->domain, mask); | 1392 | cpus_and(mask, cfg->domain, mask); |
@@ -1398,7 +1399,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de | |||
1398 | 1399 | ||
1399 | 1400 | ||
1400 | if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, | 1401 | if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, |
1401 | cpu_mask_to_apicid(mask), trigger, polarity, | 1402 | cpu_mask_to_apicid(&mask), trigger, polarity, |
1402 | cfg->vector)) { | 1403 | cfg->vector)) { |
1403 | printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", | 1404 | printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", |
1404 | mp_ioapics[apic].mp_apicid, pin); | 1405 | mp_ioapics[apic].mp_apicid, pin); |
@@ -2121,7 +2122,7 @@ static int ioapic_retrigger_irq(unsigned int irq) | |||
2121 | unsigned long flags; | 2122 | unsigned long flags; |
2122 | 2123 | ||
2123 | spin_lock_irqsave(&vector_lock, flags); | 2124 | spin_lock_irqsave(&vector_lock, flags); |
2124 | send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); | 2125 | send_IPI_mask(&cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); |
2125 | spin_unlock_irqrestore(&vector_lock, flags); | 2126 | spin_unlock_irqrestore(&vector_lock, flags); |
2126 | 2127 | ||
2127 | return 1; | 2128 | return 1; |
@@ -2170,18 +2171,19 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration); | |||
2170 | * as simple as edge triggered migration and we can do the irq migration | 2171 | * as simple as edge triggered migration and we can do the irq migration |
2171 | * with a simple atomic update to IO-APIC RTE. | 2172 | * with a simple atomic update to IO-APIC RTE. |
2172 | */ | 2173 | */ |
2173 | static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) | 2174 | static void |
2175 | migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | ||
2174 | { | 2176 | { |
2175 | struct irq_cfg *cfg; | 2177 | struct irq_cfg *cfg; |
2176 | cpumask_t tmp, cleanup_mask; | 2178 | cpumask_t tmpmask; |
2177 | struct irte irte; | 2179 | struct irte irte; |
2178 | int modify_ioapic_rte; | 2180 | int modify_ioapic_rte; |
2179 | unsigned int dest; | 2181 | unsigned int dest; |
2180 | unsigned long flags; | 2182 | unsigned long flags; |
2181 | unsigned int irq; | 2183 | unsigned int irq; |
2182 | 2184 | ||
2183 | cpus_and(tmp, mask, cpu_online_map); | 2185 | cpus_and(tmpmask, *mask, cpu_online_map); |
2184 | if (cpus_empty(tmp)) | 2186 | if (cpus_empty(tmpmask)) |
2185 | return; | 2187 | return; |
2186 | 2188 | ||
2187 | irq = desc->irq; | 2189 | irq = desc->irq; |
@@ -2194,8 +2196,8 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) | |||
2194 | 2196 | ||
2195 | set_extra_move_desc(desc, mask); | 2197 | set_extra_move_desc(desc, mask); |
2196 | 2198 | ||
2197 | cpus_and(tmp, cfg->domain, mask); | 2199 | cpus_and(tmpmask, cfg->domain, *mask); |
2198 | dest = cpu_mask_to_apicid(tmp); | 2200 | dest = cpu_mask_to_apicid(&tmpmask); |
2199 | 2201 | ||
2200 | modify_ioapic_rte = desc->status & IRQ_LEVEL; | 2202 | modify_ioapic_rte = desc->status & IRQ_LEVEL; |
2201 | if (modify_ioapic_rte) { | 2203 | if (modify_ioapic_rte) { |
@@ -2213,13 +2215,13 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) | |||
2213 | modify_irte(irq, &irte); | 2215 | modify_irte(irq, &irte); |
2214 | 2216 | ||
2215 | if (cfg->move_in_progress) { | 2217 | if (cfg->move_in_progress) { |
2216 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | 2218 | cpus_and(tmpmask, cfg->old_domain, cpu_online_map); |
2217 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | 2219 | cfg->move_cleanup_count = cpus_weight(tmpmask); |
2218 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | 2220 | send_IPI_mask(&tmpmask, IRQ_MOVE_CLEANUP_VECTOR); |
2219 | cfg->move_in_progress = 0; | 2221 | cfg->move_in_progress = 0; |
2220 | } | 2222 | } |
2221 | 2223 | ||
2222 | desc->affinity = mask; | 2224 | desc->affinity = *mask; |
2223 | } | 2225 | } |
2224 | 2226 | ||
2225 | static int migrate_irq_remapped_level_desc(struct irq_desc *desc) | 2227 | static int migrate_irq_remapped_level_desc(struct irq_desc *desc) |
@@ -2241,7 +2243,7 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc) | |||
2241 | } | 2243 | } |
2242 | 2244 | ||
2243 | /* everything is clear. we have right of way */ | 2245 | /* everything is clear. we have right of way */ |
2244 | migrate_ioapic_irq_desc(desc, desc->pending_mask); | 2246 | migrate_ioapic_irq_desc(desc, &desc->pending_mask); |
2245 | 2247 | ||
2246 | ret = 0; | 2248 | ret = 0; |
2247 | desc->status &= ~IRQ_MOVE_PENDING; | 2249 | desc->status &= ~IRQ_MOVE_PENDING; |
@@ -2292,7 +2294,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, | |||
2292 | return; | 2294 | return; |
2293 | } | 2295 | } |
2294 | 2296 | ||
2295 | migrate_ioapic_irq_desc(desc, *mask); | 2297 | migrate_ioapic_irq_desc(desc, mask); |
2296 | } | 2298 | } |
2297 | static void set_ir_ioapic_affinity_irq(unsigned int irq, | 2299 | static void set_ir_ioapic_affinity_irq(unsigned int irq, |
2298 | const struct cpumask *mask) | 2300 | const struct cpumask *mask) |
@@ -2359,7 +2361,7 @@ static void irq_complete_move(struct irq_desc **descp) | |||
2359 | 2361 | ||
2360 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | 2362 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); |
2361 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | 2363 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); |
2362 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | 2364 | send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); |
2363 | cfg->move_in_progress = 0; | 2365 | cfg->move_in_progress = 0; |
2364 | } | 2366 | } |
2365 | } | 2367 | } |
@@ -3089,13 +3091,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms | |||
3089 | cpumask_t tmp; | 3091 | cpumask_t tmp; |
3090 | 3092 | ||
3091 | cfg = irq_cfg(irq); | 3093 | cfg = irq_cfg(irq); |
3092 | tmp = TARGET_CPUS; | 3094 | tmp = *TARGET_CPUS; |
3093 | err = assign_irq_vector(irq, cfg, tmp); | 3095 | err = assign_irq_vector(irq, cfg, &tmp); |
3094 | if (err) | 3096 | if (err) |
3095 | return err; | 3097 | return err; |
3096 | 3098 | ||
3097 | cpus_and(tmp, cfg->domain, tmp); | 3099 | cpus_and(tmp, cfg->domain, tmp); |
3098 | dest = cpu_mask_to_apicid(tmp); | 3100 | dest = cpu_mask_to_apicid(&tmp); |
3099 | 3101 | ||
3100 | #ifdef CONFIG_INTR_REMAP | 3102 | #ifdef CONFIG_INTR_REMAP |
3101 | if (irq_remapped(irq)) { | 3103 | if (irq_remapped(irq)) { |
@@ -3161,13 +3163,13 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
3161 | return; | 3163 | return; |
3162 | 3164 | ||
3163 | cfg = desc->chip_data; | 3165 | cfg = desc->chip_data; |
3164 | if (assign_irq_vector(irq, cfg, *mask)) | 3166 | if (assign_irq_vector(irq, cfg, mask)) |
3165 | return; | 3167 | return; |
3166 | 3168 | ||
3167 | set_extra_move_desc(desc, *mask); | 3169 | set_extra_move_desc(desc, mask); |
3168 | 3170 | ||
3169 | cpumask_and(&tmp, &cfg->domain, mask); | 3171 | cpumask_and(&tmp, &cfg->domain, mask); |
3170 | dest = cpu_mask_to_apicid(tmp); | 3172 | dest = cpu_mask_to_apicid(&tmp); |
3171 | 3173 | ||
3172 | read_msi_msg_desc(desc, &msg); | 3174 | read_msi_msg_desc(desc, &msg); |
3173 | 3175 | ||
@@ -3184,8 +3186,8 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
3184 | * Migrate the MSI irq to another cpumask. This migration is | 3186 | * Migrate the MSI irq to another cpumask. This migration is |
3185 | * done in the process context using interrupt-remapping hardware. | 3187 | * done in the process context using interrupt-remapping hardware. |
3186 | */ | 3188 | */ |
3187 | static void ir_set_msi_irq_affinity(unsigned int irq, | 3189 | static void |
3188 | const struct cpumask *mask) | 3190 | ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) |
3189 | { | 3191 | { |
3190 | struct irq_desc *desc = irq_to_desc(irq); | 3192 | struct irq_desc *desc = irq_to_desc(irq); |
3191 | struct irq_cfg *cfg; | 3193 | struct irq_cfg *cfg; |
@@ -3200,13 +3202,13 @@ static void ir_set_msi_irq_affinity(unsigned int irq, | |||
3200 | return; | 3202 | return; |
3201 | 3203 | ||
3202 | cfg = desc->chip_data; | 3204 | cfg = desc->chip_data; |
3203 | if (assign_irq_vector(irq, cfg, *mask)) | 3205 | if (assign_irq_vector(irq, cfg, mask)) |
3204 | return; | 3206 | return; |
3205 | 3207 | ||
3206 | set_extra_move_desc(desc, *mask); | 3208 | set_extra_move_desc(desc, mask); |
3207 | 3209 | ||
3208 | cpumask_and(&tmp, &cfg->domain, mask); | 3210 | cpumask_and(&tmp, &cfg->domain, mask); |
3209 | dest = cpu_mask_to_apicid(tmp); | 3211 | dest = cpu_mask_to_apicid(&tmp); |
3210 | 3212 | ||
3211 | irte.vector = cfg->vector; | 3213 | irte.vector = cfg->vector; |
3212 | irte.dest_id = IRTE_DEST(dest); | 3214 | irte.dest_id = IRTE_DEST(dest); |
@@ -3224,7 +3226,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, | |||
3224 | if (cfg->move_in_progress) { | 3226 | if (cfg->move_in_progress) { |
3225 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | 3227 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); |
3226 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | 3228 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); |
3227 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | 3229 | send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); |
3228 | cfg->move_in_progress = 0; | 3230 | cfg->move_in_progress = 0; |
3229 | } | 3231 | } |
3230 | 3232 | ||
@@ -3419,7 +3421,7 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
3419 | 3421 | ||
3420 | #ifdef CONFIG_DMAR | 3422 | #ifdef CONFIG_DMAR |
3421 | #ifdef CONFIG_SMP | 3423 | #ifdef CONFIG_SMP |
3422 | static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | 3424 | static void dmar_msi_set_affinity(unsigned int irq, const cpumask_t *mask) |
3423 | { | 3425 | { |
3424 | struct irq_desc *desc = irq_to_desc(irq); | 3426 | struct irq_desc *desc = irq_to_desc(irq); |
3425 | struct irq_cfg *cfg; | 3427 | struct irq_cfg *cfg; |
@@ -3431,13 +3433,13 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
3431 | return; | 3433 | return; |
3432 | 3434 | ||
3433 | cfg = desc->chip_data; | 3435 | cfg = desc->chip_data; |
3434 | if (assign_irq_vector(irq, cfg, *mask)) | 3436 | if (assign_irq_vector(irq, cfg, mask)) |
3435 | return; | 3437 | return; |
3436 | 3438 | ||
3437 | set_extra_move_desc(desc, *mask); | 3439 | set_extra_move_desc(desc, mask); |
3438 | 3440 | ||
3439 | cpumask_and(&tmp, &cfg->domain, mask); | 3441 | cpumask_and(&tmp, &cfg->domain, mask); |
3440 | dest = cpu_mask_to_apicid(tmp); | 3442 | dest = cpu_mask_to_apicid(&tmp); |
3441 | 3443 | ||
3442 | dmar_msi_read(irq, &msg); | 3444 | dmar_msi_read(irq, &msg); |
3443 | 3445 | ||
@@ -3481,7 +3483,7 @@ int arch_setup_dmar_msi(unsigned int irq) | |||
3481 | #ifdef CONFIG_HPET_TIMER | 3483 | #ifdef CONFIG_HPET_TIMER |
3482 | 3484 | ||
3483 | #ifdef CONFIG_SMP | 3485 | #ifdef CONFIG_SMP |
3484 | static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | 3486 | static void hpet_msi_set_affinity(unsigned int irq, const cpumask_t *mask) |
3485 | { | 3487 | { |
3486 | struct irq_desc *desc = irq_to_desc(irq); | 3488 | struct irq_desc *desc = irq_to_desc(irq); |
3487 | struct irq_cfg *cfg; | 3489 | struct irq_cfg *cfg; |
@@ -3493,13 +3495,13 @@ static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
3493 | return; | 3495 | return; |
3494 | 3496 | ||
3495 | cfg = desc->chip_data; | 3497 | cfg = desc->chip_data; |
3496 | if (assign_irq_vector(irq, cfg, *mask)) | 3498 | if (assign_irq_vector(irq, cfg, mask)) |
3497 | return; | 3499 | return; |
3498 | 3500 | ||
3499 | set_extra_move_desc(desc, *mask); | 3501 | set_extra_move_desc(desc, mask); |
3500 | 3502 | ||
3501 | cpumask_and(&tmp, &cfg->domain, mask); | 3503 | cpumask_and(&tmp, &cfg->domain, mask); |
3502 | dest = cpu_mask_to_apicid(tmp); | 3504 | dest = cpu_mask_to_apicid(&tmp); |
3503 | 3505 | ||
3504 | hpet_msi_read(irq, &msg); | 3506 | hpet_msi_read(irq, &msg); |
3505 | 3507 | ||
@@ -3564,7 +3566,7 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) | |||
3564 | write_ht_irq_msg(irq, &msg); | 3566 | write_ht_irq_msg(irq, &msg); |
3565 | } | 3567 | } |
3566 | 3568 | ||
3567 | static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) | 3569 | static void set_ht_irq_affinity(unsigned int irq, const cpumask_t *mask) |
3568 | { | 3570 | { |
3569 | struct irq_desc *desc = irq_to_desc(irq); | 3571 | struct irq_desc *desc = irq_to_desc(irq); |
3570 | struct irq_cfg *cfg; | 3572 | struct irq_cfg *cfg; |
@@ -3575,13 +3577,13 @@ static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) | |||
3575 | return; | 3577 | return; |
3576 | 3578 | ||
3577 | cfg = desc->chip_data; | 3579 | cfg = desc->chip_data; |
3578 | if (assign_irq_vector(irq, cfg, *mask)) | 3580 | if (assign_irq_vector(irq, cfg, mask)) |
3579 | return; | 3581 | return; |
3580 | 3582 | ||
3581 | set_extra_move_desc(desc, *mask); | 3583 | set_extra_move_desc(desc, mask); |
3582 | 3584 | ||
3583 | cpumask_and(&tmp, &cfg->domain, mask); | 3585 | cpumask_and(&tmp, &cfg->domain, mask); |
3584 | dest = cpu_mask_to_apicid(tmp); | 3586 | dest = cpu_mask_to_apicid(&tmp); |
3585 | 3587 | ||
3586 | target_ht_irq(irq, dest, cfg->vector); | 3588 | target_ht_irq(irq, dest, cfg->vector); |
3587 | cpumask_copy(&desc->affinity, mask); | 3589 | cpumask_copy(&desc->affinity, mask); |
@@ -3607,14 +3609,13 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
3607 | cpumask_t tmp; | 3609 | cpumask_t tmp; |
3608 | 3610 | ||
3609 | cfg = irq_cfg(irq); | 3611 | cfg = irq_cfg(irq); |
3610 | tmp = TARGET_CPUS; | 3612 | err = assign_irq_vector(irq, cfg, TARGET_CPUS); |
3611 | err = assign_irq_vector(irq, cfg, tmp); | ||
3612 | if (!err) { | 3613 | if (!err) { |
3613 | struct ht_irq_msg msg; | 3614 | struct ht_irq_msg msg; |
3614 | unsigned dest; | 3615 | unsigned dest; |
3615 | 3616 | ||
3616 | cpus_and(tmp, cfg->domain, tmp); | 3617 | cpus_and(tmp, cfg->domain, tmp); |
3617 | dest = cpu_mask_to_apicid(tmp); | 3618 | dest = cpu_mask_to_apicid(&tmp); |
3618 | 3619 | ||
3619 | msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); | 3620 | msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); |
3620 | 3621 | ||
@@ -3650,7 +3651,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
3650 | int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | 3651 | int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, |
3651 | unsigned long mmr_offset) | 3652 | unsigned long mmr_offset) |
3652 | { | 3653 | { |
3653 | const cpumask_t *eligible_cpu = get_cpu_mask(cpu); | 3654 | const cpumask_t *eligible_cpu = &cpumask_of_cpu(cpu); |
3654 | struct irq_cfg *cfg; | 3655 | struct irq_cfg *cfg; |
3655 | int mmr_pnode; | 3656 | int mmr_pnode; |
3656 | unsigned long mmr_value; | 3657 | unsigned long mmr_value; |
@@ -3660,7 +3661,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
3660 | 3661 | ||
3661 | cfg = irq_cfg(irq); | 3662 | cfg = irq_cfg(irq); |
3662 | 3663 | ||
3663 | err = assign_irq_vector(irq, cfg, *eligible_cpu); | 3664 | err = assign_irq_vector(irq, cfg, eligible_cpu); |
3664 | if (err != 0) | 3665 | if (err != 0) |
3665 | return err; | 3666 | return err; |
3666 | 3667 | ||
@@ -3679,7 +3680,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
3679 | entry->polarity = 0; | 3680 | entry->polarity = 0; |
3680 | entry->trigger = 0; | 3681 | entry->trigger = 0; |
3681 | entry->mask = 0; | 3682 | entry->mask = 0; |
3682 | entry->dest = cpu_mask_to_apicid(*eligible_cpu); | 3683 | entry->dest = cpu_mask_to_apicid(eligible_cpu); |
3683 | 3684 | ||
3684 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | 3685 | mmr_pnode = uv_blade_to_pnode(mmr_blade); |
3685 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | 3686 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); |
@@ -3890,7 +3891,7 @@ void __init setup_ioapic_dest(void) | |||
3890 | int pin, ioapic, irq, irq_entry; | 3891 | int pin, ioapic, irq, irq_entry; |
3891 | struct irq_desc *desc; | 3892 | struct irq_desc *desc; |
3892 | struct irq_cfg *cfg; | 3893 | struct irq_cfg *cfg; |
3893 | cpumask_t mask; | 3894 | const cpumask_t *mask; |
3894 | 3895 | ||
3895 | if (skip_ioapic_setup == 1) | 3896 | if (skip_ioapic_setup == 1) |
3896 | return; | 3897 | return; |
@@ -3921,16 +3922,16 @@ void __init setup_ioapic_dest(void) | |||
3921 | */ | 3922 | */ |
3922 | if (desc->status & | 3923 | if (desc->status & |
3923 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) | 3924 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) |
3924 | mask = desc->affinity; | 3925 | mask = &desc->affinity; |
3925 | else | 3926 | else |
3926 | mask = TARGET_CPUS; | 3927 | mask = TARGET_CPUS; |
3927 | 3928 | ||
3928 | #ifdef CONFIG_INTR_REMAP | 3929 | #ifdef CONFIG_INTR_REMAP |
3929 | if (intr_remapping_enabled) | 3930 | if (intr_remapping_enabled) |
3930 | set_ir_ioapic_affinity_irq_desc(desc, &mask); | 3931 | set_ir_ioapic_affinity_irq_desc(desc, mask); |
3931 | else | 3932 | else |
3932 | #endif | 3933 | #endif |
3933 | set_ioapic_affinity_irq_desc(desc, &mask); | 3934 | set_ioapic_affinity_irq_desc(desc, mask); |
3934 | } | 3935 | } |
3935 | 3936 | ||
3936 | } | 3937 | } |
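The __assign_irq_vector rework above drops the materialized intersections (the old domain/new_mask temporaries) in favor of for_each_cpu_and, which tests both masks as it iterates. A small self-contained comparison of the two idioms; mask_model and the walkers are illustrative stand-ins, not the kernel's cpumask implementation:

    #include <stdio.h>

    typedef unsigned long mask_model;   /* toy stand-in for cpumask_t */

    /* Old idiom: materialize the intersection in a temporary, then walk it. */
    static void walk_eager(mask_model mask, mask_model online)
    {
        mask_model tmp = mask & online;
        unsigned int cpu;

        for (cpu = 0; cpu < 8 * sizeof(tmp); cpu++)
            if (tmp & (1UL << cpu))
                printf("eager: cpu %u\n", cpu);
    }

    /* New idiom, in the spirit of for_each_cpu_and: test both masks per
     * step, so no temporary intersection mask is ever built. */
    static void walk_lazy(const mask_model *mask, const mask_model *online)
    {
        unsigned int cpu;

        for (cpu = 0; cpu < 8 * sizeof(*mask); cpu++)
            if ((*mask & (1UL << cpu)) && (*online & (1UL << cpu)))
                printf("lazy:  cpu %u\n", cpu);
    }

    int main(void)
    {
        mask_model requested = 0x3c, online = 0x0f;

        walk_eager(requested, online);      /* visits cpus 2, 3 */
        walk_lazy(&requested, &online);     /* same cpus, no temporary */
        return 0;
    }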
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index f1c688e46f35..86aa50fc65a1 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
@@ -116,9 +116,9 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector) | |||
116 | /* | 116 | /* |
117 | * This is only used on smaller machines. | 117 | * This is only used on smaller machines. |
118 | */ | 118 | */ |
119 | void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) | 119 | void send_IPI_mask_bitmask(const cpumask_t *cpumask, int vector) |
120 | { | 120 | { |
121 | unsigned long mask = cpus_addr(cpumask)[0]; | 121 | unsigned long mask = cpus_addr(*cpumask)[0]; |
122 | unsigned long flags; | 122 | unsigned long flags; |
123 | 123 | ||
124 | local_irq_save(flags); | 124 | local_irq_save(flags); |
@@ -127,7 +127,7 @@ void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) | |||
127 | local_irq_restore(flags); | 127 | local_irq_restore(flags); |
128 | } | 128 | } |
129 | 129 | ||
130 | void send_IPI_mask_sequence(cpumask_t mask, int vector) | 130 | void send_IPI_mask_sequence(const cpumask_t *mask, int vector) |
131 | { | 131 | { |
132 | unsigned long flags; | 132 | unsigned long flags; |
133 | unsigned int query_cpu; | 133 | unsigned int query_cpu; |
@@ -139,12 +139,24 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector) | |||
139 | */ | 139 | */ |
140 | 140 | ||
141 | local_irq_save(flags); | 141 | local_irq_save(flags); |
142 | for_each_possible_cpu(query_cpu) { | 142 | for_each_cpu_mask_nr(query_cpu, *mask) |
143 | if (cpu_isset(query_cpu, mask)) { | 143 | __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector); |
144 | local_irq_restore(flags); | ||
145 | } | ||
146 | |||
147 | void send_IPI_mask_allbutself(const cpumask_t *mask, int vector) | ||
148 | { | ||
149 | unsigned long flags; | ||
150 | unsigned int query_cpu; | ||
151 | unsigned int this_cpu = smp_processor_id(); | ||
152 | |||
153 | /* See Hack comment above */ | ||
154 | |||
155 | local_irq_save(flags); | ||
156 | for_each_cpu_mask_nr(query_cpu, *mask) | ||
157 | if (query_cpu != this_cpu) | ||
144 | __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), | 158 | __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), |
145 | vector); | 159 | vector); |
146 | } | ||
147 | } | ||
148 | local_irq_restore(flags); | 160 | local_irq_restore(flags); |
149 | } | 161 | } |
150 | 162 | ||
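One practical reason ipi.c now takes const cpumask_t * everywhere is the size of a mask once NR_CPUS is configured large. A back-of-the-envelope check, assuming an LP64 machine and NR_CPUS=4096 purely for illustration:

    #include <stdio.h>

    #define NR_CPUS 4096    /* a large-SMP configuration; illustrative only */

    typedef struct {
        unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
    } mask_model;

    int main(void)
    {
        /* Each by-value cpumask argument or on-stack temporary costs the
         * full struct; a const pointer costs one machine word. */
        printf("mask:    %zu bytes\n", sizeof(mask_model));     /* 512 on LP64 */
        printf("pointer: %zu bytes\n", sizeof(mask_model *));   /* 8 on LP64 */
        return 0;
    }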
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 3f92b134ab90..341df946f9a9 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -118,22 +118,22 @@ static void native_smp_send_reschedule(int cpu) | |||
118 | WARN_ON(1); | 118 | WARN_ON(1); |
119 | return; | 119 | return; |
120 | } | 120 | } |
121 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); | 121 | send_IPI_mask(&cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); |
122 | } | 122 | } |
123 | 123 | ||
124 | void native_send_call_func_single_ipi(int cpu) | 124 | void native_send_call_func_single_ipi(int cpu) |
125 | { | 125 | { |
126 | send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); | 126 | send_IPI_mask(&cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); |
127 | } | 127 | } |
128 | 128 | ||
129 | void native_send_call_func_ipi(cpumask_t mask) | 129 | void native_send_call_func_ipi(const cpumask_t *mask) |
130 | { | 130 | { |
131 | cpumask_t allbutself; | 131 | cpumask_t allbutself; |
132 | 132 | ||
133 | allbutself = cpu_online_map; | 133 | allbutself = cpu_online_map; |
134 | cpu_clear(smp_processor_id(), allbutself); | 134 | cpu_clear(smp_processor_id(), allbutself); |
135 | 135 | ||
136 | if (cpus_equal(mask, allbutself) && | 136 | if (cpus_equal(*mask, allbutself) && |
137 | cpus_equal(cpu_online_map, cpu_callout_map)) | 137 | cpus_equal(cpu_online_map, cpu_callout_map)) |
138 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | 138 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); |
139 | else | 139 | else |
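native_send_call_func_ipi keeps its allbutself temporary but now reads the caller's mask through a pointer, and picks the cheap broadcast IPI when the request is exactly "everyone online but me". A toy model of that dispatch; the cpu_callout_map condition from the real function is elided and every name below is a stand-in:

    #include <stdio.h>

    typedef unsigned long mask_model;   /* toy cpumask stand-in */

    static void broadcast_allbutself(void) { puts("shorthand broadcast IPI"); }
    static void send_per_cpu(mask_model m) { printf("per-cpu sends: %#lx\n", m); }

    /* Model of the dispatch: if the request is exactly "everyone online
     * but me", use the cheaper broadcast form.  The real function also
     * requires cpu_online_map == cpu_callout_map, elided here. */
    static void call_func_ipi(const mask_model *mask, mask_model online,
                              unsigned int self)
    {
        mask_model allbutself = online & ~(1UL << self);

        if (*mask == allbutself)
            broadcast_allbutself();
        else
            send_per_cpu(*mask);
    }

    int main(void)
    {
        mask_model online = 0x0f;           /* cpus 0-3 online, we are cpu 2 */
        mask_model want = 0x0b;             /* cpus 0, 1, 3 */

        call_func_ipi(&want, online, 2);    /* takes the broadcast path */
        return 0;
    }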
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index f4049f3513b6..174ea90d1cbd 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -164,7 +164,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, | |||
164 | * We have to send the IPI only to | 164 | * We have to send the IPI only to |
165 | * CPUs affected. | 165 | * CPUs affected. |
166 | */ | 166 | */ |
167 | send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR); | 167 | send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR); |
168 | 168 | ||
169 | while (!cpus_empty(flush_cpumask)) | 169 | while (!cpus_empty(flush_cpumask)) |
170 | /* nothing. lockup detection does not belong here */ | 170 | /* nothing. lockup detection does not belong here */ |
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index 8f919ca69494..de6f1bda0c50 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -191,7 +191,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, | |||
191 | * We have to send the IPI only to | 191 | * We have to send the IPI only to |
192 | * CPUs affected. | 192 | * CPUs affected. |
193 | */ | 193 | */ |
194 | send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender); | 194 | send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender); |
195 | 195 | ||
196 | while (!cpus_empty(f->flush_cpumask)) | 196 | while (!cpus_empty(f->flush_cpumask)) |
197 | cpu_relax(); | 197 | cpu_relax(); |
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
index 3624a364b7f3..bc4c7840b2a8 100644
--- a/arch/x86/mach-generic/bigsmp.c
+++ b/arch/x86/mach-generic/bigsmp.c
@@ -42,9 +42,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = { | |||
42 | { } | 42 | { } |
43 | }; | 43 | }; |
44 | 44 | ||
45 | static cpumask_t vector_allocation_domain(int cpu) | 45 | static void vector_allocation_domain(int cpu, cpumask_t *retmask) |
46 | { | 46 | { |
47 | return cpumask_of_cpu(cpu); | 47 | cpus_clear(*retmask); |
48 | cpu_set(cpu, *retmask); | ||
48 | } | 49 | } |
49 | 50 | ||
50 | static int probe_bigsmp(void) | 51 | static int probe_bigsmp(void) |
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c
index 7b4e6d0d1690..4ba5ccaa1584 100644
--- a/arch/x86/mach-generic/es7000.c
+++ b/arch/x86/mach-generic/es7000.c
@@ -87,7 +87,7 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
87 | } | 87 | } |
88 | #endif | 88 | #endif |
89 | 89 | ||
90 | static cpumask_t vector_allocation_domain(int cpu) | 90 | static void vector_allocation_domain(int cpu, cpumask_t *retmask) |
91 | { | 91 | { |
92 | /* Careful. Some cpus do not strictly honor the set of cpus | 92 | /* Careful. Some cpus do not strictly honor the set of cpus |
93 | * specified in the interrupt destination when using lowest | 93 | * specified in the interrupt destination when using lowest |
@@ -97,8 +97,7 @@ static cpumask_t vector_allocation_domain(int cpu) | |||
97 | * deliver interrupts to the wrong hyperthread when only one | 97 | * deliver interrupts to the wrong hyperthread when only one |
98 | * hyperthread was specified in the interrupt destination. | 98 | * hyperthread was specified in the interrupt destination. |
99 | */ | 99 | */ |
100 | cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; | 100 | *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; |
101 | return domain; | ||
102 | } | 101 | } |
103 | 102 | ||
104 | struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000); | 103 | struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000); |
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c
index 71a309b122e6..511d7941364f 100644
--- a/arch/x86/mach-generic/numaq.c
+++ b/arch/x86/mach-generic/numaq.c
@@ -38,7 +38,7 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
38 | return 0; | 38 | return 0; |
39 | } | 39 | } |
40 | 40 | ||
41 | static cpumask_t vector_allocation_domain(int cpu) | 41 | static void vector_allocation_domain(int cpu, cpumask_t *retmask) |
42 | { | 42 | { |
43 | /* Careful. Some cpus do not strictly honor the set of cpus | 43 | /* Careful. Some cpus do not strictly honor the set of cpus |
44 | * specified in the interrupt destination when using lowest | 44 | * specified in the interrupt destination when using lowest |
@@ -48,8 +48,7 @@ static cpumask_t vector_allocation_domain(int cpu) | |||
48 | * deliver interrupts to the wrong hyperthread when only one | 48 | * deliver interrupts to the wrong hyperthread when only one |
49 | * hyperthread was specified in the interrupt destination. | 49 | * hyperthread was specified in the interrupt destination. |
50 | */ | 50 | */ |
51 | cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; | 51 | *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; |
52 | return domain; | ||
53 | } | 52 | } |
54 | 53 | ||
55 | struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq); | 54 | struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq); |
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c
index 2c6d234e0009..2821ffc188b5 100644
--- a/arch/x86/mach-generic/summit.c
+++ b/arch/x86/mach-generic/summit.c
@@ -24,7 +24,7 @@ static int probe_summit(void) | |||
24 | return 0; | 24 | return 0; |
25 | } | 25 | } |
26 | 26 | ||
27 | static cpumask_t vector_allocation_domain(int cpu) | 27 | static void vector_allocation_domain(int cpu, cpumask_t *retmask) |
28 | { | 28 | { |
29 | /* Careful. Some cpus do not strictly honor the set of cpus | 29 | /* Careful. Some cpus do not strictly honor the set of cpus |
30 | * specified in the interrupt destination when using lowest | 30 | * specified in the interrupt destination when using lowest |
@@ -34,8 +34,7 @@ static cpumask_t vector_allocation_domain(int cpu) | |||
34 | * deliver interrupts to the wrong hyperthread when only one | 34 | * deliver interrupts to the wrong hyperthread when only one |
35 | * hyperthread was specified in the interrupt destination. | 35 | * hyperthread was specified in the interrupt destination. |
36 | */ | 36 | */ |
37 | cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; | 37 | *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; |
38 | return domain; | ||
39 | } | 38 | } |
40 | 39 | ||
41 | struct genapic apic_summit = APIC_INIT("summit", probe_summit); | 40 | struct genapic apic_summit = APIC_INIT("summit", probe_summit); |
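es7000, numaq, and summit all receive the same rewrite: the local domain variable and its by-value return collapse into one compound-literal store through retmask. A standalone demo of the construct, with ALL_CPUS assumed to be 0xFF here only for illustration (the kernel code uses APIC_ALL_CPUS):

    #include <stdio.h>

    #define ALL_CPUS 0xFFul     /* placeholder; the kernel code uses APIC_ALL_CPUS */

    typedef struct { unsigned long bits[2]; } mask_model;   /* toy cpumask */

    static void fill_domain(mask_model *retmask)
    {
        /* One store: members not named in a compound literal are
         * zero-initialized, so bits[1] ends up 0 with no explicit clear. */
        *retmask = (mask_model){ .bits = { [0] = ALL_CPUS, } };
    }

    int main(void)
    {
        mask_model m = { { ~0UL, ~0UL } };  /* start dirty to show the zeroing */

        fill_domain(&m);
        printf("bits[0]=%#lx bits[1]=%#lx\n", m.bits[0], m.bits[1]);
        return 0;
    }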
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index acd9b6705e02..2cce362c9874 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void) | |||
158 | { | 158 | { |
159 | int i, rc; | 159 | int i, rc; |
160 | 160 | ||
161 | for (i = 0; i < NR_CPUS; i++) { | 161 | for (i = 0; i < nr_cpu_ids; i++) { |
162 | rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); | 162 | rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); |
163 | if (rc >= 0) { | 163 | if (rc >= 0) { |
164 | num_processors++; | 164 | num_processors++; |
@@ -196,7 +196,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) | |||
196 | 196 | ||
197 | /* Restrict the possible_map according to max_cpus. */ | 197 | /* Restrict the possible_map according to max_cpus. */ |
198 | while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { | 198 | while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { |
199 | for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--) | 199 | for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) |
200 | continue; | 200 | continue; |
201 | cpu_clear(cpu, cpu_possible_map); | 201 | cpu_clear(cpu, cpu_possible_map); |
202 | } | 202 | } |
@@ -408,24 +408,22 @@ static void xen_smp_send_reschedule(int cpu) | |||
408 | xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); | 408 | xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); |
409 | } | 409 | } |
410 | 410 | ||
411 | static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) | 411 | static void xen_send_IPI_mask(const cpumask_t *mask, enum ipi_vector vector) |
412 | { | 412 | { |
413 | unsigned cpu; | 413 | unsigned cpu; |
414 | 414 | ||
415 | cpus_and(mask, mask, cpu_online_map); | 415 | for_each_cpu_and(cpu, mask, &cpu_online_map) |
416 | |||
417 | for_each_cpu_mask_nr(cpu, mask) | ||
418 | xen_send_IPI_one(cpu, vector); | 416 | xen_send_IPI_one(cpu, vector); |
419 | } | 417 | } |
420 | 418 | ||
421 | static void xen_smp_send_call_function_ipi(cpumask_t mask) | 419 | static void xen_smp_send_call_function_ipi(const cpumask_t *mask) |
422 | { | 420 | { |
423 | int cpu; | 421 | int cpu; |
424 | 422 | ||
425 | xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); | 423 | xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); |
426 | 424 | ||
427 | /* Make sure other vcpus get a chance to run if they need to. */ | 425 | /* Make sure other vcpus get a chance to run if they need to. */ |
428 | for_each_cpu_mask_nr(cpu, mask) { | 426 | for_each_cpu_mask_nr(cpu, *mask) { |
429 | if (xen_vcpu_stolen(cpu)) { | 427 | if (xen_vcpu_stolen(cpu)) { |
430 | HYPERVISOR_sched_op(SCHEDOP_yield, 0); | 428 | HYPERVISOR_sched_op(SCHEDOP_yield, 0); |
431 | break; | 429 | break; |
@@ -435,7 +433,8 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask) | |||
435 | 433 | ||
436 | static void xen_smp_send_call_function_single_ipi(int cpu) | 434 | static void xen_smp_send_call_function_single_ipi(int cpu) |
437 | { | 435 | { |
438 | xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR); | 436 | xen_send_IPI_mask(&cpumask_of_cpu(cpu), |
437 | XEN_CALL_FUNCTION_SINGLE_VECTOR); | ||
439 | } | 438 | } |
440 | 439 | ||
441 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) | 440 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) |
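A final note on the xen_send_IPI_mask change above: with a const pointer parameter, the old in-place cpus_and(mask, mask, cpu_online_map) is no longer even expressible, so the online filter has to move into the iteration, which is what for_each_cpu_and provides. A sketch of that constraint with toy types (nothing below is a Xen or kernel API):

    typedef unsigned long mask_model;   /* toy types; not Xen or kernel APIs */

    /* Old signature: the callee receives its own copy and may scribble on it. */
    static void filter_old(mask_model mask, mask_model online)
    {
        mask &= online;     /* legal: mutates only the private copy */
        (void)mask;
    }

    /* New signature: *mask is const, so the in-place cpus_and is rejected
     * by the compiler and the filter moves into the iteration itself. */
    static int filter_new(const mask_model *mask, mask_model online)
    {
        /* *mask &= online;    <- would not compile: *mask is read-only */
        mask_model bit;
        int sent = 0;

        for (bit = 1; bit; bit <<= 1)
            if ((*mask & bit) && (online & bit))
                sent++;     /* "send" to this cpu */
        return sent;
    }

    int main(void)
    {
        mask_model m = 0x5, online = 0x7;

        filter_old(m, online);
        return filter_new(&m, online) == 2 ? 0 : 1;
    }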