Diffstat (limited to 'include')

 include/asm-generic/topology.h |  14
 include/asm-m32r/smp.h         |   2
 include/linux/bitmap.h         |  35
 include/linux/clockchips.h     |   4
 include/linux/cpumask.h        | 284
 include/linux/interrupt.h      |   4
 include/linux/irq.h            |   3
 include/linux/seq_file.h       |   7
 include/linux/smp.h            |  18
 include/linux/threads.h        |  16

 10 files changed, 194 insertions(+), 193 deletions(-)
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 54bbf6e04ee8..0e9e2bc0ee96 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -40,6 +40,9 @@
 #ifndef node_to_cpumask
 #define node_to_cpumask(node)	((void)node, cpu_online_map)
 #endif
+#ifndef cpumask_of_node
+#define cpumask_of_node(node)	((void)node, cpu_online_mask)
+#endif
 #ifndef node_to_first_cpu
 #define node_to_first_cpu(node)	((void)(node),0)
 #endif
@@ -54,9 +57,18 @@
 	)
 #endif

+#ifndef cpumask_of_pcibus
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?		\
+				 cpu_all_mask :				\
+				 cpumask_of_node(pcibus_to_node(bus)))
+#endif
+
 #endif	/* CONFIG_NUMA */

-/* returns pointer to cpumask for specified node */
+/*
+ * returns pointer to cpumask for specified node
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
 #ifndef node_to_cpumask_ptr

 #define node_to_cpumask_ptr(v, node)					\
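[Illustration only, not part of the diff: a minimal sketch of how a caller might use the new pointer-returning cpumask_of_node() together with the pointer-based cpumask operators from this series, instead of the deprecated node_to_cpumask_ptr(). The example_* name is hypothetical.]

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    /* Pick an online CPU on @node, falling back to any online CPU. */
    static int example_pick_cpu_on_node(int node)
    {
            const struct cpumask *mask = cpumask_of_node(node);
            int cpu = cpumask_any_and(mask, cpu_online_mask);

            return cpu < nr_cpu_ids ? cpu : cpumask_any(cpu_online_mask);
    }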
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index c5dd66916692..b96a6d2ffbc3 100644
--- a/include/asm-m32r/smp.h
+++ b/include/asm-m32r/smp.h
@@ -63,8 +63,6 @@ extern volatile int cpu_2_physid[NR_CPUS];
 #define raw_smp_processor_id()	(current_thread_info()->cpu)

 extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_possible_map;
-extern cpumask_t cpu_present_map;

 static __inline__ int hard_smp_processor_id(void)
 {
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index a08c33a26ca9..2878811c6134 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -137,9 +137,12 @@ extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
 	(1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL		\
 )

+#define small_const_nbits(nbits) \
+	(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+
 static inline void bitmap_zero(unsigned long *dst, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = 0UL;
 	else {
 		int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
@@ -150,7 +153,7 @@ static inline void bitmap_zero(unsigned long *dst, int nbits)
 static inline void bitmap_fill(unsigned long *dst, int nbits)
 {
 	size_t nlongs = BITS_TO_LONGS(nbits);
-	if (nlongs > 1) {
+	if (!small_const_nbits(nbits)) {
 		int len = (nlongs - 1) * sizeof(unsigned long);
 		memset(dst, 0xff, len);
 	}
@@ -160,7 +163,7 @@ static inline void bitmap_fill(unsigned long *dst, int nbits)
 static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
 			int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = *src;
 	else {
 		int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
@@ -171,7 +174,7 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
 static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = *src1 & *src2;
 	else
 		__bitmap_and(dst, src1, src2, nbits);
@@ -180,7 +183,7 @@ static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
 static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = *src1 | *src2;
 	else
 		__bitmap_or(dst, src1, src2, nbits);
@@ -189,7 +192,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
 static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = *src1 ^ *src2;
 	else
 		__bitmap_xor(dst, src1, src2, nbits);
@@ -198,7 +201,7 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
 static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = *src1 & ~(*src2);
 	else
 		__bitmap_andnot(dst, src1, src2, nbits);
@@ -207,7 +210,7 @@ static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
 static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
 			int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
 	else
 		__bitmap_complement(dst, src, nbits);
@@ -216,7 +219,7 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
 static inline int bitmap_equal(const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
 	else
 		return __bitmap_equal(src1, src2, nbits);
@@ -225,7 +228,7 @@ static inline int bitmap_equal(const unsigned long *src1,
 static inline int bitmap_intersects(const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
 	else
 		return __bitmap_intersects(src1, src2, nbits);
@@ -234,7 +237,7 @@ static inline int bitmap_intersects(const unsigned long *src1,
 static inline int bitmap_subset(const unsigned long *src1,
 			const unsigned long *src2, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
 	else
 		return __bitmap_subset(src1, src2, nbits);
@@ -242,7 +245,7 @@ static inline int bitmap_subset(const unsigned long *src1,

 static inline int bitmap_empty(const unsigned long *src, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
 	else
 		return __bitmap_empty(src, nbits);
@@ -250,7 +253,7 @@ static inline int bitmap_empty(const unsigned long *src, int nbits)

 static inline int bitmap_full(const unsigned long *src, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
 	else
 		return __bitmap_full(src, nbits);
@@ -258,7 +261,7 @@ static inline int bitmap_full(const unsigned long *src, int nbits)

 static inline int bitmap_weight(const unsigned long *src, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
 	return __bitmap_weight(src, nbits);
 }
@@ -266,7 +269,7 @@ static inline int bitmap_weight(const unsigned long *src, int nbits)
 static inline void bitmap_shift_right(unsigned long *dst,
 			const unsigned long *src, int n, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = *src >> n;
 	else
 		__bitmap_shift_right(dst, src, n, nbits);
@@ -275,7 +278,7 @@ static inline void bitmap_shift_right(unsigned long *dst,
 static inline void bitmap_shift_left(unsigned long *dst,
 			const unsigned long *src, int n, int nbits)
 {
-	if (nbits <= BITS_PER_LONG)
+	if (small_const_nbits(nbits))
 		*dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
 	else
 		__bitmap_shift_left(dst, src, n, nbits);
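[Illustration only, not part of the diff: small_const_nbits() keys the inlined single-word fast path on nbits being a compile-time constant that fits in one long, so runtime-sized bitmaps always go through the out-of-line __bitmap_*() helpers. A sketch with hypothetical example_* names.]

    #include <linux/bitmap.h>

    #define EXAMPLE_SMALL_BITS 16	/* constant and <= BITS_PER_LONG */

    static DECLARE_BITMAP(example_small, EXAMPLE_SMALL_BITS);

    static int example_small_is_empty(void)
    {
            /* small_const_nbits(16) is true: this compiles to a single word test. */
            return bitmap_empty(example_small, EXAMPLE_SMALL_BITS);
    }

    static int example_runtime_is_empty(const unsigned long *map, int nbits)
    {
            /* nbits is not a compile-time constant: __bitmap_empty() is used,
             * which is correct even when nbits > BITS_PER_LONG. */
            return bitmap_empty(map, nbits);
    }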
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index ed3a5d473e52..cea153697ec7 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -82,13 +82,13 @@ struct clock_event_device {
 	int			shift;
 	int			rating;
 	int			irq;
-	cpumask_t		cpumask;
+	const struct cpumask	*cpumask;
 	int			(*set_next_event)(unsigned long evt,
 						  struct clock_event_device *);
 	void			(*set_mode)(enum clock_event_mode mode,
 					    struct clock_event_device *);
 	void			(*event_handler)(struct clock_event_device *);
-	void			(*broadcast)(cpumask_t mask);
+	void			(*broadcast)(const struct cpumask *mask);
 	struct list_head	list;
 	enum clock_event_mode	mode;
 	ktime_t			next_event;
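[Illustration only, not part of the diff: with the field now a const struct cpumask *, a clock event driver points .cpumask at an existing mask (typically cpumask_of(cpu)) instead of copying a cpumask_t by value. A hypothetical per-CPU setup sketch, assuming it runs on the CPU being brought up with preemption disabled.]

    #include <linux/clockchips.h>
    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(struct clock_event_device, example_clockevent);

    /* Called on the CPU being brought up, with preemption disabled. */
    static void example_setup_clockevent(void)
    {
            struct clock_event_device *evt = &__get_cpu_var(example_clockevent);

            /* Point at the constant per-CPU mask rather than copying a cpumask_t. */
            evt->cpumask = cpumask_of(smp_processor_id());
            clockevents_register_device(evt);
    }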
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 21e1dd43e52a..7c178a6baae3 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -268,6 +268,25 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
 	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
 }

+/**
+ * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * @bitmap: the bitmap
+ *
+ * There are a few places where cpumask_var_t isn't appropriate and
+ * static cpumasks must be used (eg. very early boot), yet we don't
+ * expose the definition of 'struct cpumask'.
+ *
+ * This does the conversion, and can be used as a constant initializer.
+ */
+#define to_cpumask(bitmap)						\
+	((struct cpumask *)(1 ? (bitmap)				\
+			    : (void *)sizeof(__check_is_bitmap(bitmap))))
+
+static inline int __check_is_bitmap(const unsigned long *bitmap)
+{
+	return 1;
+}
+
 /*
  * Special-case data structure for "single bit set only" constant CPU masks.
  *
@@ -278,11 +297,11 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
 extern const unsigned long
 	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

-static inline const cpumask_t *get_cpu_mask(unsigned int cpu)
+static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 {
 	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 	p -= cpu / BITS_PER_LONG;
-	return (const cpumask_t *)p;
+	return to_cpumask(p);
 }

 /*
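[Illustration only, not part of the diff: to_cpumask() is a type-checked cast from a raw NR_CPUS bitmap to struct cpumask *, for the few places (very early boot, constant initializers such as cpu_none_mask later in this file) where cpumask_var_t cannot be used. The example_* names below are hypothetical.]

    #include <linux/cpumask.h>

    /* Hypothetical early-boot mask kept as a raw NR_CPUS bitmap. */
    static DECLARE_BITMAP(example_boot_bits, NR_CPUS);

    static void example_mark_boot_cpu(unsigned int cpu)
    {
            /* to_cpumask() is only a checked cast; no copy is made. */
            struct cpumask *mask = to_cpumask(example_boot_bits);

            cpumask_set_cpu(cpu, mask);
    }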
@@ -339,36 +358,6 @@ extern cpumask_t cpu_mask_all;
 #endif
 #define CPUMASK_PTR(v, m) 	cpumask_t *v = &(m->v)

-#define cpumask_scnprintf(buf, len, src) \
-			__cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
-static inline int __cpumask_scnprintf(char *buf, int len,
-					const cpumask_t *srcp, int nbits)
-{
-	return bitmap_scnprintf(buf, len, srcp->bits, nbits);
-}
-
-#define cpumask_parse_user(ubuf, ulen, dst) \
-			__cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS)
-static inline int __cpumask_parse_user(const char __user *buf, int len,
-					cpumask_t *dstp, int nbits)
-{
-	return bitmap_parse_user(buf, len, dstp->bits, nbits);
-}
-
-#define cpulist_scnprintf(buf, len, src) \
-			__cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
-static inline int __cpulist_scnprintf(char *buf, int len,
-					const cpumask_t *srcp, int nbits)
-{
-	return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
-}
-
-#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
-static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
-{
-	return bitmap_parselist(buf, dstp->bits, nbits);
-}
-
 #define cpu_remap(oldbit, old, new) \
 		__cpu_remap((oldbit), &(old), &(new), NR_CPUS)
 static inline int __cpu_remap(int oldbit,
@@ -446,74 +435,63 @@ int __next_cpu_nr(int n, const cpumask_t *srcp);

 /*
  * The following particular system cpumasks and operations manage
- * possible, present, active and online cpus. Each of them is a fixed size
- * bitmap of size NR_CPUS.
+ * possible, present, active and online cpus.
+ *
+ *     cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
+ *     cpu_present_mask - has bit 'cpu' set iff cpu is populated
+ *     cpu_online_mask  - has bit 'cpu' set iff cpu available to scheduler
+ *     cpu_active_mask  - has bit 'cpu' set iff cpu available to migration
  *
- *  #ifdef CONFIG_HOTPLUG_CPU
- *     cpu_possible_map - has bit 'cpu' set iff cpu is populatable
- *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
- *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
- *     cpu_active_map   - has bit 'cpu' set iff cpu available to migration
- *  #else
- *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
- *     cpu_present_map  - copy of cpu_possible_map
- *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
- *  #endif
+ *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
  *
- *  In either case, NR_CPUS is fixed at compile time, as the static
- *  size of these bitmaps. The cpu_possible_map is fixed at boot
- *  time, as the set of CPU id's that it is possible might ever
- *  be plugged in at anytime during the life of that system boot.
- *  The cpu_present_map is dynamic(*), representing which CPUs
- *  are currently plugged in. And cpu_online_map is the dynamic
- *  subset of cpu_present_map, indicating those CPUs available
- *  for scheduling.
+ *  The cpu_possible_mask is fixed at boot time, as the set of CPU id's
+ *  that it is possible might ever be plugged in at anytime during the
+ *  life of that system boot. The cpu_present_mask is dynamic(*),
+ *  representing which CPUs are currently plugged in. And
+ *  cpu_online_mask is the dynamic subset of cpu_present_mask,
+ *  indicating those CPUs available for scheduling.
  *
- *  If HOTPLUG is enabled, then cpu_possible_map is forced to have
+ *  If HOTPLUG is enabled, then cpu_possible_mask is forced to have
 *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
 *  ACPI reports present at boot.
 *
- *  If HOTPLUG is enabled, then cpu_present_map varies dynamically,
+ *  If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
 *  depending on what ACPI reports as currently plugged in, otherwise
- *  cpu_present_map is just a copy of cpu_possible_map.
+ *  cpu_present_mask is just a copy of cpu_possible_mask.
 *
- *  (*) Well, cpu_present_map is dynamic in the hotplug case. If not
- *      hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
+ *  (*) Well, cpu_present_mask is dynamic in the hotplug case. If not
+ *      hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
 *
 * Subtleties:
 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
 *    assumption that their single CPU is online. The UP
- *    cpu_{online,possible,present}_maps are placebos. Changing them
+ *    cpu_{online,possible,present}_masks are placebos. Changing them
 *    will have no useful affect on the following num_*_cpus()
 *    and cpu_*() macros in the UP case. This ugliness is a UP
 *    optimization - don't waste any instructions or memory references
 *    asking if you're online or how many CPUs there are if there is
 *    only one CPU.
- * 2) Most SMP arch's #define some of these maps to be some
- *    other map specific to that arch. Therefore, the following
- *    must be #define macros, not inlines. To see why, examine
- *    the assembly code produced by the following. Note that
- *    set1() writes phys_x_map, but set2() writes x_map:
- *        int x_map, phys_x_map;
- *        #define set1(a) x_map = a
- *        inline void set2(int a) { x_map = a; }
- *        #define x_map phys_x_map
- *        main(){ set1(3); set2(5); }
 */

-extern cpumask_t cpu_possible_map;
-extern cpumask_t cpu_online_map;
-extern cpumask_t cpu_present_map;
-extern cpumask_t cpu_active_map;
+extern const struct cpumask *const cpu_possible_mask;
+extern const struct cpumask *const cpu_online_mask;
+extern const struct cpumask *const cpu_present_mask;
+extern const struct cpumask *const cpu_active_mask;
+
+/* These strip const, as traditionally they weren't const. */
+#define cpu_possible_map	(*(cpumask_t *)cpu_possible_mask)
+#define cpu_online_map		(*(cpumask_t *)cpu_online_mask)
+#define cpu_present_map	(*(cpumask_t *)cpu_present_mask)
+#define cpu_active_map		(*(cpumask_t *)cpu_active_mask)

 #if NR_CPUS > 1
-#define num_online_cpus()	cpus_weight_nr(cpu_online_map)
-#define num_possible_cpus()	cpus_weight_nr(cpu_possible_map)
-#define num_present_cpus()	cpus_weight_nr(cpu_present_map)
-#define cpu_online(cpu)	cpu_isset((cpu), cpu_online_map)
-#define cpu_possible(cpu)	cpu_isset((cpu), cpu_possible_map)
-#define cpu_present(cpu)	cpu_isset((cpu), cpu_present_map)
-#define cpu_active(cpu)	cpu_isset((cpu), cpu_active_map)
+#define num_online_cpus()	cpumask_weight(cpu_online_mask)
+#define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
+#define num_present_cpus()	cpumask_weight(cpu_present_mask)
+#define cpu_online(cpu)	cpumask_test_cpu((cpu), cpu_online_mask)
+#define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
+#define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
+#define cpu_active(cpu)	cpumask_test_cpu((cpu), cpu_active_mask)
 #else
 #define num_online_cpus()	1
 #define num_possible_cpus()	1
@@ -526,10 +504,6 @@ extern cpumask_t cpu_active_map;

 #define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))

-#define for_each_possible_cpu(cpu)  for_each_cpu_mask_nr((cpu), cpu_possible_map)
-#define for_each_online_cpu(cpu)    for_each_cpu_mask_nr((cpu), cpu_online_map)
-#define for_each_present_cpu(cpu)   for_each_cpu_mask_nr((cpu), cpu_present_map)
-
 /* These are the new versions of the cpumask operators: passed by pointer.
  * The older versions will be implemented in terms of these, then deleted. */
 #define cpumask_bits(maskp) ((maskp)->bits)
@@ -540,9 +514,6 @@ extern cpumask_t cpu_active_map;
 	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD			\
 }

-/* This produces more efficient code. */
-#define nr_cpumask_bits	NR_CPUS
-
 #else /* NR_CPUS > BITS_PER_LONG */

 #define CPU_BITS_ALL						\
@@ -550,9 +521,15 @@ extern cpumask_t cpu_active_map;
 	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,		\
 	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD		\
 }
+#endif /* NR_CPUS > BITS_PER_LONG */

+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
+ * not all bits may be allocated. */
 #define nr_cpumask_bits	nr_cpu_ids
-#endif /* NR_CPUS > BITS_PER_LONG */
+#else
+#define nr_cpumask_bits	NR_CPUS
+#endif

 /* verify cpu argument to cpumask_* operators */
 static inline unsigned int cpumask_check(unsigned int cpu)
@@ -714,7 +691,7 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
 * No static inline type checking - see Subtlety (1) above.
 */
 #define cpumask_test_cpu(cpu, cpumask) \
-	test_bit(cpumask_check(cpu), (cpumask)->bits)
+	test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))

 /**
 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
@@ -946,22 +923,61 @@ static inline void cpumask_copy(struct cpumask *dstp,
 #define cpumask_of(cpu) (get_cpu_mask(cpu))

 /**
- * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
- * @bitmap: the bitmap
+ * cpumask_scnprintf - print a cpumask into a string as comma-separated hex
+ * @buf: the buffer to sprintf into
+ * @len: the length of the buffer
+ * @srcp: the cpumask to print
 *
- * There are a few places where cpumask_var_t isn't appropriate and
- * static cpumasks must be used (eg. very early boot), yet we don't
- * expose the definition of 'struct cpumask'.
+ * If len is zero, returns zero. Otherwise returns the length of the
+ * (nul-terminated) @buf string.
+ */
+static inline int cpumask_scnprintf(char *buf, int len,
+				    const struct cpumask *srcp)
+{
+	return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_parse_user - extract a cpumask from a user string
+ * @buf: the buffer to extract from
+ * @len: the length of the buffer
+ * @dstp: the cpumask to set.
 *
- * This does the conversion, and can be used as a constant initializer.
+ * Returns -errno, or 0 for success.
 */
-#define to_cpumask(bitmap)						\
-	((struct cpumask *)(1 ? (bitmap)				\
-			    : (void *)sizeof(__check_is_bitmap(bitmap))))
+static inline int cpumask_parse_user(const char __user *buf, int len,
+				     struct cpumask *dstp)
+{
+	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+}

-static inline int __check_is_bitmap(const unsigned long *bitmap)
+/**
+ * cpulist_scnprintf - print a cpumask into a string as comma-separated list
+ * @buf: the buffer to sprintf into
+ * @len: the length of the buffer
+ * @srcp: the cpumask to print
+ *
+ * If len is zero, returns zero. Otherwise returns the length of the
+ * (nul-terminated) @buf string.
+ */
+static inline int cpulist_scnprintf(char *buf, int len,
+				    const struct cpumask *srcp)
 {
-	return 1;
+	return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp),
+				    nr_cpumask_bits);
+}
+
+/**
+ * cpulist_parse_user - extract a cpumask from a user string of ranges
+ * @buf: the buffer to extract from
+ * @len: the length of the buffer
+ * @dstp: the cpumask to set.
+ *
+ * Returns -errno, or 0 for success.
+ */
+static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+{
+	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
 }

 /**
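[Illustration only, not part of the diff: the formatting/parsing helpers now take struct cpumask pointers and are bounded by nr_cpumask_bits rather than NR_CPUS. A hypothetical round-trip sketch; example_show_mask() is not a real kernel function.]

    #include <linux/cpumask.h>
    #include <linux/kernel.h>

    /* Parse a CPU list string (e.g. "0-3,8") and log it back as a hex mask. */
    static int example_show_mask(const char *cpulist, struct cpumask *tmp)
    {
            char buf[128];
            int err = cpulist_parse(cpulist, tmp);

            if (err)
                    return err;

            cpumask_scnprintf(buf, sizeof(buf), tmp);
            printk(KERN_INFO "example: %s -> %s\n", cpulist, buf);
            return 0;
    }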
@@ -995,6 +1011,7 @@ static inline size_t cpumask_size(void)
 #ifdef CONFIG_CPUMASK_OFFSTACK
 typedef struct cpumask *cpumask_var_t;

+bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
 void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
 void free_cpumask_var(cpumask_var_t mask);
@@ -1008,6 +1025,12 @@ static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
 	return true;
 }

+static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+					  int node)
+{
+	return true;
+}
+
 static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
 {
 }
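[Illustration only, not part of the diff: the usual cpumask_var_t lifecycle with the new NUMA-aware allocator. With CONFIG_CPUMASK_OFFSTACK=n both allocators are the no-ops shown above. example_count_online_on_node() is hypothetical and assumes an architecture providing cpumask_of_node() as added earlier in this diff.]

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/topology.h>

    /* Count the online CPUs on @node using a temporary, possibly off-stack, mask. */
    static int example_count_online_on_node(int node)
    {
            cpumask_var_t mask;
            int n;

            if (!alloc_cpumask_var_node(&mask, GFP_KERNEL, node))
                    return -ENOMEM;

            cpumask_and(mask, cpumask_of_node(node), cpu_online_mask);
            n = cpumask_weight(mask);

            free_cpumask_var(mask);
            return n;
    }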
@@ -1021,12 +1044,6 @@ static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
 }
 #endif /* CONFIG_CPUMASK_OFFSTACK */

-/* The pointer versions of the maps, these will become the primary versions. */
-#define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map)
-#define cpu_online_mask   ((const struct cpumask *)&cpu_online_map)
-#define cpu_present_mask  ((const struct cpumask *)&cpu_present_map)
-#define cpu_active_mask   ((const struct cpumask *)&cpu_active_map)
-
 /* It's common to want to use cpu_all_mask in struct member initializers,
  * so it has to refer to an address rather than a pointer. */
 extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
@@ -1035,51 +1052,16 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
 /* First bits of cpu_bit_bitmap are in fact unset. */
 #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])

-/* Wrappers for arch boot code to manipulate normally-constant masks */
-static inline void set_cpu_possible(unsigned int cpu, bool possible)
-{
-	if (possible)
-		cpumask_set_cpu(cpu, &cpu_possible_map);
-	else
-		cpumask_clear_cpu(cpu, &cpu_possible_map);
-}
-
-static inline void set_cpu_present(unsigned int cpu, bool present)
-{
-	if (present)
-		cpumask_set_cpu(cpu, &cpu_present_map);
-	else
-		cpumask_clear_cpu(cpu, &cpu_present_map);
-}
-
-static inline void set_cpu_online(unsigned int cpu, bool online)
-{
-	if (online)
-		cpumask_set_cpu(cpu, &cpu_online_map);
-	else
-		cpumask_clear_cpu(cpu, &cpu_online_map);
-}
-
-static inline void set_cpu_active(unsigned int cpu, bool active)
-{
-	if (active)
-		cpumask_set_cpu(cpu, &cpu_active_map);
-	else
-		cpumask_clear_cpu(cpu, &cpu_active_map);
-}
-
-static inline void init_cpu_present(const struct cpumask *src)
-{
-	cpumask_copy(&cpu_present_map, src);
-}
-
-static inline void init_cpu_possible(const struct cpumask *src)
-{
-	cpumask_copy(&cpu_possible_map, src);
-}
+#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
+#define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
+#define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)

-static inline void init_cpu_online(const struct cpumask *src)
-{
-	cpumask_copy(&cpu_online_map, src);
-}
+/* Wrappers for arch boot code to manipulate normally-constant masks */
+void set_cpu_possible(unsigned int cpu, bool possible);
+void set_cpu_present(unsigned int cpu, bool present);
+void set_cpu_online(unsigned int cpu, bool online);
+void set_cpu_active(unsigned int cpu, bool active);
+void init_cpu_present(const struct cpumask *src);
+void init_cpu_possible(const struct cpumask *src);
+void init_cpu_online(const struct cpumask *src);
 #endif /* __LINUX_CPUMASK_H */
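[Illustration only, not part of the diff: since the maps now sit behind const pointers, arch boot code can no longer write cpu_*_map directly and instead calls the out-of-line set_cpu_*()/init_cpu_*() wrappers; iteration goes through the pointer-based for_each_*_cpu(). A hypothetical sketch.]

    #include <linux/cpumask.h>
    #include <linux/init.h>
    #include <linux/kernel.h>

    /* Hypothetical arch boot path: the masks are const, so use the wrappers. */
    static void __init example_arch_smp_prepare(unsigned int max_cpus)
    {
            unsigned int cpu;

            for (cpu = 0; cpu < max_cpus; cpu++) {
                    set_cpu_possible(cpu, true);
                    set_cpu_present(cpu, true);
            }

            for_each_possible_cpu(cpu)
                    pr_debug("example: cpu %u is possible\n", cpu);
    }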
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index be3c484b5242..dfaee6bd265b 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -111,13 +111,13 @@ extern void enable_irq(unsigned int irq);

 extern cpumask_t irq_default_affinity;

-extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
+extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
 extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);

 #else /* CONFIG_SMP */

-static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
 {
 	return -EINVAL;
 }
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 98564dc64476..5845bdc1ac09 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -113,7 +113,8 @@ struct irq_chip {
 	void		(*eoi)(unsigned int irq);

 	void		(*end)(unsigned int irq);
-	void		(*set_affinity)(unsigned int irq, cpumask_t dest);
+	void		(*set_affinity)(unsigned int irq,
+					const struct cpumask *dest);
 	int		(*retrigger)(unsigned int irq);
 	int		(*set_type)(unsigned int irq, unsigned int flow_type);
 	int		(*set_wake)(unsigned int irq, unsigned int on);
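[Illustration only, not part of the diff: an irq_chip's set_affinity callback now receives a const struct cpumask * rather than a cpumask_t by value. A hypothetical controller sketch; the hardware programming step is elided.]

    #include <linux/cpumask.h>
    #include <linux/irq.h>
    #include <linux/kernel.h>

    /* Route @irq to one online CPU from the requested mask. */
    static void example_irq_set_affinity(unsigned int irq, const struct cpumask *dest)
    {
            unsigned int cpu = cpumask_any_and(dest, cpu_online_mask);

            if (cpu >= nr_cpu_ids)
                    return;
            pr_debug("example: routing irq %u to cpu %u\n", irq, cpu);
            /* ... program the controller's routing register for @cpu here ... */
    }

    static struct irq_chip example_irq_chip = {
            .name		= "EXAMPLE",
            .set_affinity	= example_irq_set_affinity,
    };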
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index b3dfa72f13b9..40ea5058c2ec 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -50,10 +50,11 @@ int seq_path(struct seq_file *, struct path *, char *);
 int seq_dentry(struct seq_file *, struct dentry *, char *);
 int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
 		  char *esc);
-int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits);
-static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask)
+int seq_bitmap(struct seq_file *m, const unsigned long *bits,
+	       unsigned int nr_bits);
+static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask)
 {
-	return seq_bitmap(m, mask->bits, NR_CPUS);
+	return seq_bitmap(m, mask->bits, nr_cpu_ids);
 }

 static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 6e7ba16ff454..b82466968101 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -21,6 +21,9 @@ struct call_single_data {
 	u16 priv;
 };

+/* total number of cpus in this system (may exceed NR_CPUS) */
+extern unsigned int total_cpus;
+
 #ifdef CONFIG_SMP

 #include <linux/preempt.h>
@@ -64,15 +67,16 @@ extern void smp_cpus_done(unsigned int max_cpus);
 * Call a function on all other processors
 */
 int smp_call_function(void(*func)(void *info), void *info, int wait);
-/* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */
-int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
-				int wait);
+void smp_call_function_many(const struct cpumask *mask,
+			    void (*func)(void *info), void *info, bool wait);

-static inline void smp_call_function_many(const struct cpumask *mask,
-					  void (*func)(void *info), void *info,
-					  int wait)
+/* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */
+static inline int
+smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
+		       int wait)
 {
-	smp_call_function_mask(*mask, func, info, wait);
+	smp_call_function_many(&mask, func, info, wait);
+	return 0;
 }

 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
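[Illustration only, not part of the diff: smp_call_function_many() is now the primary interface, taking a mask pointer and a bool wait, while the by-value smp_call_function_mask() survives only as the deprecated wrapper shown above. A hypothetical caller.]

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/smp.h>

    static void example_do_flush(void *info)
    {
            /* Runs on each targeted CPU; @info is the pointer passed below. */
    }

    /* Run example_do_flush() on every online CPU except the current one. */
    static void example_flush_others(void)
    {
            cpumask_var_t mask;
            int this_cpu;

            if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                    return;

            this_cpu = get_cpu();		/* keep us from migrating */
            cpumask_copy(mask, cpu_online_mask);
            cpumask_clear_cpu(this_cpu, mask);

            smp_call_function_many(mask, example_do_flush, NULL, true);
            put_cpu();

            free_cpumask_var(mask);
    }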
diff --git a/include/linux/threads.h b/include/linux/threads.h
index 38d1a5d6568e..052b12bec8bd 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -8,17 +8,17 @@
 */

 /*
- * Maximum supported processors that can run under SMP. This value is
- * set via configure setting. The maximum is equal to the size of the
- * bitmasks used on that platform, i.e. 32 or 64. Setting this smaller
- * saves quite a bit of memory.
+ * Maximum supported processors. Setting this smaller saves quite a
+ * bit of memory. Use nr_cpu_ids instead of this except for static bitmaps.
 */
-#ifdef CONFIG_SMP
-#define NR_CPUS	CONFIG_NR_CPUS
-#else
-#define NR_CPUS	1
+#ifndef CONFIG_NR_CPUS
+/* FIXME: This should be fixed in the arch's Kconfig */
+#define CONFIG_NR_CPUS	1
 #endif

+/* Places which use this should consider cpumask_var_t. */
+#define NR_CPUS	CONFIG_NR_CPUS
+
 #define MIN_THREADS_LEFT_FOR_ROOT 4

 /*