author:    Ingo Molnar <mingo@elte.hu>  2008-08-14 06:19:59 -0400
committer: Ingo Molnar <mingo@elte.hu>  2008-08-14 06:19:59 -0400
commit:    8d7ccaa545490cdffdfaff0842436a8dd85cf47b
tree:      8129b5907161bc6ae26deb3645ce1e280c5e1f51 /include/linux/cpumask.h
parent:    b2139aa0eec330c711c5a279db361e5ef1178e78
parent:    30a2f3c60a84092c8084dfe788b710f8d0768cd4
Merge commit 'v2.6.27-rc3' into x86/prototypes
Conflicts:
include/asm-x86/dma-mapping.h
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/cpumask.h')
 -rw-r--r--  include/linux/cpumask.h | 179
 1 file changed, 129 insertions(+), 50 deletions(-)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index c24875bd9c5b..d3219d73f8e6 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -17,6 +17,20 @@
  * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
  * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
  *
+ * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+ * Note: The alternate operations with the suffix "_nr" are used
+ *       to limit the range of the loop to nr_cpu_ids instead of
+ *       NR_CPUS when NR_CPUS > 64 for performance reasons.
+ *       If NR_CPUS is <= 64 then most assembler bitmask
+ *       operators execute faster with a constant range, so
+ *       the operator will continue to use NR_CPUS.
+ *
+ *       Another consideration is that nr_cpu_ids is initialized
+ *       to NR_CPUS and isn't lowered until the possible cpus are
+ *       discovered (including any disabled cpus).  So early uses
+ *       will span the entire range of NR_CPUS.
+ * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+ *
  * The available cpumask operations are:
  *
  * void cpu_set(cpu, mask)		turn on bit 'cpu' in mask
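To make the "_nr" note above concrete, a minimal sketch (not part of this commit; drain_pending() and do_work() are hypothetical): on a kernel built with NR_CPUS=4096 that boots on a 16-cpu machine, nr_cpu_ids drops to 16 once the possible cpus are discovered, so the "_nr" iterator stops after 16 bit positions where the plain form would scan all 4096.

	/* Illustrative caller only -- not a kernel API. */
	static void drain_pending(cpumask_t *pending)
	{
		int cpu;

		for_each_cpu_mask_nr(cpu, *pending)	/* bounded by nr_cpu_ids */
			do_work(cpu);			/* hypothetical helper */
	}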
@@ -38,18 +52,52 @@
  * int cpus_empty(mask)			Is mask empty (no bits set)?
  * int cpus_full(mask)			Is mask full (all bits set)?
  * int cpus_weight(mask)		Hamming weight - number of set bits
+ * int cpus_weight_nr(mask)		Same using nr_cpu_ids instead of NR_CPUS
  *
  * void cpus_shift_right(dst, src, n)	Shift right
  * void cpus_shift_left(dst, src, n)	Shift left
  *
  * int first_cpu(mask)			Number lowest set bit, or NR_CPUS
  * int next_cpu(cpu, mask)		Next cpu past 'cpu', or NR_CPUS
+ * int next_cpu_nr(cpu, mask)		Next cpu past 'cpu', or nr_cpu_ids
  *
  * cpumask_t cpumask_of_cpu(cpu)	Return cpumask with bit 'cpu' set
+ *					(can be used as an lvalue)
  * CPU_MASK_ALL				Initializer - all bits set
  * CPU_MASK_NONE			Initializer - no bits set
  * unsigned long *cpus_addr(mask)	Array of unsigned long's in mask
  *
+ * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t
+ * variables, and CPUMASK_PTR provides pointers to each field.
+ *
+ * The structure should be defined something like this:
+ * struct my_cpumasks {
+ *	cpumask_t mask1;
+ *	cpumask_t mask2;
+ * };
+ *
+ * Usage is then:
+ *	CPUMASK_ALLOC(my_cpumasks);
+ *	CPUMASK_PTR(mask1, my_cpumasks);
+ *	CPUMASK_PTR(mask2, my_cpumasks);
+ *
+ *	--- DO NOT reference cpumask_t pointers until this check ---
+ *	if (my_cpumasks == NULL)
+ *		"kmalloc failed"...
+ *
+ * References are now pointers to the cpumask_t variables (*mask1, ...)
+ *
+ *if NR_CPUS > BITS_PER_LONG
+ *   CPUMASK_ALLOC(m)			Declares and allocates struct m *m =
+ *						kmalloc(sizeof(*m), GFP_KERNEL)
+ *   CPUMASK_FREE(m)			Macro for kfree(m)
+ *else
+ *   CPUMASK_ALLOC(m)			Declares struct m _m, *m = &_m
+ *   CPUMASK_FREE(m)			Nop
+ *endif
+ *   CPUMASK_PTR(v, m)			Declares cpumask_t *v = &(m->v)
+ * ------------------------------------------------------------------------
+ *
  * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
  * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask
  * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
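The CPUMASK_ALLOC/CPUMASK_PTR comment above maps to code like the following minimal sketch (struct and function names are illustrative, not from this commit):

	struct allmasks {
		cpumask_t	scratch;
		cpumask_t	online_copy;
	};

	static int setup_masks(void)
	{
		CPUMASK_ALLOC(allmasks);		/* struct allmasks *allmasks */
		CPUMASK_PTR(scratch, allmasks);		/* cpumask_t *scratch */
		CPUMASK_PTR(online_copy, allmasks);	/* cpumask_t *online_copy */

		if (allmasks == NULL)			/* kmalloc can fail */
			return -ENOMEM;

		cpus_clear(*scratch);
		cpus_and(*online_copy, cpu_online_map, cpu_possible_map);

		CPUMASK_FREE(allmasks);
		return 0;
	}

Note that when NR_CPUS <= BITS_PER_LONG the "allocation" is really a stack variable, so the NULL check is trivially false but harmless.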
@@ -59,7 +107,8 @@
  * void cpus_onto(dst, orig, relmap)	*dst = orig relative to relmap
  * void cpus_fold(dst, orig, sz)	dst bits = orig bits mod sz
  *
- * for_each_cpu_mask(cpu, mask)	for-loop cpu over mask
+ * for_each_cpu_mask(cpu, mask)	for-loop cpu over mask using NR_CPUS
+ * for_each_cpu_mask_nr(cpu, mask)	for-loop cpu over mask using nr_cpu_ids
  *
  * int num_online_cpus()		Number of online CPUs
  * int num_possible_cpus()		Number of all possible CPUs
@@ -216,33 +265,30 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
 	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
 }
 
-#ifdef CONFIG_SMP
-int __first_cpu(const cpumask_t *srcp);
-#define first_cpu(src) __first_cpu(&(src))
-int __next_cpu(int n, const cpumask_t *srcp);
-#define next_cpu(n, src) __next_cpu((n), &(src))
-#else
-#define first_cpu(src) ({ (void)(src); 0; })
-#define next_cpu(n, src) ({ (void)(src); 1; })
-#endif
+/*
+ * Special-case data structure for "single bit set only" constant CPU masks.
+ *
+ * We pre-generate all the 64 (or 32) possible bit positions, with enough
+ * padding to the left and the right, and return the constant pointer
+ * appropriately offset.
+ */
+extern const unsigned long
+	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
+
+static inline const cpumask_t *get_cpu_mask(unsigned int cpu)
+{
+	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
+	p -= cpu / BITS_PER_LONG;
+	return (const cpumask_t *)p;
+}
 
-#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
-extern cpumask_t *cpumask_of_cpu_map;
-#define cpumask_of_cpu(cpu)	(cpumask_of_cpu_map[cpu])
+/*
+ * In cases where we take the address of the cpumask immediately,
+ * gcc optimizes it out (it's a constant) and there's no huge stack
+ * variable created:
+ */
+#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
 
-#else
-#define cpumask_of_cpu(cpu)						\
-(*({									\
-	typeof(_unused_cpumask_arg_) m;					\
-	if (sizeof(m) == sizeof(unsigned long)) {			\
-		m.bits[0] = 1UL<<(cpu);					\
-	} else {							\
-		cpus_clear(m);						\
-		cpu_set((cpu), m);					\
-	}								\
-	&m;								\
-}))
-#endif
 
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
 
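A worked trace of the pointer arithmetic in get_cpu_mask() above, with illustrative numbers (64-bit longs, NR_CPUS = 128): for cpu = 127 the row index is 1 + 127 % 64 = 64, and row 64's first word is 1UL << 63; backing up 127 / 64 = 1 word makes that word land at word index 1 of the returned mask, i.e. bit 64 + 63 = 127. Row 0 stays all-zero and only word 0 of each row is ever set, which supplies the left/right padding the comment mentions. A self-contained userspace re-creation of the trick (all names here are ours, not the kernel's):

	#include <stdio.h>

	#define BPL	(8 * sizeof(unsigned long))	/* BITS_PER_LONG */
	#define NR	128				/* stand-in for NR_CPUS */
	#define LONGS	((NR + BPL - 1) / BPL)		/* BITS_TO_LONGS(NR) */

	static unsigned long bitmap[BPL + 1][LONGS];	/* row 0 stays zero */

	static const unsigned long *demo_cpu_mask(unsigned int cpu)
	{
		const unsigned long *p = bitmap[1 + cpu % BPL];
		return p - cpu / BPL;	/* word cpu/BPL of the result is the set word */
	}

	int main(void)
	{
		unsigned int i;

		for (i = 0; i < BPL; i++)
			bitmap[i + 1][0] = 1UL << i;	/* one single-bit pattern per row */

		const unsigned long *m = demo_cpu_mask(127);
		printf("word0=%#lx word1=%#lx\n", m[0], m[1]);	/* 0 and 1UL<<63 */
		return 0;
	}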
@@ -281,6 +327,15 @@ extern cpumask_t cpu_mask_all;
 
 #define cpus_addr(src) ((src).bits)
 
+#if NR_CPUS > BITS_PER_LONG
+#define	CPUMASK_ALLOC(m)	struct m *m = kmalloc(sizeof(*m), GFP_KERNEL)
+#define	CPUMASK_FREE(m)		kfree(m)
+#else
+#define	CPUMASK_ALLOC(m)	struct m _m, *m = &_m
+#define	CPUMASK_FREE(m)
+#endif
+#define	CPUMASK_PTR(v, m)	cpumask_t *v = &(m->v)
+
 #define cpumask_scnprintf(buf, len, src) \
 	__cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
 static inline int __cpumask_scnprintf(char *buf, int len,
@@ -343,29 +398,59 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
 	bitmap_fold(dstp->bits, origp->bits, sz, nbits);
 }
 
-#if NR_CPUS > 1
-#define for_each_cpu_mask(cpu, mask)		\
-	for ((cpu) = first_cpu(mask);		\
-		(cpu) < NR_CPUS;		\
-		(cpu) = next_cpu((cpu), (mask)))
-#else /* NR_CPUS == 1 */
-#define for_each_cpu_mask(cpu, mask)		\
-	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#endif /* NR_CPUS */
+#if NR_CPUS == 1
+
+#define nr_cpu_ids		1
+#define first_cpu(src)		({ (void)(src); 0; })
+#define next_cpu(n, src)	({ (void)(src); 1; })
+#define any_online_cpu(mask)	0
+#define for_each_cpu_mask(cpu, mask)	\
+	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+
+#else /* NR_CPUS > 1 */
+
+extern int nr_cpu_ids;
+int __first_cpu(const cpumask_t *srcp);
+int __next_cpu(int n, const cpumask_t *srcp);
+int __any_online_cpu(const cpumask_t *mask);
+
+#define first_cpu(src)		__first_cpu(&(src))
+#define next_cpu(n, src)	__next_cpu((n), &(src))
+#define any_online_cpu(mask)	__any_online_cpu(&(mask))
+#define for_each_cpu_mask(cpu, mask)		\
+	for ((cpu) = -1;			\
+		(cpu) = next_cpu((cpu), (mask)),\
+		(cpu) < NR_CPUS; )
+#endif
+
+#if NR_CPUS <= 64
 
 #define next_cpu_nr(n, src)		next_cpu(n, src)
 #define cpus_weight_nr(cpumask)		cpus_weight(cpumask)
 #define for_each_cpu_mask_nr(cpu, mask)	for_each_cpu_mask(cpu, mask)
 
+#else /* NR_CPUS > 64 */
+
+int __next_cpu_nr(int n, const cpumask_t *srcp);
+#define next_cpu_nr(n, src)	__next_cpu_nr((n), &(src))
+#define cpus_weight_nr(cpumask)	__cpus_weight(&(cpumask), nr_cpu_ids)
+#define for_each_cpu_mask_nr(cpu, mask)		\
+	for ((cpu) = -1;			\
+		(cpu) = next_cpu_nr((cpu), (mask)),\
+		(cpu) < nr_cpu_ids; )
+
+#endif /* NR_CPUS > 64 */
+
 /*
  * The following particular system cpumasks and operations manage
- * possible, present and online cpus.  Each of them is a fixed size
+ * possible, present, active and online cpus.  Each of them is a fixed size
  * bitmap of size NR_CPUS.
  *
  *  #ifdef CONFIG_HOTPLUG_CPU
  *     cpu_possible_map - has bit 'cpu' set iff cpu is populatable
  *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
  *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
+ *     cpu_active_map   - has bit 'cpu' set iff cpu available to migration
  *  #else
  *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
  *     cpu_present_map  - copy of cpu_possible_map
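The reshaped iterators above merit a short aside (our reading of the macro, not commit text): starting cpu at -1 and folding the advance into the loop condition via the comma operator lets next_cpu_nr() double as first_cpu(), so each pass costs a single find-next-bit scan. For a mask with bits {2, 5} set and nr_cpu_ids == 8, for_each_cpu_mask_nr(cpu, mask) evaluates:

	cpu = -1;
	cpu = next_cpu_nr(-1, mask);	/* -> 2, 2 < 8 : run body */
	cpu = next_cpu_nr(2, mask);	/* -> 5, 5 < 8 : run body */
	cpu = next_cpu_nr(5, mask);	/* -> 8 (no bit left) : loop exits */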
@@ -416,14 +501,16 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
 extern cpumask_t cpu_possible_map;
 extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_present_map;
+extern cpumask_t cpu_active_map;
 
 #if NR_CPUS > 1
-#define num_online_cpus()	cpus_weight(cpu_online_map)
-#define num_possible_cpus()	cpus_weight(cpu_possible_map)
-#define num_present_cpus()	cpus_weight(cpu_present_map)
+#define num_online_cpus()	cpus_weight_nr(cpu_online_map)
+#define num_possible_cpus()	cpus_weight_nr(cpu_possible_map)
+#define num_present_cpus()	cpus_weight_nr(cpu_present_map)
 #define cpu_online(cpu)		cpu_isset((cpu), cpu_online_map)
 #define cpu_possible(cpu)	cpu_isset((cpu), cpu_possible_map)
 #define cpu_present(cpu)	cpu_isset((cpu), cpu_present_map)
+#define cpu_active(cpu)		cpu_isset((cpu), cpu_active_map)
 #else
 #define num_online_cpus()	1
 #define num_possible_cpus()	1
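A sketch of how the new cpu_active() test is meant to be used, inferred from the "available to migration" comment above (the helper below is hypothetical, not from this commit):

	/* Hypothetical: pick a migration target that is still accepting tasks. */
	static int pick_target(const cpumask_t *allowed)
	{
		int cpu;

		for_each_cpu_mask_nr(cpu, *allowed)
			if (cpu_active(cpu))	/* online and not on its way down */
				return cpu;
		return -1;
	}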
@@ -431,21 +518,13 @@ extern cpumask_t cpu_present_map;
 #define cpu_online(cpu)		((cpu) == 0)
 #define cpu_possible(cpu)	((cpu) == 0)
 #define cpu_present(cpu)	((cpu) == 0)
+#define cpu_active(cpu)		((cpu) == 0)
 #endif
 
 #define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))
 
-#ifdef CONFIG_SMP
-extern int nr_cpu_ids;
-#define any_online_cpu(mask) __any_online_cpu(&(mask))
-int __any_online_cpu(const cpumask_t *mask);
-#else
-#define nr_cpu_ids 1
-#define any_online_cpu(mask) 0
-#endif
-
-#define for_each_possible_cpu(cpu)  for_each_cpu_mask((cpu), cpu_possible_map)
-#define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)
-#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
+#define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map)
+#define for_each_online_cpu(cpu)   for_each_cpu_mask_nr((cpu), cpu_online_map)
+#define for_each_present_cpu(cpu)  for_each_cpu_mask_nr((cpu), cpu_present_map)
 
 #endif /* __LINUX_CPUMASK_H */