author     Linus Torvalds <torvalds@linux-foundation.org>  2009-01-03 15:04:39 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-01-03 15:04:39 -0500
commit     7d3b56ba37a95f1f370f50258ed3954c304c524b (patch)
tree       86102527b92f02450aa245f084ffb491c18d2e0a /include/linux/cpumask.h
parent     269b012321f2f1f8e4648c43a93bf432b42c6668 (diff)
parent     ab14398abd195af91a744c320a52a1bce814dd1e (diff)
Merge branch 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (77 commits)
x86: setup_per_cpu_areas() cleanup
cpumask: fix compile error when CONFIG_NR_CPUS is not defined
cpumask: use alloc_cpumask_var_node where appropriate
cpumask: convert shared_cpu_map in acpi_processor* structs to cpumask_var_t
x86: use cpumask_var_t in acpi/boot.c
x86: cleanup some remaining usages of NR_CPUS where s/b nr_cpu_ids
sched: put back some stack hog changes that were undone in kernel/sched.c
x86: enable cpus display of kernel_max and offlined cpus
ia64: cpumask fix for is_affinity_mask_valid()
cpumask: convert RCU implementations, fix
xtensa: define __fls
mn10300: define __fls
m32r: define __fls
h8300: define __fls
frv: define __fls
cris: define __fls
cpumask: CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
cpumask: zero extra bits in alloc_cpumask_var_node
cpumask: replace for_each_cpu_mask_nr with for_each_cpu in kernel/time/
cpumask: convert mm/
...
Diffstat (limited to 'include/linux/cpumask.h')
-rw-r--r--  include/linux/cpumask.h | 221
1 file changed, 91 insertions, 130 deletions
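The series merged here converts callers from the old pass-by-value cpumask_t operators to the pointer-based API (cpumask_var_t, cpumask_weight(), cpumask_test_cpu(), for_each_cpu()). As a rough sketch only, not part of this commit (the function name and error handling are invented for illustration), a caller of the new-style API looks something like this:

/*
 * Illustrative sketch only, not part of this commit: a caller of the
 * pointer-based cpumask API that this merge converts the tree toward.
 * example_count_online() is a made-up name.
 */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_count_online(void)
{
	cpumask_var_t mask;
	int cpu, count = 0;

	/* Heap-allocated when CONFIG_CPUMASK_OFFSTACK=y, on-stack otherwise */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);	/* masks are passed by pointer */

	for_each_cpu(cpu, mask)			/* iterates only up to nr_cpu_ids */
		count++;

	free_cpumask_var(mask);
	return count;
}

With CONFIG_CPUMASK_OFFSTACK=y the variable is heap-allocated, which is what lets NR_CPUS=4096 configurations avoid placing a 512-byte cpumask_t on the stack, the "stack hog" problem several commits in the shortlog refer to.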
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d4bf52603e6b..9f315382610b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -144,6 +144,7 @@
 typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
 extern cpumask_t _unused_cpumask_arg_;
 
+#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
 static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
 {
@@ -267,6 +268,26 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
 {
 	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
 }
+#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
+
+/**
+ * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * @bitmap: the bitmap
+ *
+ * There are a few places where cpumask_var_t isn't appropriate and
+ * static cpumasks must be used (eg. very early boot), yet we don't
+ * expose the definition of 'struct cpumask'.
+ *
+ * This does the conversion, and can be used as a constant initializer.
+ */
+#define to_cpumask(bitmap)						\
+	((struct cpumask *)(1 ? (bitmap)				\
+			    : (void *)sizeof(__check_is_bitmap(bitmap))))
+
+static inline int __check_is_bitmap(const unsigned long *bitmap)
+{
+	return 1;
+}
 
 /*
  * Special-case data structure for "single bit set only" constant CPU masks.
@@ -278,13 +299,14 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
 extern const unsigned long
 	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
 
-static inline const cpumask_t *get_cpu_mask(unsigned int cpu)
+static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 {
 	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 	p -= cpu / BITS_PER_LONG;
-	return (const cpumask_t *)p;
+	return to_cpumask(p);
 }
 
+#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 /*
  * In cases where we take the address of the cpumask immediately,
  * gcc optimizes it out (it's a constant) and there's no huge stack
@@ -370,19 +392,22 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
 {
 	bitmap_fold(dstp->bits, origp->bits, sz, nbits);
 }
+#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
 
 #if NR_CPUS == 1
 
 #define nr_cpu_ids		1
+#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 #define first_cpu(src)		({ (void)(src); 0; })
 #define next_cpu(n, src)	({ (void)(src); 1; })
 #define any_online_cpu(mask)	0
 #define for_each_cpu_mask(cpu, mask)	\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-
+#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
 #else /* NR_CPUS > 1 */
 
 extern int nr_cpu_ids;
+#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 int __first_cpu(const cpumask_t *srcp);
 int __next_cpu(int n, const cpumask_t *srcp);
 int __any_online_cpu(const cpumask_t *mask);
@@ -394,8 +419,10 @@ int __any_online_cpu(const cpumask_t *mask);
 	for ((cpu) = -1;				\
 		(cpu) = next_cpu((cpu), (mask)),	\
 		(cpu) < NR_CPUS; )
+#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
 #endif
 
+#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 #if NR_CPUS <= 64
 
 #define next_cpu_nr(n, src)		next_cpu(n, src)
@@ -413,77 +440,67 @@ int __next_cpu_nr(int n, const cpumask_t *srcp);
 	(cpu) < nr_cpu_ids; )
 
 #endif /* NR_CPUS > 64 */
+#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
 
 /*
  * The following particular system cpumasks and operations manage
- * possible, present, active and online cpus.  Each of them is a fixed size
- * bitmap of size NR_CPUS.
+ * possible, present, active and online cpus.
  *
- * #ifdef CONFIG_HOTPLUG_CPU
- *     cpu_possible_map - has bit 'cpu' set iff cpu is populatable
- *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
- *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
- *     cpu_active_map   - has bit 'cpu' set iff cpu available to migration
- * #else
- *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
- *     cpu_present_map  - copy of cpu_possible_map
- *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
- * #endif
+ *     cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
+ *     cpu_present_mask - has bit 'cpu' set iff cpu is populated
+ *     cpu_online_mask  - has bit 'cpu' set iff cpu available to scheduler
+ *     cpu_active_mask  - has bit 'cpu' set iff cpu available to migration
 *
- * In either case, NR_CPUS is fixed at compile time, as the static
- * size of these bitmaps.  The cpu_possible_map is fixed at boot
- * time, as the set of CPU id's that it is possible might ever
- * be plugged in at anytime during the life of that system boot.
- * The cpu_present_map is dynamic(*), representing which CPUs
- * are currently plugged in.  And cpu_online_map is the dynamic
- * subset of cpu_present_map, indicating those CPUs available
- * for scheduling.
+ *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
 *
- * If HOTPLUG is enabled, then cpu_possible_map is forced to have
+ * The cpu_possible_mask is fixed at boot time, as the set of CPU id's
+ * that it is possible might ever be plugged in at anytime during the
+ * life of that system boot.  The cpu_present_mask is dynamic(*),
+ * representing which CPUs are currently plugged in.  And
+ * cpu_online_mask is the dynamic subset of cpu_present_mask,
+ * indicating those CPUs available for scheduling.
+ *
+ * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
 * all NR_CPUS bits set, otherwise it is just the set of CPUs that
 * ACPI reports present at boot.
 *
- * If HOTPLUG is enabled, then cpu_present_map varies dynamically,
+ * If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
 * depending on what ACPI reports as currently plugged in, otherwise
- * cpu_present_map is just a copy of cpu_possible_map.
+ * cpu_present_mask is just a copy of cpu_possible_mask.
 *
- * (*) Well, cpu_present_map is dynamic in the hotplug case.  If not
- *     hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
+ * (*) Well, cpu_present_mask is dynamic in the hotplug case.  If not
+ *     hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
 *
 * Subtleties:
 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
 *    assumption that their single CPU is online.  The UP
- *    cpu_{online,possible,present}_maps are placebos.  Changing them
+ *    cpu_{online,possible,present}_masks are placebos.  Changing them
 *    will have no useful affect on the following num_*_cpus()
 *    and cpu_*() macros in the UP case.  This ugliness is a UP
 *    optimization - don't waste any instructions or memory references
 *    asking if you're online or how many CPUs there are if there is
 *    only one CPU.
- * 2) Most SMP arch's #define some of these maps to be some
- *    other map specific to that arch.  Therefore, the following
- *    must be #define macros, not inlines.  To see why, examine
- *    the assembly code produced by the following.  Note that
- *    set1() writes phys_x_map, but set2() writes x_map:
- *        int x_map, phys_x_map;
- *        #define set1(a) x_map = a
- *        inline void set2(int a) { x_map = a; }
- *        #define x_map phys_x_map
- *        main(){ set1(3); set2(5); }
 */
 
-extern cpumask_t cpu_possible_map;
-extern cpumask_t cpu_online_map;
-extern cpumask_t cpu_present_map;
-extern cpumask_t cpu_active_map;
+extern const struct cpumask *const cpu_possible_mask;
+extern const struct cpumask *const cpu_online_mask;
+extern const struct cpumask *const cpu_present_mask;
+extern const struct cpumask *const cpu_active_mask;
+
+/* These strip const, as traditionally they weren't const. */
+#define cpu_possible_map	(*(cpumask_t *)cpu_possible_mask)
+#define cpu_online_map		(*(cpumask_t *)cpu_online_mask)
+#define cpu_present_map	(*(cpumask_t *)cpu_present_mask)
+#define cpu_active_map		(*(cpumask_t *)cpu_active_mask)
 
 #if NR_CPUS > 1
-#define num_online_cpus()	cpus_weight_nr(cpu_online_map)
-#define num_possible_cpus()	cpus_weight_nr(cpu_possible_map)
-#define num_present_cpus()	cpus_weight_nr(cpu_present_map)
-#define cpu_online(cpu)		cpu_isset((cpu), cpu_online_map)
-#define cpu_possible(cpu)	cpu_isset((cpu), cpu_possible_map)
-#define cpu_present(cpu)	cpu_isset((cpu), cpu_present_map)
-#define cpu_active(cpu)		cpu_isset((cpu), cpu_active_map)
+#define num_online_cpus()	cpumask_weight(cpu_online_mask)
+#define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
+#define num_present_cpus()	cpumask_weight(cpu_present_mask)
+#define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
+#define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
+#define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
+#define cpu_active(cpu)		cpumask_test_cpu((cpu), cpu_active_mask)
 #else
 #define num_online_cpus()	1
 #define num_possible_cpus()	1
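Aside (not part of the patch): the comment block above documents the renamed cpu_*_mask pointers and the helpers built on them. A minimal illustrative sketch of how that API is typically consumed, with the function name invented for illustration:

/*
 * Illustrative sketch only, not part of the patch: typical consumers of
 * the cpu_*_mask pointers documented in the comment above.
 * example_report_cpus() is a made-up name.
 */
#include <linux/cpumask.h>
#include <linux/kernel.h>

static void example_report_cpus(void)
{
	int cpu;

	/* possible >= present >= online; cpu_active_mask tracks migration targets */
	pr_info("%u possible, %u present, %u online CPUs\n",
		num_possible_cpus(), num_present_cpus(), num_online_cpus());

	for_each_online_cpu(cpu)		/* walks cpu_online_mask */
		pr_info("cpu%d: %s\n", cpu,
			cpu_active(cpu) ? "online and active" : "online");
}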
@@ -496,10 +513,6 @@ extern cpumask_t cpu_active_map;
 
 #define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))
 
-#define for_each_possible_cpu(cpu)  for_each_cpu_mask_nr((cpu), cpu_possible_map)
-#define for_each_online_cpu(cpu)  for_each_cpu_mask_nr((cpu), cpu_online_map)
-#define for_each_present_cpu(cpu)  for_each_cpu_mask_nr((cpu), cpu_present_map)
-
 /* These are the new versions of the cpumask operators: passed by pointer.
  * The older versions will be implemented in terms of these, then deleted. */
 #define cpumask_bits(maskp) ((maskp)->bits)
@@ -687,7 +700,7 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
  * No static inline type checking - see Subtlety (1) above.
  */
 #define cpumask_test_cpu(cpu, cpumask) \
-	test_bit(cpumask_check(cpu), (cpumask)->bits)
+	test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))
 
 /**
  * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
@@ -930,7 +943,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
 static inline int cpumask_scnprintf(char *buf, int len,
 				    const struct cpumask *srcp)
 {
-	return bitmap_scnprintf(buf, len, srcp->bits, nr_cpumask_bits);
+	return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits);
 }
 
 /**
@@ -944,7 +957,7 @@ static inline int cpumask_scnprintf(char *buf, int len,
 static inline int cpumask_parse_user(const char __user *buf, int len,
 				     struct cpumask *dstp)
 {
-	return bitmap_parse_user(buf, len, dstp->bits, nr_cpumask_bits);
+	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
 }
 
 /**
@@ -959,7 +972,8 @@ static inline int cpumask_parse_user(const char __user *buf, int len,
 static inline int cpulist_scnprintf(char *buf, int len,
 				    const struct cpumask *srcp)
 {
-	return bitmap_scnlistprintf(buf, len, srcp->bits, nr_cpumask_bits);
+	return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp),
+				    nr_cpumask_bits);
 }
 
 /**
@@ -972,26 +986,7 @@ static inline int cpulist_scnprintf(char *buf, int len,
  */
 static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
 {
-	return bitmap_parselist(buf, dstp->bits, nr_cpumask_bits);
-}
-
-/**
- * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
- * @bitmap: the bitmap
- *
- * There are a few places where cpumask_var_t isn't appropriate and
- * static cpumasks must be used (eg. very early boot), yet we don't
- * expose the definition of 'struct cpumask'.
- *
- * This does the conversion, and can be used as a constant initializer.
- */
-#define to_cpumask(bitmap)						\
-	((struct cpumask *)(1 ? (bitmap)				\
-			    : (void *)sizeof(__check_is_bitmap(bitmap))))
-
-static inline int __check_is_bitmap(const unsigned long *bitmap)
-{
-	return 1;
+	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
 }
 
 /**
@@ -1025,6 +1020,7 @@ static inline size_t cpumask_size(void)
 #ifdef CONFIG_CPUMASK_OFFSTACK
 typedef struct cpumask *cpumask_var_t;
 
+bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
 void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
 void free_cpumask_var(cpumask_var_t mask);
@@ -1038,6 +1034,12 @@ static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
 	return true;
 }
 
+static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+					  int node)
+{
+	return true;
+}
+
 static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
 {
 }
@@ -1051,12 +1053,6 @@ static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
 }
 #endif /* CONFIG_CPUMASK_OFFSTACK */
 
-/* The pointer versions of the maps, these will become the primary versions. */
-#define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map)
-#define cpu_online_mask ((const struct cpumask *)&cpu_online_map)
-#define cpu_present_mask ((const struct cpumask *)&cpu_present_map)
-#define cpu_active_mask ((const struct cpumask *)&cpu_active_map)
-
 /* It's common to want to use cpu_all_mask in struct member initializers,
  * so it has to refer to an address rather than a pointer. */
 extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
@@ -1065,51 +1061,16 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
 /* First bits of cpu_bit_bitmap are in fact unset. */
 #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
 
-/* Wrappers for arch boot code to manipulate normally-constant masks */
-static inline void set_cpu_possible(unsigned int cpu, bool possible)
-{
-	if (possible)
-		cpumask_set_cpu(cpu, &cpu_possible_map);
-	else
-		cpumask_clear_cpu(cpu, &cpu_possible_map);
-}
-
-static inline void set_cpu_present(unsigned int cpu, bool present)
-{
-	if (present)
-		cpumask_set_cpu(cpu, &cpu_present_map);
-	else
-		cpumask_clear_cpu(cpu, &cpu_present_map);
-}
-
-static inline void set_cpu_online(unsigned int cpu, bool online)
-{
-	if (online)
-		cpumask_set_cpu(cpu, &cpu_online_map);
-	else
-		cpumask_clear_cpu(cpu, &cpu_online_map);
-}
+#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
+#define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
+#define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)
 
-static inline void set_cpu_active(unsigned int cpu, bool active)
-{
-	if (active)
-		cpumask_set_cpu(cpu, &cpu_active_map);
-	else
-		cpumask_clear_cpu(cpu, &cpu_active_map);
-}
-
-static inline void init_cpu_present(const struct cpumask *src)
-{
-	cpumask_copy(&cpu_present_map, src);
-}
-
-static inline void init_cpu_possible(const struct cpumask *src)
-{
-	cpumask_copy(&cpu_possible_map, src);
-}
-
-static inline void init_cpu_online(const struct cpumask *src)
-{
-	cpumask_copy(&cpu_online_map, src);
-}
+/* Wrappers for arch boot code to manipulate normally-constant masks */
+void set_cpu_possible(unsigned int cpu, bool possible);
+void set_cpu_present(unsigned int cpu, bool present);
+void set_cpu_online(unsigned int cpu, bool online);
+void set_cpu_active(unsigned int cpu, bool active);
+void init_cpu_present(const struct cpumask *src);
+void init_cpu_possible(const struct cpumask *src);
+void init_cpu_online(const struct cpumask *src);
 #endif /* __LINUX_CPUMASK_H */
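Aside (not part of the patch): to_cpumask() is moved above get_cpu_mask() so the latter can use it, and the set_cpu_*()/init_cpu_*() wrappers become out-of-line functions now that the masks are exported as const pointers. A hedged sketch of how hypothetical arch boot code might use these, with the bitmap and function names invented for illustration:

/*
 * Illustrative sketch only, not from the patch: to_cpumask() lets very
 * early boot code keep a static NR_CPUS bitmap and still hand out a
 * struct cpumask pointer, and arch code now calls the out-of-line
 * set_cpu_*() wrappers instead of writing cpu_*_map directly.
 * boot_cpu_bits and example_mark_boot_cpu() are made-up names.
 */
#include <linux/cpumask.h>
#include <linux/init.h>

static DECLARE_BITMAP(boot_cpu_bits, NR_CPUS);

static void __init example_mark_boot_cpu(unsigned int cpu)
{
	struct cpumask *boot_mask = to_cpumask(boot_cpu_bits);

	cpumask_set_cpu(cpu, boot_mask);	/* private static mask */

	set_cpu_possible(cpu, true);		/* normally-constant masks */
	set_cpu_present(cpu, true);
}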