path: root/include/linux/cpumask.h
author		Rusty Russell <rusty@rustcorp.com.au>	2008-11-08 04:24:19 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-09 15:09:54 -0500
commit		984f2f377fdfd098f5ae58d09ee04d5e29e6112b (patch)
tree		6f6ea07057f5680586a8ac6f77700c118f253bcb /include/linux/cpumask.h
parent		cd83e42c6b0413dcbb548c2ead799111ff7e6a13 (diff)
cpumask: introduce new API, without changing anything, v3
Impact: cleanup

Clean up based on feedback from Andrew Morton and others:

  - change to inline functions instead of macros
  - add __init to bootmem method
  - add a missing debug check

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
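As background for the first bullet above, here is a minimal stand-alone sketch of why a static inline stub is preferable to a macro stub. It is not part of the commit: the mask_t type and both stub names are invented for illustration, and the kernel's removed macros used a GNU statement expression, approximated here with a standard comma expression.

/*
 * Illustration only -- not from the commit.  A macro stub that merely
 * casts its argument to void accepts any expression; a static inline
 * stub gets full type checking of its pointer parameter.
 */
#include <stdio.h>

struct mask_t { unsigned long bits; };

/* Macro stub: discards its argument, so any expression is accepted. */
#define mask_first_macro(src)	((void)(src), 0U)

/* Inline stub: the compiler checks that src is a const struct mask_t *. */
static inline unsigned int mask_first_inline(const struct mask_t *src)
{
	(void)src;
	return 0;
}

int main(void)
{
	struct mask_t m = { 1UL };

	/* Both calls return 0, but only the inline form would reject a
	 * wrongly typed caller: mask_first_macro(42) compiles silently,
	 * while mask_first_inline(42) draws a pointer-conversion warning. */
	printf("%u %u\n", mask_first_macro(&m), mask_first_inline(&m));
	return 0;
}

On a uniprocessor build the generated code is the same either way; the gain is in diagnostics and in having the stubs behave like real functions.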
Diffstat (limited to 'include/linux/cpumask.h')
-rw-r--r--	include/linux/cpumask.h	58
1 file changed, 52 insertions(+), 6 deletions(-)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 31caa1bc620a..21e1dd43e52a 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -564,12 +564,36 @@ static inline unsigned int cpumask_check(unsigned int cpu)
 }
 
 #if NR_CPUS == 1
-/* Uniprocesor. */
-#define cpumask_first(src)		({ (void)(src); 0; })
-#define cpumask_next(n, src)		({ (void)(src); 1; })
-#define cpumask_next_zero(n, src)	({ (void)(src); 1; })
-#define cpumask_next_and(n, srcp, andp)	({ (void)(srcp), (void)(andp); 1; })
-#define cpumask_any_but(mask, cpu)	({ (void)(mask); (void)(cpu); 0; })
+/* Uniprocessor. Assume all masks are "1". */
+static inline unsigned int cpumask_first(const struct cpumask *srcp)
+{
+	return 0;
+}
+
+/* Valid inputs for n are -1 and 0. */
+static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+	return n+1;
+}
+
+static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+{
+	return n+1;
+}
+
+static inline unsigned int cpumask_next_and(int n,
+					    const struct cpumask *srcp,
+					    const struct cpumask *andp)
+{
+	return n+1;
+}
+
+/* cpu must be a valid cpu, ie 0, so there's no other choice. */
+static inline unsigned int cpumask_any_but(const struct cpumask *mask,
+					   unsigned int cpu)
+{
+	return 1;
+}
 
 #define for_each_cpu(cpu, mask)			\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
@@ -620,10 +644,32 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
 
+/**
+ * for_each_cpu - iterate over every cpu in a mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
 #define for_each_cpu(cpu, mask)				\
 	for ((cpu) = -1;				\
 		(cpu) = cpumask_next((cpu), (mask)),	\
 		(cpu) < nr_cpu_ids;)
+
+/**
+ * for_each_cpu_and - iterate over every cpu in both masks
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the first cpumask pointer
+ * @and: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places.  It is equivalent to:
+ *	struct cpumask tmp;
+ *	cpumask_and(&tmp, &mask, &and);
+ *	for_each_cpu(cpu, &tmp)
+ *		...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
 #define for_each_cpu_and(cpu, mask, and)				\
 	for ((cpu) = -1;						\
 		(cpu) = cpumask_next_and((cpu), (mask), (and)),		\
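To go with the for_each_cpu_and() kernel-doc added above, here is a hedged usage sketch. It is my own illustration, not part of the commit: the count_online_in() helper is hypothetical, and it assumes a kernel context where <linux/cpumask.h> and cpu_online_mask (from the same new-API series) are available.

/*
 * Hypothetical driver-style helper, for illustration only.  It counts
 * the CPUs that are both online and set in a caller-supplied affinity
 * mask, contrasting the old temporary-mask pattern with the new
 * for_each_cpu_and() iterator.
 */
#include <linux/cpumask.h>

static unsigned int count_online_in(const struct cpumask *affinity)
{
	unsigned int cpu, n = 0;

	/*
	 * Old pattern, as described in the kernel-doc:
	 *
	 *	struct cpumask tmp;
	 *	cpumask_and(&tmp, cpu_online_mask, affinity);
	 *	for_each_cpu(cpu, &tmp)
	 *		n++;
	 */

	/* New pattern: no temporary mask, the AND happens per iteration. */
	for_each_cpu_and(cpu, cpu_online_mask, affinity)
		n++;

	return n;
}

The saving the kernel-doc refers to is the on-stack struct cpumask in the old pattern, which grows with NR_CPUS and is exactly what this API series is trying to avoid.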