-rw-r--r--  include/linux/cpumask.h   | 502
-rw-r--r--  include/linux/smp.h       |   9
-rw-r--r--  include/linux/workqueue.h |   8
-rw-r--r--  kernel/cpu.c              |   3
-rw-r--r--  kernel/workqueue.c        |  45
-rw-r--r--  lib/cpumask.c             |  73
6 files changed, 638 insertions(+), 2 deletions(-)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d3219d73f8e6..c8e66619097b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -5,6 +5,9 @@
  * Cpumasks provide a bitmap suitable for representing the
  * set of CPU's in a system, one bit position per CPU number.
  *
+ * The new cpumask_ ops take a "struct cpumask *"; the old ones
+ * use cpumask_t.
+ *
  * See detailed comments in the file linux/bitmap.h describing the
  * data type on which these cpumasks are based.
  *
@@ -31,7 +34,7 @@
  * will span the entire range of NR_CPUS.
  * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  *
- * The available cpumask operations are:
+ * The obsolescent cpumask operations are:
  *
  * void cpu_set(cpu, mask)              turn on bit 'cpu' in mask
  * void cpu_clear(cpu, mask)            turn off bit 'cpu' in mask
@@ -138,7 +141,7 @@
 #include <linux/threads.h>
 #include <linux/bitmap.h>
 
-typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
 extern cpumask_t _unused_cpumask_arg_;
 
 #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
@@ -527,4 +530,499 @@ extern cpumask_t cpu_active_map;
 #define for_each_online_cpu(cpu)  for_each_cpu_mask_nr((cpu), cpu_online_map)
 #define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map)
 
+/* These are the new versions of the cpumask operators: passed by pointer.
+ * The older versions will be implemented in terms of these, then deleted. */
+#define cpumask_bits(maskp) ((maskp)->bits)
+
+#if NR_CPUS <= BITS_PER_LONG
+#define CPU_BITS_ALL                                            \
+{                                                               \
+        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD         \
+}
+
+/* This produces more efficient code. */
+#define nr_cpumask_bits NR_CPUS
+
+#else /* NR_CPUS > BITS_PER_LONG */
+
+#define CPU_BITS_ALL                                            \
+{                                                               \
+        [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                \
+        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD         \
+}
+
+#define nr_cpumask_bits nr_cpu_ids
+#endif /* NR_CPUS > BITS_PER_LONG */
+
+/* verify cpu argument to cpumask_* operators */
+static inline unsigned int cpumask_check(unsigned int cpu)
+{
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+        WARN_ON_ONCE(cpu >= nr_cpumask_bits);
+#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+        return cpu;
+}
+
+#if NR_CPUS == 1
+/* Uniprocessor. */
+#define cpumask_first(src)              ({ (void)(src); 0; })
+#define cpumask_next(n, src)            ({ (void)(src); 1; })
+#define cpumask_next_zero(n, src)       ({ (void)(src); 1; })
+#define cpumask_next_and(n, srcp, andp) ({ (void)(srcp), (void)(andp); 1; })
+#define cpumask_any_but(mask, cpu)      ({ (void)(mask); (void)(cpu); 0; })
+
+#define for_each_cpu(cpu, mask)                 \
+        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#define for_each_cpu_and(cpu, mask, and)        \
+        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
+#else
+/**
+ * cpumask_first - get the first cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+static inline unsigned int cpumask_first(const struct cpumask *srcp)
+{
+        return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_next - get the next cpu in a cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set.
+ */
+static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+        /* -1 is a legal arg here. */
+        if (n != -1)
+                cpumask_check(n);
+        return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+}
+
+/**
+ * cpumask_next_zero - get the next unset cpu in a cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus unset.
+ */
+static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+{
+        /* -1 is a legal arg here. */
+        if (n != -1)
+                cpumask_check(n);
+        return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+}
+
+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
+int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+
+#define for_each_cpu(cpu, mask)                         \
+        for ((cpu) = -1;                                \
+                (cpu) = cpumask_next((cpu), (mask)),    \
+                (cpu) < nr_cpu_ids;)
+#define for_each_cpu_and(cpu, mask, and)                                \
+        for ((cpu) = -1;                                                \
+                (cpu) = cpumask_next_and((cpu), (mask), (and)),         \
+                (cpu) < nr_cpu_ids;)
+#endif /* SMP */
+
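As an illustration of the new pointer-based iterators (not part of the patch), a minimal caller-side sketch walking cpu_online_mask; the function name example_count_online() is made up:

/* assumes <linux/cpumask.h> and <linux/kernel.h> */
static unsigned int example_count_online(void)
{
        unsigned int cpu, count = 0;

        /* visits each set bit; with NR_CPUS == 1 this degenerates to cpu 0 */
        for_each_cpu(cpu, cpu_online_mask)
                count++;

        /* cpumask_first()/cpumask_next() return >= nr_cpu_ids when exhausted */
        if (cpumask_first(cpu_online_mask) >= nr_cpu_ids)
                printk(KERN_WARNING "no online cpus?\n");

        return count;
}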
+#define CPU_BITS_NONE                                           \
+{                                                               \
+        [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL                  \
+}
+
+#define CPU_BITS_CPU0                                           \
+{                                                               \
+        [0] = 1UL                                               \
+}
+
+/**
+ * cpumask_set_cpu - set a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+{
+        set_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
+/**
+ * cpumask_clear_cpu - clear a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+{
+        clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
+/**
+ * cpumask_test_cpu - test for a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @cpumask: the cpumask pointer
+ *
+ * No static inline type checking - see Subtlety (1) above.
+ */
+#define cpumask_test_cpu(cpu, cpumask) \
+        test_bit(cpumask_check(cpu), (cpumask)->bits)
+
+/**
+ * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @cpumask: the cpumask pointer
+ *
+ * test_and_set_bit wrapper for cpumasks.
+ */
+static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+{
+        return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
+}
+
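A small usage sketch for the single-bit operators above (illustrative only; example_mark_cpu() is invented). It uses an on-stack cpumask_t, which is exactly the pattern that cpumask_var_t, further down, is meant to replace when NR_CPUS is large:

static void example_mark_cpu(unsigned int cpu)
{
        cpumask_t tmp;                          /* fine for small NR_CPUS only */

        cpumask_clear(&tmp);                    /* zero all bits */
        cpumask_set_cpu(cpu, &tmp);             /* atomic set_bit() underneath */
        if (cpumask_test_cpu(cpu, &tmp))
                cpumask_clear_cpu(cpu, &tmp);
}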
+/**
+ * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_setall(struct cpumask *dstp)
+{
+        bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_clear(struct cpumask *dstp)
+{
+        bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_and - *dstp = *src1p & *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_and(struct cpumask *dstp,
+                               const struct cpumask *src1p,
+                               const struct cpumask *src2p)
+{
+        bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
+                   cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_or - *dstp = *src1p | *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
+                              const struct cpumask *src2p)
+{
+        bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
+                  cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_xor - *dstp = *src1p ^ *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_xor(struct cpumask *dstp,
+                               const struct cpumask *src1p,
+                               const struct cpumask *src2p)
+{
+        bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
+                   cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_andnot - *dstp = *src1p & ~*src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_andnot(struct cpumask *dstp,
+                                  const struct cpumask *src1p,
+                                  const struct cpumask *src2p)
+{
+        bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
+                      cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_complement - *dstp = ~*srcp
+ * @dstp: the cpumask result
+ * @srcp: the input to invert
+ */
+static inline void cpumask_complement(struct cpumask *dstp,
+                                      const struct cpumask *srcp)
+{
+        bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
+                          nr_cpumask_bits);
+}
+
+/**
+ * cpumask_equal - *src1p == *src2p
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline bool cpumask_equal(const struct cpumask *src1p,
+                                 const struct cpumask *src2p)
+{
+        return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
+                            nr_cpumask_bits);
+}
+
+/**
+ * cpumask_intersects - (*src1p & *src2p) != 0
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline bool cpumask_intersects(const struct cpumask *src1p,
+                                      const struct cpumask *src2p)
+{
+        return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
+                                 nr_cpumask_bits);
+}
+
+/**
+ * cpumask_subset - (*src1p & ~*src2p) == 0
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline int cpumask_subset(const struct cpumask *src1p,
+                                 const struct cpumask *src2p)
+{
+        return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
+                             nr_cpumask_bits);
+}
+
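For illustration (not part of the patch), the predicate helpers compose exactly as their bitmap definitions suggest; example_all_requested_online() is a made-up helper:

static bool example_all_requested_online(const struct cpumask *requested)
{
        /* subset test: (*requested & ~*cpu_online_mask) == 0 */
        return !cpumask_empty(requested) &&
               cpumask_subset(requested, cpu_online_mask);
}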
+/**
+ * cpumask_empty - *srcp == 0
+ * @srcp: the cpumask to check that all cpus < nr_cpu_ids are clear.
+ */
+static inline bool cpumask_empty(const struct cpumask *srcp)
+{
+        return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_full - *srcp == 0xFFFFFFFF...
+ * @srcp: the cpumask to check that all cpus < nr_cpu_ids are set.
+ */
+static inline bool cpumask_full(const struct cpumask *srcp)
+{
+        return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_weight - Count of bits in *srcp
+ * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ */
+static inline unsigned int cpumask_weight(const struct cpumask *srcp)
+{
+        return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_shift_right - *dstp = *srcp >> n
+ * @dstp: the cpumask result
+ * @srcp: the input to shift
+ * @n: the number of bits to shift by
+ */
+static inline void cpumask_shift_right(struct cpumask *dstp,
+                                       const struct cpumask *srcp, int n)
+{
+        bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
+                           nr_cpumask_bits);
+}
+
+/**
+ * cpumask_shift_left - *dstp = *srcp << n
+ * @dstp: the cpumask result
+ * @srcp: the input to shift
+ * @n: the number of bits to shift by
+ */
+static inline void cpumask_shift_left(struct cpumask *dstp,
+                                      const struct cpumask *srcp, int n)
+{
+        bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
+                          nr_cpumask_bits);
+}
+
+/**
+ * cpumask_copy - *dstp = *srcp
+ * @dstp: the result
+ * @srcp: the input cpumask
+ */
+static inline void cpumask_copy(struct cpumask *dstp,
+                                const struct cpumask *srcp)
+{
+        bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_any - pick a "random" cpu from *srcp
+ * @srcp: the input cpumask
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+#define cpumask_any(srcp) cpumask_first(srcp)
+
+/**
+ * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * Returns >= nr_cpu_ids if no cpus set in both.  See also cpumask_next_and().
+ */
+#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
+
+/**
+ * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
+ * @mask1: the first input cpumask
+ * @mask2: the second input cpumask
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
+
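A sketch (not from the patch) of the usual "pick one cpu" idiom built on cpumask_any_and(); example_pick_cpu() is an invented name:

static unsigned int example_pick_cpu(const struct cpumask *affinity)
{
        unsigned int cpu = cpumask_any_and(affinity, cpu_online_mask);

        if (cpu >= nr_cpu_ids)                  /* no online cpu in the mask */
                cpu = cpumask_any(cpu_online_mask);
        return cpu;
}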
+/**
+ * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * @bitmap: the bitmap
+ *
+ * There are a few places where cpumask_var_t isn't appropriate and
+ * static cpumasks must be used (eg. very early boot), yet we don't
+ * expose the definition of 'struct cpumask'.
+ *
+ * This does the conversion, and can be used as a constant initializer.
+ */
+#define to_cpumask(bitmap)                                              \
+        ((struct cpumask *)(1 ? (bitmap)                                \
+                            : (void *)sizeof(__check_is_bitmap(bitmap))))
+
+static inline int __check_is_bitmap(const unsigned long *bitmap)
+{
+        return 1;
+}
+
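An illustrative sketch of to_cpumask() on a static NR_CPUS bitmap, as very early boot code might use before the allocator is up; example_bits, example_mask and example_use_static_mask() are made-up names:

static DECLARE_BITMAP(example_bits, NR_CPUS) = CPU_BITS_CPU0;
#define example_mask to_cpumask(example_bits)

static void __init example_use_static_mask(void)
{
        /* guard so cpumask_check() doesn't warn on a single-cpu config */
        if (nr_cpu_ids > 1 && cpumask_test_cpu(0, example_mask))
                cpumask_set_cpu(1, example_mask);
}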
+/**
+ * cpumask_size - size to allocate for a 'struct cpumask' in bytes
+ *
+ * This will eventually be a runtime variable, depending on nr_cpu_ids.
+ */
+static inline size_t cpumask_size(void)
+{
+        /* FIXME: Once all cpumask assignments are eliminated, this
+         * can be nr_cpumask_bits */
+        return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
+}
+
+/*
+ * cpumask_var_t: struct cpumask for stack usage.
+ *
+ * Oh, the wicked games we play!  In order to make kernel coding a
+ * little more difficult, we typedef cpumask_var_t to an array or a
+ * pointer: doing &mask on an array is a noop, so it still works.
+ *
+ * ie.
+ *      cpumask_var_t tmpmask;
+ *      if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ *              return -ENOMEM;
+ *
+ *        ... use 'tmpmask' like a normal struct cpumask * ...
+ *
+ *      free_cpumask_var(tmpmask);
+ */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+typedef struct cpumask *cpumask_var_t;
+
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
+void free_cpumask_var(cpumask_var_t mask);
+
+#else
+typedef struct cpumask cpumask_var_t[1];
+
+static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+        return true;
+}
+
+static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+{
+}
+
+static inline void free_cpumask_var(cpumask_var_t mask)
+{
+}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
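The allocate/use/free pattern from the comment above, fleshed out as a sketch (not part of the patch; example_with_tmpmask() is invented, and <linux/slab.h> is assumed for GFP_KERNEL). With CONFIG_CPUMASK_OFFSTACK=y this kmallocs a mask; otherwise it is a plain on-stack array and alloc/free cost nothing:

static int example_with_tmpmask(const struct cpumask *in)
{
        cpumask_var_t tmp;
        unsigned int cpu;

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;                 /* only possible if OFFSTACK=y */

        cpumask_and(tmp, in, cpu_online_mask);
        for_each_cpu(cpu, tmp)
                printk(KERN_DEBUG "cpu %u selected\n", cpu);

        free_cpumask_var(tmp);
        return 0;
}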
+/* The pointer versions of the maps, these will become the primary versions. */
+#define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map)
+#define cpu_online_mask   ((const struct cpumask *)&cpu_online_map)
+#define cpu_present_mask  ((const struct cpumask *)&cpu_present_map)
+#define cpu_active_mask   ((const struct cpumask *)&cpu_active_map)
+
+/* It's common to want to use cpu_all_mask in struct member initializers,
+ * so it has to refer to an address rather than a pointer. */
+extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
+#define cpu_all_mask to_cpumask(cpu_all_bits)
+
+/* First bits of cpu_bit_bitmap are in fact unset. */
+#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
+
+/* Wrappers for arch boot code to manipulate normally-constant masks */
+static inline void set_cpu_possible(unsigned int cpu, bool possible)
+{
+        if (possible)
+                cpumask_set_cpu(cpu, &cpu_possible_map);
+        else
+                cpumask_clear_cpu(cpu, &cpu_possible_map);
+}
+
+static inline void set_cpu_present(unsigned int cpu, bool present)
+{
+        if (present)
+                cpumask_set_cpu(cpu, &cpu_present_map);
+        else
+                cpumask_clear_cpu(cpu, &cpu_present_map);
+}
+
+static inline void set_cpu_online(unsigned int cpu, bool online)
+{
+        if (online)
+                cpumask_set_cpu(cpu, &cpu_online_map);
+        else
+                cpumask_clear_cpu(cpu, &cpu_online_map);
+}
+
+static inline void set_cpu_active(unsigned int cpu, bool active)
+{
+        if (active)
+                cpumask_set_cpu(cpu, &cpu_active_map);
+        else
+                cpumask_clear_cpu(cpu, &cpu_active_map);
+}
+
+static inline void init_cpu_present(const struct cpumask *src)
+{
+        cpumask_copy(&cpu_present_map, src);
+}
+
+static inline void init_cpu_possible(const struct cpumask *src)
+{
+        cpumask_copy(&cpu_possible_map, src);
+}
+
+static inline void init_cpu_online(const struct cpumask *src)
+{
+        cpumask_copy(&cpu_online_map, src);
+}
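How arch setup code might use these wrappers instead of poking cpu_present_map directly, as an illustrative sketch only; example_register_cpus() and its count argument are invented:

static void __init example_register_cpus(unsigned int detected)
{
        unsigned int cpu;

        init_cpu_possible(cpu_none_mask);       /* start from an empty mask */
        for (cpu = 0; cpu < detected && cpu < NR_CPUS; cpu++) {
                set_cpu_possible(cpu, true);
                set_cpu_present(cpu, true);
        }
}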
 #endif /* __LINUX_CPUMASK_H */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 2e4d58b26c06..3f9a60043a97 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -64,8 +64,17 @@ extern void smp_cpus_done(unsigned int max_cpus);
  * Call a function on all other processors
  */
 int smp_call_function(void(*func)(void *info), void *info, int wait);
+/* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */
 int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
                                 int wait);
+
+static inline void smp_call_function_many(const struct cpumask *mask,
+                                          void (*func)(void *info), void *info,
+                                          int wait)
+{
+        smp_call_function_mask(*mask, func, info, wait);
+}
+
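A usage sketch for the transitional wrapper (not part of the patch): the caller passes a mask pointer, and for now the wrapper simply copies it by value into the old smp_call_function_mask(). example_do_flush() and example_flush_others() are invented names:

static void example_do_flush(void *info)
{
        /* runs on the cpus selected from the mask */
}

static void example_flush_others(const struct cpumask *mask)
{
        preempt_disable();
        smp_call_function_many(mask, example_do_flush, NULL, 1 /* wait */);
        preempt_enable();
}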
 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
                                 int wait);
 void __smp_call_function_single(int cpuid, struct call_single_data *data);
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 89a5a1231ffb..b36291130f22 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -240,4 +240,12 @@ void cancel_rearming_delayed_work(struct delayed_work *work)
         cancel_delayed_work_sync(work);
 }
 
+#ifndef CONFIG_SMP
+static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+{
+        return fn(arg);
+}
+#else
+long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
+#endif /* CONFIG_SMP */
 #endif
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 86d49045daed..5a732c5ef08b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -499,3 +499,6 @@ const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
 #endif
 };
 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
+
+const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
+EXPORT_SYMBOL(cpu_all_bits);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f928f2a87b9b..d4dc69ddebd7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -970,6 +970,51 @@ undo:
         return ret;
 }
 
+#ifdef CONFIG_SMP
+struct work_for_cpu {
+        struct work_struct work;
+        long (*fn)(void *);
+        void *arg;
+        long ret;
+};
+
+static void do_work_for_cpu(struct work_struct *w)
+{
+        struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);
+
+        wfc->ret = wfc->fn(wfc->arg);
+}
+
+/**
+ * work_on_cpu - run a function in user context on a particular cpu
+ * @cpu: the cpu to run on
+ * @fn: the function to run
+ * @arg: the function arg
+ *
+ * This will return -EINVAL if the cpu is not online, or the return value
+ * of @fn otherwise.
+ */
+long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+{
+        struct work_for_cpu wfc;
+
+        INIT_WORK(&wfc.work, do_work_for_cpu);
+        wfc.fn = fn;
+        wfc.arg = arg;
+        get_online_cpus();
+        if (unlikely(!cpu_online(cpu)))
+                wfc.ret = -EINVAL;
+        else {
+                schedule_work_on(cpu, &wfc.work);
+                flush_work(&wfc.work);
+        }
+        put_online_cpus();
+
+        return wfc.ret;
+}
+EXPORT_SYMBOL_GPL(work_on_cpu);
+#endif /* CONFIG_SMP */
+
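A caller-side sketch for work_on_cpu() (illustrative; example_read() and example_read_on() are invented). The callback runs from a workqueue, i.e. in process context on the chosen cpu, so it may sleep:

static long example_read(void *arg)
{
        unsigned int *reg = arg;

        return (long)*reg;                      /* executes on the target cpu */
}

static long example_read_on(unsigned int cpu, unsigned int *reg)
{
        return work_on_cpu(cpu, example_read, reg);     /* -EINVAL if offline */
}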
 void __init init_workqueues(void)
 {
         cpu_populated_map = cpu_online_map;
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 5f97dc25ef9c..5ceb4211c834 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -2,6 +2,7 @@
 #include <linux/bitops.h>
 #include <linux/cpumask.h>
 #include <linux/module.h>
+#include <linux/bootmem.h>
 
 int __first_cpu(const cpumask_t *srcp)
 {
@@ -35,3 +36,75 @@ int __any_online_cpu(const cpumask_t *mask)
         return cpu;
 }
 EXPORT_SYMBOL(__any_online_cpu);
+
+/**
+ * cpumask_next_and - get the next cpu in *src1p & *src2p
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set in both.
+ */
+int cpumask_next_and(int n, const struct cpumask *src1p,
+                     const struct cpumask *src2p)
+{
+        while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
+                if (cpumask_test_cpu(n, src2p))
+                        break;
+        return n;
+}
+EXPORT_SYMBOL(cpumask_next_and);
+
+/**
+ * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
+ * @mask: the cpumask to search
+ * @cpu: the cpu to ignore.
+ *
+ * Often used to find any cpu but smp_processor_id() in a mask.
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
+{
+        unsigned int i;
+
+        for_each_cpu(i, mask)
+                if (i != cpu)
+                        break;
+        return i;
+}
+
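For illustration (not part of the patch), the "find a cpu other than myself" idiom the comment above alludes to; example_pick_other_cpu() is an invented name:

static int example_pick_other_cpu(void)
{
        int cpu;

        preempt_disable();
        cpu = cpumask_any_but(cpu_online_mask, smp_processor_id());
        preempt_enable();

        return cpu < nr_cpu_ids ? cpu : -ENODEV;        /* no other online cpu */
}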
+/* These are not inline because of header tangles. */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+        if (likely(slab_is_available()))
+                *mask = kmalloc(cpumask_size(), flags);
+        else {
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+                printk(KERN_ERR
+                        "=> alloc_cpumask_var: kmalloc not available!\n");
+                dump_stack();
+#endif
+                *mask = NULL;
+        }
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+        if (!*mask) {
+                printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
+                dump_stack();
+        }
+#endif
+        return *mask != NULL;
+}
+EXPORT_SYMBOL(alloc_cpumask_var);
+
+void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+{
+        *mask = alloc_bootmem(cpumask_size());
+}
+
+void free_cpumask_var(cpumask_var_t mask)
+{
+        kfree(mask);
+}
+EXPORT_SYMBOL(free_cpumask_var);
+#endif
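One more caller-side sketch (not part of the patch): early boot code that needs a mask before kmalloc is usable takes the bootmem variant; example_boot_mask and example_early_setup() are invented names, and a bootmem-allocated mask is never handed to free_cpumask_var():

static cpumask_var_t example_boot_mask;

static void __init example_early_setup(void)
{
        alloc_bootmem_cpumask_var(&example_boot_mask);
        cpumask_copy(example_boot_mask, cpu_possible_mask);
}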