author     Rusty Russell <rusty@rustcorp.com.au>  2015-03-04 19:19:19 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>  2015-03-09 23:24:41 -0400
commit     2f0f267ea0720ec6adbe9cf7386450425fac8258 (patch)
tree       c03a0bf6822f8a1df5afb34e92fab15d63b996a2
parent     1ed1835f5fadf057ab081cbe31ac353d4547a25b (diff)
cpumask: remove deprecated functions.
Using these functions with offstack cpus is unsafe. They use all NR_CPUS
bits, instead of nr_cpumask_bits.
In particular, lustre (in staging) used cpus_ and that caused a bug.
Reported-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
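
The unsafety comes down to the bitmap length each API passes to the underlying bitmap ops: the removed helpers hard-code NR_CPUS, while the cpumask_ replacements are bounded by nr_cpumask_bits, which is what an off-stack (CONFIG_CPUMASK_OFFSTACK) mask is sized and maintained for. A rough side-by-side sketch; the cpus_weight() half is the code removed below, while the cpumask_weight() body is paraphrased from the kernel of this era and is not part of this patch:

    /* Removed helper: always walks NR_CPUS bits. */
    #define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
    static inline int __cpus_weight(const cpumask_t *srcp, unsigned int nbits)
    {
            return bitmap_weight(srcp->bits, nbits);
    }

    /* Replacement (paraphrased): walks only the nr_cpumask_bits that are
     * actually valid for the running system. */
    static inline unsigned int cpumask_weight(const struct cpumask *srcp)
    {
            return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
    }
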
-rw-r--r--  include/linux/cpumask.h  151
-rw-r--r--  lib/Kconfig                4
2 files changed, 0 insertions, 155 deletions
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index dc037ae6f4f2..646fadee5caf 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -815,155 +815,4 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
         [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
 } }
 
-/*
- *
- * From here down, all obsolete. Use cpumask_ variants!
- *
- */
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-
-#define CPU_MASK_CPU0 \
-(cpumask_t) { { \
-        [0] = 1UL \
-} }
-
-#if NR_CPUS == 1
-#define first_cpu(src) ({ (void)(src); 0; })
-#define next_cpu(n, src) ({ (void)(src); 1; })
-#define any_online_cpu(mask) 0
-#define for_each_cpu_mask(cpu, mask) \
-        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#else /* NR_CPUS > 1 */
-int __first_cpu(const cpumask_t *srcp);
-int __next_cpu(int n, const cpumask_t *srcp);
-
-#define first_cpu(src) __first_cpu(&(src))
-#define next_cpu(n, src) __next_cpu((n), &(src))
-#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask)
-#define for_each_cpu_mask(cpu, mask) \
-        for ((cpu) = -1; \
-                (cpu) = next_cpu((cpu), (mask)), \
-                (cpu) < NR_CPUS; )
-#endif /* SMP */
-
-#if NR_CPUS <= 64
-
-#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
-
-#else /* NR_CPUS > 64 */
-
-int __next_cpu_nr(int n, const cpumask_t *srcp);
-#define for_each_cpu_mask_nr(cpu, mask) \
-        for ((cpu) = -1; \
-                (cpu) = __next_cpu_nr((cpu), &(mask)), \
-                (cpu) < nr_cpu_ids; )
-
-#endif /* NR_CPUS > 64 */
-
-#define cpus_addr(src) ((src).bits)
-
-#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
-static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
-{
-        set_bit(cpu, dstp->bits);
-}
-
-#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
-static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
-{
-        clear_bit(cpu, dstp->bits);
-}
-
-#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, unsigned int nbits)
-{
-        bitmap_fill(dstp->bits, nbits);
-}
-
-#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, unsigned int nbits)
-{
-        bitmap_zero(dstp->bits, nbits);
-}
-
-/* No static inline type checking - see Subtlety (1) above. */
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-
-#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
-static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
-{
-        return test_and_set_bit(cpu, addr->bits);
-}
-
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
-                             const cpumask_t *src2p, unsigned int nbits)
-{
-        return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
-                             const cpumask_t *src2p, unsigned int nbits)
-{
-        bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
-                              const cpumask_t *src2p, unsigned int nbits)
-{
-        bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_andnot(dst, src1, src2) \
-                __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
-                                const cpumask_t *src2p, unsigned int nbits)
-{
-        return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_equal(const cpumask_t *src1p,
-                               const cpumask_t *src2p, unsigned int nbits)
-{
-        return bitmap_equal(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_intersects(const cpumask_t *src1p,
-                                    const cpumask_t *src2p, unsigned int nbits)
-{
-        return bitmap_intersects(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_subset(const cpumask_t *src1p,
-                                const cpumask_t *src2p, unsigned int nbits)
-{
-        return bitmap_subset(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(const cpumask_t *srcp, unsigned int nbits)
-{
-        return bitmap_empty(srcp->bits, nbits);
-}
-
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(const cpumask_t *srcp, unsigned int nbits)
-{
-        return bitmap_weight(srcp->bits, nbits);
-}
-
-#define cpus_shift_left(dst, src, n) \
-                __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_left(cpumask_t *dstp,
-                                     const cpumask_t *srcp, int n, int nbits)
-{
-        bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
-}
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
-
 #endif /* __LINUX_CPUMASK_H */
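
Callers still using the removed interfaces convert mechanically: the cpumask_ variants take pointers to struct cpumask rather than cpumask_t values, and iterate with for_each_cpu() instead of for_each_cpu_mask(). A hypothetical before/after caller (the function names are illustrative, not from this patch):

    /* Old style, built on the helpers removed above. */
    static int old_pick_cpu(cpumask_t mask)
    {
            return any_online_cpu(mask);
    }

    /* New style: any_online_cpu(mask) was literally defined as
     * cpumask_any_and(&mask, cpu_online_mask), so the conversion is direct.
     * Loops change the same way: for_each_cpu_mask(cpu, mask) becomes
     * for_each_cpu(cpu, &mask). */
    static unsigned int new_pick_cpu(const struct cpumask *mask)
    {
            return cpumask_any_and(mask, cpu_online_mask);
    }
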
diff --git a/lib/Kconfig b/lib/Kconfig
index 87da53bb1fef..47d262b3251e 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -397,10 +397,6 @@ config CPUMASK_OFFSTACK
           them on the stack. This is a bit more expensive, but avoids
           stack overflow.
 
-config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-        bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
-        depends on BROKEN
-
 config CPU_RMAP
         bool
         depends on SMP
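
For reference, CONFIG_CPUMASK_OFFSTACK is the case the removed helpers could not handle: with it enabled, cpumask_var_t becomes a pointer, the mask storage is allocated at runtime, and the cpumask_ API only maintains nr_cpumask_bits of it, so helpers that blindly touch all NR_CPUS bits may look at bits the API never maintains. A minimal usage sketch in kernel style (illustrative only, not from this patch):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/printk.h>

    static int report_online_cpus(void)
    {
            cpumask_var_t mask;     /* a pointer when CONFIG_CPUMASK_OFFSTACK=y */
            int cpu;

            if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            /* All of these stay within nr_cpumask_bits. */
            cpumask_copy(mask, cpu_online_mask);
            for_each_cpu(cpu, mask)
                    pr_info("cpu %d is online\n", cpu);

            free_cpumask_var(mask);
            return 0;
    }
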