author     Rusty Russell <rusty@rustcorp.com.au>     2008-11-04 21:39:10 -0500
committer  Ingo Molnar <mingo@elte.hu>               2008-11-06 03:05:33 -0500
commit     2d3854a37e8b767a51aba38ed6d22817b0631e33 (patch)
tree       3b55cc93720b2e525460216b196ed20298ae985b /include
parent     75fa67706cce5272bcfc51ed646f2da21f3bdb6e (diff)
cpumask: introduce new API, without changing anything
Impact: introduce new APIs

We want to deprecate cpumasks on the stack, as we are headed for
gynormous numbers of CPUs. Eventually, we want to head towards an
undefined 'struct cpumask' so they can never be declared on stack.

1) New cpumask functions which take pointers instead of copies.
   (cpus_* -> cpumask_*)

2) Several new helpers to reduce requirements for temporary cpumasks
   (cpumask_first_and, cpumask_next_and, cpumask_any_and)

3) Helpers for declaring cpumasks on or offstack for large NR_CPUS
   (cpumask_var_t, alloc_cpumask_var and free_cpumask_var)

4) 'struct cpumask' for explicitness and to mark new-style code.

5) Make iterator functions stop at nr_cpu_ids (a runtime constant),
   not NR_CPUS for time efficiency and for smaller dynamic allocations
   in future.

6) cpumask_copy() so we can allocate less than a full cpumask eventually
   (for alloc_cpumask_var), and so we can eliminate the 'struct cpumask'
   definition eventually.

7) work_on_cpu() helper for doing task on a CPU, rather than saving old
   cpumask for current thread and manipulating it.

8) smp_call_function_many() which is smp_call_function_mask() except
   taking a cpumask pointer.

Note that this patch simply introduces the new functions and leaves the
obsolescent ones in place. This is to simplify the transition patches.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
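For orientation, a minimal sketch of the intended new-style usage, assuming a
caller that wants the set of CPUs that are both online and present. The
variable names, the GFP flag and the printk are illustrative, not part of this
patch:

        /* Illustrative sketch only: cpumask_var_t is heap-allocated when
         * CONFIG_CPUMASK_OFFSTACK is set, otherwise it is a one-element
         * array and the alloc/free calls are no-ops. */
        cpumask_var_t tmp;
        int cpu;

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;

        cpumask_and(tmp, cpu_online_mask, cpu_present_mask);
        for_each_cpu(cpu, tmp)
                printk(KERN_DEBUG "cpu %d is online and present\n", cpu);

        free_cpumask_var(tmp);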
Diffstat (limited to 'include')
-rw-r--r--  include/linux/cpumask.h    | 502
-rw-r--r--  include/linux/smp.h        |   9
-rw-r--r--  include/linux/workqueue.h  |   8
3 files changed, 517 insertions, 2 deletions
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d3219d73f8e6..c8e66619097b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -5,6 +5,9 @@
  * Cpumasks provide a bitmap suitable for representing the
  * set of CPU's in a system, one bit position per CPU number.
  *
+ * The new cpumask_ ops take a "struct cpumask *"; the old ones
+ * use cpumask_t.
+ *
  * See detailed comments in the file linux/bitmap.h describing the
  * data type on which these cpumasks are based.
  *
@@ -31,7 +34,7 @@
  * will span the entire range of NR_CPUS.
  * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
  *
- * The available cpumask operations are:
+ * The obsolescent cpumask operations are:
  *
  * void cpu_set(cpu, mask)        turn on bit 'cpu' in mask
  * void cpu_clear(cpu, mask)      turn off bit 'cpu' in mask
@@ -138,7 +141,7 @@
 #include <linux/threads.h>
 #include <linux/bitmap.h>
 
-typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
 extern cpumask_t _unused_cpumask_arg_;
 
 #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
@@ -527,4 +530,499 @@ extern cpumask_t cpu_active_map;
 #define for_each_online_cpu(cpu)  for_each_cpu_mask_nr((cpu), cpu_online_map)
 #define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map)
 
+/* These are the new versions of the cpumask operators: passed by pointer.
+ * The older versions will be implemented in terms of these, then deleted. */
+#define cpumask_bits(maskp) ((maskp)->bits)
+
+#if NR_CPUS <= BITS_PER_LONG
+#define CPU_BITS_ALL \
+{ \
+        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+}
+
+/* This produces more efficient code. */
+#define nr_cpumask_bits NR_CPUS
+
+#else /* NR_CPUS > BITS_PER_LONG */
+
+#define CPU_BITS_ALL \
+{ \
+        [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
+        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+}
+
+#define nr_cpumask_bits nr_cpu_ids
+#endif /* NR_CPUS > BITS_PER_LONG */
+
+/* verify cpu argument to cpumask_* operators */
+static inline unsigned int cpumask_check(unsigned int cpu)
+{
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+        WARN_ON_ONCE(cpu >= nr_cpumask_bits);
+#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+        return cpu;
+}
+
+#if NR_CPUS == 1
+/* Uniprocessor. */
+#define cpumask_first(src) ({ (void)(src); 0; })
+#define cpumask_next(n, src) ({ (void)(src); 1; })
+#define cpumask_next_zero(n, src) ({ (void)(src); 1; })
+#define cpumask_next_and(n, srcp, andp) ({ (void)(srcp), (void)(andp); 1; })
+#define cpumask_any_but(mask, cpu) ({ (void)(mask); (void)(cpu); 0; })
+
+#define for_each_cpu(cpu, mask) \
+        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#define for_each_cpu_and(cpu, mask, and) \
+        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
+#else
+/**
+ * cpumask_first - get the first cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+static inline unsigned int cpumask_first(const struct cpumask *srcp)
+{
+        return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_next - get the next cpu in a cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set.
+ */
+static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+        /* -1 is a legal arg here. */
+        if (n != -1)
+                cpumask_check(n);
+        return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+}
+
+/**
+ * cpumask_next_zero - get the next unset cpu in a cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus unset.
+ */
+static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+{
+        /* -1 is a legal arg here. */
+        if (n != -1)
+                cpumask_check(n);
+        return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+}
+
+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
+int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+
+#define for_each_cpu(cpu, mask) \
+        for ((cpu) = -1; \
+                (cpu) = cpumask_next((cpu), (mask)), \
+                (cpu) < nr_cpu_ids;)
+#define for_each_cpu_and(cpu, mask, and) \
+        for ((cpu) = -1; \
+                (cpu) = cpumask_next_and((cpu), (mask), (and)), \
+                (cpu) < nr_cpu_ids;)
+#endif /* SMP */
+
+#define CPU_BITS_NONE \
+{ \
+        [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
+}
+
+#define CPU_BITS_CPU0 \
+{ \
+        [0] = 1UL \
+}
+
+/**
+ * cpumask_set_cpu - set a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+{
+        set_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
+/**
+ * cpumask_clear_cpu - clear a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+{
+        clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
+/**
+ * cpumask_test_cpu - test for a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @cpumask: the cpumask pointer
+ *
+ * No static inline type checking - see Subtlety (1) above.
+ */
+#define cpumask_test_cpu(cpu, cpumask) \
+        test_bit(cpumask_check(cpu), (cpumask)->bits)
+
+/**
+ * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @cpumask: the cpumask pointer
+ *
+ * test_and_set_bit wrapper for cpumasks.
+ */
+static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+{
+        return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
+}
+
+/**
+ * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_setall(struct cpumask *dstp)
+{
+        bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_clear(struct cpumask *dstp)
+{
+        bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_and - *dstp = *src1p & *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_and(struct cpumask *dstp,
+                               const struct cpumask *src1p,
+                               const struct cpumask *src2p)
+{
+        bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
+                   cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_or - *dstp = *src1p | *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
+                              const struct cpumask *src2p)
+{
+        bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
+                  cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_xor - *dstp = *src1p ^ *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_xor(struct cpumask *dstp,
+                               const struct cpumask *src1p,
+                               const struct cpumask *src2p)
+{
+        bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
+                   cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_andnot - *dstp = *src1p & ~*src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_andnot(struct cpumask *dstp,
+                                  const struct cpumask *src1p,
+                                  const struct cpumask *src2p)
+{
+        bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
+                      cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_complement - *dstp = ~*srcp
+ * @dstp: the cpumask result
+ * @srcp: the input to invert
+ */
+static inline void cpumask_complement(struct cpumask *dstp,
+                                      const struct cpumask *srcp)
+{
+        bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
+                          nr_cpumask_bits);
+}
+
+/**
+ * cpumask_equal - *src1p == *src2p
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline bool cpumask_equal(const struct cpumask *src1p,
+                                 const struct cpumask *src2p)
+{
+        return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
+                            nr_cpumask_bits);
+}
+
+/**
+ * cpumask_intersects - (*src1p & *src2p) != 0
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline bool cpumask_intersects(const struct cpumask *src1p,
+                                      const struct cpumask *src2p)
+{
+        return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
+                                 nr_cpumask_bits);
+}
+
+/**
+ * cpumask_subset - (*src1p & ~*src2p) == 0
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline int cpumask_subset(const struct cpumask *src1p,
+                                 const struct cpumask *src2p)
+{
+        return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
+                             nr_cpumask_bits);
+}
+
+/**
+ * cpumask_empty - *srcp == 0
+ * @srcp: the cpumask to that all cpus < nr_cpu_ids are clear.
+ */
+static inline bool cpumask_empty(const struct cpumask *srcp)
+{
+        return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_full - *srcp == 0xFFFFFFFF...
+ * @srcp: the cpumask to that all cpus < nr_cpu_ids are set.
+ */
+static inline bool cpumask_full(const struct cpumask *srcp)
+{
+        return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_weight - Count of bits in *srcp
+ * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ */
+static inline unsigned int cpumask_weight(const struct cpumask *srcp)
+{
+        return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_shift_right - *dstp = *srcp >> n
+ * @dstp: the cpumask result
+ * @srcp: the input to shift
+ * @n: the number of bits to shift by
+ */
+static inline void cpumask_shift_right(struct cpumask *dstp,
+                                       const struct cpumask *srcp, int n)
+{
+        bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
+                           nr_cpumask_bits);
+}
+
+/**
+ * cpumask_shift_left - *dstp = *srcp << n
+ * @dstp: the cpumask result
+ * @srcp: the input to shift
+ * @n: the number of bits to shift by
+ */
+static inline void cpumask_shift_left(struct cpumask *dstp,
+                                      const struct cpumask *srcp, int n)
+{
+        bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
+                          nr_cpumask_bits);
+}
+
+/**
+ * cpumask_copy - *dstp = *srcp
+ * @dstp: the result
+ * @srcp: the input cpumask
+ */
+static inline void cpumask_copy(struct cpumask *dstp,
+                                const struct cpumask *srcp)
+{
+        bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_any - pick a "random" cpu from *srcp
+ * @srcp: the input cpumask
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+#define cpumask_any(srcp) cpumask_first(srcp)
+
+/**
+ * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
+ */
+#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
+
+/**
+ * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
+ * @mask1: the first input cpumask
+ * @mask2: the second input cpumask
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
+
+/**
+ * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * @bitmap: the bitmap
+ *
+ * There are a few places where cpumask_var_t isn't appropriate and
+ * static cpumasks must be used (eg. very early boot), yet we don't
+ * expose the definition of 'struct cpumask'.
+ *
+ * This does the conversion, and can be used as a constant initializer.
+ */
+#define to_cpumask(bitmap) \
+        ((struct cpumask *)(1 ? (bitmap) \
+                : (void *)sizeof(__check_is_bitmap(bitmap))))
+
+static inline int __check_is_bitmap(const unsigned long *bitmap)
+{
+        return 1;
+}
+
+/**
+ * cpumask_size - size to allocate for a 'struct cpumask' in bytes
+ *
+ * This will eventually be a runtime variable, depending on nr_cpu_ids.
+ */
+static inline size_t cpumask_size(void)
+{
+        /* FIXME: Once all cpumask assignments are eliminated, this
+         * can be nr_cpumask_bits */
+        return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
+}
+
+/*
+ * cpumask_var_t: struct cpumask for stack usage.
+ *
+ * Oh, the wicked games we play! In order to make kernel coding a
+ * little more difficult, we typedef cpumask_var_t to an array or a
+ * pointer: doing &mask on an array is a noop, so it still works.
+ *
+ * ie.
+ *      cpumask_var_t tmpmask;
+ *      if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ *              return -ENOMEM;
+ *
+ *      ... use 'tmpmask' like a normal struct cpumask * ...
+ *
+ *      free_cpumask_var(tmpmask);
+ */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+typedef struct cpumask *cpumask_var_t;
+
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
+void free_cpumask_var(cpumask_var_t mask);
+
+#else
+typedef struct cpumask cpumask_var_t[1];
+
+static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+        return true;
+}
+
+static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+{
+}
+
+static inline void free_cpumask_var(cpumask_var_t mask)
+{
+}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
+/* The pointer versions of the maps, these will become the primary versions. */
+#define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map)
+#define cpu_online_mask   ((const struct cpumask *)&cpu_online_map)
+#define cpu_present_mask  ((const struct cpumask *)&cpu_present_map)
+#define cpu_active_mask   ((const struct cpumask *)&cpu_active_map)
+
+/* It's common to want to use cpu_all_mask in struct member initializers,
+ * so it has to refer to an address rather than a pointer. */
+extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
+#define cpu_all_mask to_cpumask(cpu_all_bits)
+
+/* First bits of cpu_bit_bitmap are in fact unset. */
+#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
+
+/* Wrappers for arch boot code to manipulate normally-constant masks */
+static inline void set_cpu_possible(unsigned int cpu, bool possible)
+{
+        if (possible)
+                cpumask_set_cpu(cpu, &cpu_possible_map);
+        else
+                cpumask_clear_cpu(cpu, &cpu_possible_map);
+}
+
+static inline void set_cpu_present(unsigned int cpu, bool present)
+{
+        if (present)
+                cpumask_set_cpu(cpu, &cpu_present_map);
+        else
+                cpumask_clear_cpu(cpu, &cpu_present_map);
+}
+
+static inline void set_cpu_online(unsigned int cpu, bool online)
+{
+        if (online)
+                cpumask_set_cpu(cpu, &cpu_online_map);
+        else
+                cpumask_clear_cpu(cpu, &cpu_online_map);
+}
+
+static inline void set_cpu_active(unsigned int cpu, bool active)
+{
+        if (active)
+                cpumask_set_cpu(cpu, &cpu_active_map);
+        else
+                cpumask_clear_cpu(cpu, &cpu_active_map);
+}
+
+static inline void init_cpu_present(const struct cpumask *src)
+{
+        cpumask_copy(&cpu_present_map, src);
+}
+
+static inline void init_cpu_possible(const struct cpumask *src)
+{
+        cpumask_copy(&cpu_possible_map, src);
+}
+
+static inline void init_cpu_online(const struct cpumask *src)
+{
+        cpumask_copy(&cpu_online_map, src);
+}
 #endif /* __LINUX_CPUMASK_H */
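As an illustration of point (2) in the commit message above, a hedged sketch of
how the new *_and helpers remove the need for a temporary mask. The 'allowed'
and 'cpu' locals are illustrative, and the old-style snippet uses the
pre-existing cpus_and()/first_cpu() helpers purely for contrast:

        int cpu;
        cpumask_t allowed = CPU_MASK_ALL;       /* illustrative */

        /* Old style: needs a full cpumask_t temporary on the stack. */
        cpumask_t tmp;
        cpus_and(tmp, cpu_online_map, allowed);
        cpu = first_cpu(tmp);

        /* New style: no temporary; returns >= nr_cpu_ids if the
         * intersection is empty. */
        cpu = cpumask_any_and(cpu_online_mask, &allowed);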
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 2e4d58b26c06..3f9a60043a97 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -64,8 +64,17 @@ extern void smp_cpus_done(unsigned int max_cpus);
  * Call a function on all other processors
  */
 int smp_call_function(void(*func)(void *info), void *info, int wait);
+/* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */
 int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
                            int wait);
+
+static inline void smp_call_function_many(const struct cpumask *mask,
+                                          void (*func)(void *info), void *info,
+                                          int wait)
+{
+        smp_call_function_mask(*mask, func, info, wait);
+}
+
 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
                              int wait);
 void __smp_call_function_single(int cpuid, struct call_single_data *data);
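A hedged sketch of calling the new smp_call_function_many() wrapper; the
callback name is illustrative, and at this stage the wrapper simply forwards
to smp_call_function_mask() by value:

        static void flush_local_state(void *info)
        {
                /* runs on each cpu selected by the mask */
        }

        /* Ask the online cpus to run the callback and wait for completion;
         * illustrative only. */
        smp_call_function_many(cpu_online_mask, flush_local_state, NULL, 1);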
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 89a5a1231ffb..b36291130f22 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -240,4 +240,12 @@ void cancel_rearming_delayed_work(struct delayed_work *work)
         cancel_delayed_work_sync(work);
 }
 
+#ifndef CONFIG_SMP
+static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+{
+        return fn(arg);
+}
+#else
+long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
+#endif /* CONFIG_SMP */
 #endif
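Finally, a hedged sketch of the work_on_cpu() helper from point (7) of the
commit message: rather than saving current's cpumask, binding to the target
cpu and restoring the mask afterwards, the caller hands a function to run on
that cpu. The callback and the cpu number are illustrative; in !CONFIG_SMP
builds the stub above just calls fn(arg) directly:

        static long read_local_setting(void *arg)
        {
                /* runs on the cpu passed to work_on_cpu() */
                return 0;
        }

        long ret = work_on_cpu(2, read_local_setting, NULL);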