author		Christoph Lameter <cl@linux.com>	2011-12-22 12:58:51 -0500
committer	Tejun Heo <tj@kernel.org>	2011-12-22 13:40:20 -0500
commit		933393f58fef9963eac61db8093689544e29a600 (patch)
tree		719f8b231499aa4ea023bc1a06db4582df5f0965 /include/linux/percpu.h
parent		ecefc36b41ac0fe92d76273a23faf27b2da13411 (diff)
percpu: Remove irqsafe_cpu_xxx variants
We simply say that regular this_cpu use must be safe regardless of
preemption and interrupt state.  That has no material change for x86
and s390 implementations of this_cpu operations.  However, arches that
do not provide their own implementation for this_cpu operations will
now get code generated that disables interrupts instead of preemption.

-tj: This is part of on-going percpu API cleanup.  For detailed
     discussion of the subject, please refer to the following thread.

     http://thread.gmane.org/gmane.linux.kernel/1222078

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
LKML-Reference: <alpine.DEB.2.00.1112221154380.11787@router.home>
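For illustration only (this example is not part of the patch; the
counter and handler names are hypothetical), the new guarantee means
code like the following no longer needs an irqsafe_cpu_* spelling even
when it runs in interrupt context:

#include <linux/interrupt.h>
#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, nr_hypothetical_events);

static irqreturn_t hypothetical_irq_handler(int irq, void *dev_id)
{
	/* Previously this would have been irqsafe_cpu_inc();
	 * this_cpu_inc() is now guaranteed atomic w.r.t. both
	 * preemption and local interrupts. */
	this_cpu_inc(nr_hypothetical_events);
	return IRQ_HANDLED;
}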
Diffstat (limited to 'include/linux/percpu.h')
-rw-r--r--	include/linux/percpu.h	190
1 file changed, 22 insertions, 168 deletions
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9ca008f0c542..32cd1f67462e 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -172,10 +172,10 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
  * equal char, int or long. percpu_read() evaluates to a lvalue and
  * all others to void.
  *
- * These operations are guaranteed to be atomic w.r.t. preemption.
- * The generic versions use plain get/put_cpu_var(). Archs are
+ * These operations are guaranteed to be atomic.
+ * The generic versions disable interrupts. Archs are
  * encouraged to implement single-instruction alternatives which don't
- * require preemption protection.
+ * require protection.
  */
 #ifndef percpu_read
 # define percpu_read(var)						\
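A minimal usage sketch of the operation this comment describes (the
variable and function names are hypothetical, not from the patch):

DEFINE_PER_CPU(int, hypothetical_state);

static int peek_state(void)
{
	/* After this patch the generic fallback disables interrupts
	 * around the access; archs may do it in a single instruction. */
	return percpu_read(hypothetical_state);
}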
@@ -347,9 +347,10 @@ do { \
 
 #define _this_cpu_generic_to_op(pcp, val, op)				\
 do {									\
-	preempt_disable();						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
 	*__this_cpu_ptr(&(pcp)) op val;					\
-	preempt_enable();						\
+	local_irq_restore(flags);					\
 } while (0)
 
 #ifndef this_cpu_write
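On an arch that supplies no this_cpu_add_4() of its own, a call such
as this_cpu_add(my_counter, 1) (my_counter being a hypothetical
per-CPU int) now expands through _this_cpu_generic_to_op() to roughly:

	unsigned long flags;

	local_irq_save(flags);			/* was: preempt_disable() */
	*__this_cpu_ptr(&my_counter) += 1;	/* RMW on this CPU's copy */
	local_irq_restore(flags);		/* was: preempt_enable() */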
@@ -447,10 +448,11 @@ do { \
 #define _this_cpu_generic_add_return(pcp, val)				\
 ({									\
 	typeof(pcp) ret__;						\
-	preempt_disable();						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
 	__this_cpu_add(pcp, val);					\
 	ret__ = __this_cpu_read(pcp);					\
-	preempt_enable();						\
+	local_irq_restore(flags);					\
 	ret__;								\
 })
 
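A hedged usage sketch for the add_return family (names hypothetical):
with the irq-save window above, the read-back can no longer be torn
from the add by an interrupt on generic archs.

DEFINE_PER_CPU(int, hypothetical_refcnt);

/* Take a reference; true if we were the first taker on this CPU. */
static bool take_first_ref(void)
{
	return this_cpu_add_return(hypothetical_refcnt, 1) == 1;
}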
@@ -476,10 +478,11 @@ do { \
 
 #define _this_cpu_generic_xchg(pcp, nval)				\
 ({	typeof(pcp) ret__;						\
-	preempt_disable();						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
 	ret__ = __this_cpu_read(pcp);					\
 	__this_cpu_write(pcp, nval);					\
-	preempt_enable();						\
+	local_irq_restore(flags);					\
 	ret__;								\
 })
 
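Usage sketch (hypothetical names, not from the patch): the read and
the write now form a single irq-protected unit on generic archs.

DEFINE_PER_CPU(void *, hypothetical_slot);

/* Publish a new value and retrieve the one it displaces. */
static void *swap_slot(void *new)
{
	return this_cpu_xchg(hypothetical_slot, new);
}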
@@ -501,12 +504,14 @@ do { \
 #endif
 
 #define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
-({	typeof(pcp) ret__;						\
-	preempt_disable();						\
+({									\
+	typeof(pcp) ret__;						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
 	ret__ = __this_cpu_read(pcp);					\
 	if (ret__ == (oval))						\
 		__this_cpu_write(pcp, nval);				\
-	preempt_enable();						\
+	local_irq_restore(flags);					\
 	ret__;								\
 })
 
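A minimal sketch of the classic compare-and-exchange pattern this
enables (flag name hypothetical): the compare and the conditional
write happen with local interrupts off, so an irq handler on the same
CPU cannot slip in between them.

DEFINE_PER_CPU(int, hypothetical_flag);

/* Claim a per-CPU flag exactly once: succeeds only on 0 -> 1. */
static bool claim_once(void)
{
	return this_cpu_cmpxchg(hypothetical_flag, 0, 1) == 0;
}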
@@ -538,10 +543,11 @@ do { \
 #define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
 ({									\
 	int ret__;							\
-	preempt_disable();						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
 	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
 			oval1, oval2, nval1, nval2);			\
-	preempt_enable();						\
+	local_irq_restore(flags);					\
 	ret__;								\
 })
 
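A hedged sketch of the double-width variant (all names hypothetical;
the adjacency/alignment constraint is the same one SLUB relies on for
its freelist/tid pair):

struct hypothetical_pair {		/* the two fields must be	*/
	void *ptr;			/* adjacent, and the pair	*/
	unsigned long seq;		/* double-word aligned		*/
} __aligned(2 * sizeof(void *));

DEFINE_PER_CPU(struct hypothetical_pair, hpair);

static bool update_pair(void *optr, unsigned long oseq,
			void *nptr, unsigned long nseq)
{
	/* True only if both words still matched and were replaced
	 * as one atomic unit on this CPU. */
	return this_cpu_cmpxchg_double(hpair.ptr, hpair.seq,
				       optr, oseq, nptr, nseq);
}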
@@ -567,9 +573,9 @@ do { \
 #endif
 
 /*
- * Generic percpu operations that do not require preemption handling.
+ * Generic percpu operations for context that are safe from preemption/interrupts.
  * Either we do not care about races or the caller has the
- * responsibility of handling preemptions issues. Arch code can still
+ * responsibility of handling preemption/interrupt issues. Arch code can still
  * override these instructions since the arch per cpu code may be more
  * efficient and may actually get race freeness for free (that is the
  * case for x86 for example).
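A hedged sketch of the division of labor this comment describes
(names hypothetical): __this_cpu_* provides no protection of its own,
so the caller supplies it.

DEFINE_PER_CPU(unsigned long, hypothetical_stat);

/* Caller has already excluded interrupts, so the raw op is enough. */
static void bump_stat_locked(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_inc(hypothetical_stat);	/* no implicit protection */
	local_irq_restore(flags);
}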
@@ -802,156 +808,4 @@ do { \
 	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
 #endif
 
-/*
- * IRQ safe versions of the per cpu RMW operations. Note that these operations
- * are *not* safe against modification of the same variable from another
- * processors (which one gets when using regular atomic operations)
- * They are guaranteed to be atomic vs. local interrupts and
- * preemption only.
- */
-#define irqsafe_cpu_generic_to_op(pcp, val, op)			\
-do {									\
-	unsigned long flags;						\
-	local_irq_save(flags);						\
-	*__this_cpu_ptr(&(pcp)) op val;					\
-	local_irq_restore(flags);					\
-} while (0)
-
-#ifndef irqsafe_cpu_add
-# ifndef irqsafe_cpu_add_1
-#  define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_2
-#  define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_4
-#  define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_8
-#  define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
-#endif
-
-#ifndef irqsafe_cpu_sub
-# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
-#endif
-
-#ifndef irqsafe_cpu_inc
-# define irqsafe_cpu_inc(pcp)	irqsafe_cpu_add((pcp), 1)
-#endif
-
-#ifndef irqsafe_cpu_dec
-# define irqsafe_cpu_dec(pcp)	irqsafe_cpu_sub((pcp), 1)
-#endif
-
-#ifndef irqsafe_cpu_and
-# ifndef irqsafe_cpu_and_1
-#  define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_2
-#  define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_4
-#  define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_8
-#  define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (val))
-#endif
-
-#ifndef irqsafe_cpu_or
-# ifndef irqsafe_cpu_or_1
-#  define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_2
-#  define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_4
-#  define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_8
-#  define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (val))
-#endif
-
-#ifndef irqsafe_cpu_xor
-# ifndef irqsafe_cpu_xor_1
-#  define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_2
-#  define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_4
-#  define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_8
-#  define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (val))
-#endif
-
-#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)			\
-({									\
-	typeof(pcp) ret__;						\
-	unsigned long flags;						\
-	local_irq_save(flags);						\
-	ret__ = __this_cpu_read(pcp);					\
-	if (ret__ == (oval))						\
-		__this_cpu_write(pcp, nval);				\
-	local_irq_restore(flags);					\
-	ret__;								\
-})
-
-#ifndef irqsafe_cpu_cmpxchg
-# ifndef irqsafe_cpu_cmpxchg_1
-#  define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_2
-#  define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_4
-#  define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_8
-#  define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# define irqsafe_cpu_cmpxchg(pcp, oval, nval)				\
-	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
-#endif
-
-#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-({									\
-	int ret__;							\
-	unsigned long flags;						\
-	local_irq_save(flags);						\
-	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
-			oval1, oval2, nval1, nval2);			\
-	local_irq_restore(flags);					\
-	ret__;								\
-})
-
-#ifndef irqsafe_cpu_cmpxchg_double
-# ifndef irqsafe_cpu_cmpxchg_double_1
-#  define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_2
-#  define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_4
-#  define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_8
-#  define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
-	__pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
-#endif
-
 #endif /* __LINUX_PERCPU_H */