author		Christoph Lameter <cl@linux.com>	2010-12-18 09:54:04 -0500
committer	Tejun Heo <tj@kernel.org>	2010-12-18 09:54:04 -0500
commit		2b7124428561c7c3cfa4a58cc4c6feea53f3148e (patch)
tree		3d8ceae0abd1ec410aeaa29f3d793e54be267e8a
parent		403047754cf690b012369b8fb563b738b88086e6 (diff)
percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg() support
Generic code to provide new per cpu atomic features

	this_cpu_cmpxchg
	this_cpu_xchg

Fallback occurs to functions using interrupt disable/enable to ensure
correct per cpu atomicity.

Fallback to regular cmpxchg and xchg is not possible since per cpu
atomic semantics include the guarantee that the current cpu's per cpu
data is accessed atomically. Regular cmpxchg and xchg require the
address of the per cpu data to be determined first; without a segment
override, that address calculation cannot be folded atomically into
the xchg or cmpxchg itself.

tj: - Relocated new ops to conform better to the general organization.
    - This patch contains a trivial comment fix.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
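To make the constraint concrete, here is a minimal C sketch (not part of the
patch; the per cpu variable hyp_counter and both helpers are hypothetical)
contrasting a regular cmpxchg() on a computed per cpu address with the new
operation:

	static DEFINE_PER_CPU(unsigned long, hyp_counter);	/* hypothetical */

	static void hyp_update_racy(unsigned long old, unsigned long new)
	{
		/* The per cpu address has to be determined first ... */
		unsigned long *p = this_cpu_ptr(&hyp_counter);

		/* ... and the task may migrate to another cpu before the
		 * cmpxchg runs, so it can act on the previous cpu's data. */
		cmpxchg(p, old, new);
	}

	static void hyp_update_safe(unsigned long old, unsigned long new)
	{
		/* Address determination and the exchange form a single
		 * per cpu operation; the generic fallback in this patch
		 * wraps it in preempt_disable()/preempt_enable(). */
		this_cpu_cmpxchg(hyp_counter, old, new);
	}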
-rw-r--r--	include/linux/percpu.h	134
1 file changed, 133 insertions(+), 1 deletion(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 3484e88d93f8..27c3c6fcfad3 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -447,6 +447,59 @@ do { \
 #define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
 #define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
 
+#define _this_cpu_generic_xchg(pcp, nval)				\
+({	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	ret__ = __this_cpu_read(pcp);					\
+	__this_cpu_write(pcp, nval);					\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_xchg
+# ifndef this_cpu_xchg_1
+#  define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_2
+#  define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_4
+#  define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_8
+#  define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# define this_cpu_xchg(pcp, nval)	\
+	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	ret__ = __this_cpu_read(pcp);					\
+	if (ret__ == (oval))						\
+		__this_cpu_write(pcp, nval);				\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_cmpxchg
+# ifndef this_cpu_cmpxchg_1
+#  define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_2
+#  define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_4
+#  define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_8
+#  define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define this_cpu_cmpxchg(pcp, oval, nval)	\
+	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
 /*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
@@ -600,11 +653,61 @@ do { \
 #define __this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
 #define __this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
 
+#define __this_cpu_generic_xchg(pcp, nval)				\
+({	typeof(pcp) ret__;						\
+	ret__ = __this_cpu_read(pcp);					\
+	__this_cpu_write(pcp, nval);					\
+	ret__;								\
+})
+
+#ifndef __this_cpu_xchg
+# ifndef __this_cpu_xchg_1
+#  define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_2
+#  define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_4
+#  define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_8
+#  define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# define __this_cpu_xchg(pcp, nval)	\
+	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define __this_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({									\
+	typeof(pcp) ret__;						\
+	ret__ = __this_cpu_read(pcp);					\
+	if (ret__ == (oval))						\
+		__this_cpu_write(pcp, nval);				\
+	ret__;								\
+})
+
+#ifndef __this_cpu_cmpxchg
+# ifndef __this_cpu_cmpxchg_1
+#  define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_2
+#  define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_4
+#  define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_8
+#  define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define __this_cpu_cmpxchg(pcp, oval, nval)	\
+	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
 /*
  * IRQ safe versions of the per cpu RMW operations. Note that these operations
  * are *not* safe against modification of the same variable from another
  * processors (which one gets when using regular atomic operations)
- . They are guaranteed to be atomic vs. local interrupts and
+ * They are guaranteed to be atomic vs. local interrupts and
  * preemption only.
  */
 #define irqsafe_cpu_generic_to_op(pcp, val, op)			\
@@ -691,4 +794,33 @@ do { \
 # define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (val))
 #endif
 
+#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({									\
+	typeof(pcp) ret__;						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
+	ret__ = __this_cpu_read(pcp);					\
+	if (ret__ == (oval))						\
+		__this_cpu_write(pcp, nval);				\
+	local_irq_restore(flags);					\
+	ret__;								\
+})
+
+#ifndef irqsafe_cpu_cmpxchg
+# ifndef irqsafe_cpu_cmpxchg_1
+#  define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_2
+#  define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_4
+#  define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_8
+#  define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define irqsafe_cpu_cmpxchg(pcp, oval, nval)		\
+	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
+#endif
+
 #endif /* __LINUX_PERCPU_H */
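Usage note: the new this_cpu_cmpxchg() allows lockless retry loops on per cpu
data even though the task may migrate between cpus mid-loop. A minimal,
hypothetical sketch (hyp_events and the helper are illustrative only, not
from this patch):

	static DEFINE_PER_CPU(int, hyp_events);	/* hypothetical */

	static void hyp_event_inc_saturating(int limit)
	{
		int old, new;

		do {
			old = this_cpu_read(hyp_events);
			new = old < limit ? old + 1 : old;
			/* If we migrated after the read, the cmpxchg acts on
			 * the current cpu's value and succeeds only if that
			 * value still equals old; otherwise we retry. */
		} while (this_cpu_cmpxchg(hyp_events, old, new) != old);
	}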