author    Christoph Lameter <cl@linux.com>    2010-12-06 12:39:59 -0500
committer Tejun Heo <tj@kernel.org>           2010-12-17 09:15:28 -0500
commit    a663ffff1d2e94a7c549a37d08ed9169ce83bdd6 (patch)
tree      b33e87a2b1051b8ae57eb39cd147122c59da9189 /include
parent    a3383e8372c0c11238f9bb9777929bfc3a2d320a (diff)
percpu: Generic support for this_cpu_add, sub, dec, inc_return
Introduce generic support for this_cpu_add_return() etc. The fallback is
to realize these operations with the simpler __this_cpu_* ops.

tj: - Reformatted __pcpu_size_call_return2() to make it more consistent
      with its neighbors.
    - Dropped unnecessary temp variable ret__ from
      __this_cpu_generic_add_return().

Reviewed-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
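As a quick illustration of the interface this patch introduces, here is a
hypothetical usage sketch (not part of the commit; my_counter, my_event and
my_event_nopreempt are invented names):

/* Hypothetical usage sketch of the new *_return ops; not from this patch. */
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

unsigned long my_event(void)
{
	/*
	 * Increment this CPU's copy of my_counter and return the new
	 * value. With the generic fallback this expands to
	 * preempt_disable(); __this_cpu_add(); __this_cpu_read();
	 * preempt_enable().
	 */
	return this_cpu_inc_return(my_counter);
}

unsigned long my_event_nopreempt(void)
{
	/*
	 * The __this_cpu_* variant omits the preemption guard; the
	 * caller must already be non-preemptible, e.g. under
	 * preempt_disable() or in interrupt context.
	 */
	return __this_cpu_add_return(my_counter, 2);
}

Architectures that can do better than the generic fallback can supply their
own this_cpu_add_return_N definitions, which is why each size is wrapped in
its own #ifndef below.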
Diffstat (limited to 'include')
-rw-r--r--  include/linux/percpu.h  71
1 file changed, 71 insertions(+), 0 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5095b834a6fb..4d593defc47d 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -240,6 +240,21 @@ extern void __bad_size_call_parameter(void);
 	pscr_ret__;							\
 })
 
+#define __pcpu_size_call_return2(stem, variable, ...)			\
+({									\
+	typeof(variable) pscr2_ret__;					\
+	__verify_pcpu_ptr(&(variable));					\
+	switch(sizeof(variable)) {					\
+	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
+	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
+	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
+	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+	pscr2_ret__;							\
+})
+
 #define __pcpu_size_call(stem, variable, ...)				\
 do {									\
 	__verify_pcpu_ptr(&(variable));					\
@@ -529,6 +544,62 @@ do { \
 # define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
 #endif
 
+#define _this_cpu_generic_add_return(pcp, val)				\
+({									\
+	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	__this_cpu_add(pcp, val);					\
+	ret__ = __this_cpu_read(pcp);					\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_add_return
+# ifndef this_cpu_add_return_1
+#  define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_2
+#  define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_4
+#  define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_8
+#  define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
+#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
+
+#define __this_cpu_generic_add_return(pcp, val)				\
+({									\
+	__this_cpu_add(pcp, val);					\
+	__this_cpu_read(pcp);						\
+})
+
+#ifndef __this_cpu_add_return
+# ifndef __this_cpu_add_return_1
+#  define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_2
+#  define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_4
+#  define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_8
+#  define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# define __this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
+#endif
+
+#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(val))
+#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
+
 /*
  * IRQ safe versions of the per cpu RMW operations. Note that these operations
  * are *not* safe against modification of the same variable from another
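The __pcpu_size_call_return2() size dispatch may look odd outside the kernel;
the following stand-alone user-space sketch (invented demo_* names, not kernel
code) shows the same technique. Like the kernel macro, it relies on GNU C
statement expressions. Because sizeof(var) is a compile-time constant, the
compiler folds the switch and only the matching stem##N expansion generates
code:

#include <stdio.h>

/* Per-size "ops": in the kernel these would be arch-specific macros. */
#define demo_add_return_4(var, val)	((var) += (val))
#define demo_add_return_8(var, val)	((var) += (val))

/* Same shape as __pcpu_size_call_return2(): dispatch on sizeof(). */
#define demo_size_call_return2(stem, var, ...)				\
({									\
	__typeof__(var) ret__;						\
	switch (sizeof(var)) {						\
	case 4: ret__ = stem##4(var, __VA_ARGS__); break;		\
	case 8: ret__ = stem##8(var, __VA_ARGS__); break;		\
	default: ret__ = 0; break;					\
	}								\
	ret__;								\
})

int main(void)
{
	int i = 5;		/* sizeof == 4: dispatches to ..._4 */
	long long ll = 40;	/* sizeof == 8: dispatches to ..._8 */

	printf("%d\n", demo_size_call_return2(demo_add_return_, i, 2));   /* 7 */
	printf("%lld\n", demo_size_call_return2(demo_add_return_, ll, 2)); /* 42 */
	return 0;
}

In the kernel the default: branch instead calls __bad_size_call_parameter(),
which is declared but never defined, so an unsupported operand size shows up
as a link-time error rather than a runtime failure.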