author		Tejun Heo <tj@kernel.org>	2010-12-17 09:47:04 -0500
committer	Tejun Heo <tj@kernel.org>	2010-12-17 10:13:22 -0500
commit		403047754cf690b012369b8fb563b738b88086e6
tree		d784465cb02ea3898094ad5aa83566fecb6c7046
parent		8f1d97c79eb65de1d05799d6b81d79cd94169114
percpu,x86: relocate this_cpu_add_return() and friends
- include/linux/percpu.h: this_cpu_add_return() and friends were
  located next to __this_cpu_add_return().  However, the overall
  organization is to first group by preemption safeness.  Relocate
  this_cpu_add_return() and friends to the preemption-safe area.

- arch/x86/include/asm/percpu.h: Relocate percpu_add_return_op() after
  other more basic operations.  Relocate [__]this_cpu_add_return_8()
  so that they're first grouped by preemption safeness.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
-rw-r--r--	arch/x86/include/asm/percpu.h	71
-rw-r--r--	include/linux/percpu.h		60
2 files changed, 65 insertions(+), 66 deletions(-)
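For orientation, a minimal usage sketch (not part of the patch; the per-cpu variable and function names below are hypothetical) of the distinction the relocation is organized around: this_cpu_add_return() handles preemption internally and can be called without any pinning, while __this_cpu_add_return() assumes the caller already prevents migration to another CPU.

/* Hypothetical illustration only -- not from this patch. */
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, example_event_count);

/* Preemption-safe variant: no pinning required by the caller. */
static unsigned long example_count_event(void)
{
	return this_cpu_add_return(example_event_count, 1);
}

/* Raw variant: only valid while preemption is already disabled. */
static unsigned long example_count_event_pinned(void)
{
	unsigned long n;

	preempt_disable();
	n = __this_cpu_add_return(example_event_count, 1);
	preempt_enable();
	return n;
}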
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 38f9e965ff96..dd0cd4b6a76f 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -177,39 +177,6 @@ do { \
 	}								\
 } while (0)
 
-/*
- * Add return operation
- */
-#define percpu_add_return_op(var, val)					\
-({									\
-	typeof(var) paro_ret__ = val;					\
-	switch (sizeof(var)) {						\
-	case 1:								\
-		asm("xaddb %0, "__percpu_arg(1)				\
-			    : "+q" (paro_ret__), "+m" (var)		\
-			    : : "memory");				\
-		break;							\
-	case 2:								\
-		asm("xaddw %0, "__percpu_arg(1)				\
-			    : "+r" (paro_ret__), "+m" (var)		\
-			    : : "memory");				\
-		break;							\
-	case 4:								\
-		asm("xaddl %0, "__percpu_arg(1)				\
-			    : "+r" (paro_ret__), "+m" (var)		\
-			    : : "memory");				\
-		break;							\
-	case 8:								\
-		asm("xaddq %0, "__percpu_arg(1)				\
-			    : "+re" (paro_ret__), "+m" (var)		\
-			    : : "memory");				\
-		break;							\
-	default: __bad_percpu_size();					\
-	}								\
-	paro_ret__ += val;						\
-	paro_ret__;							\
-})
-
 #define percpu_from_op(op, var, constraint)				\
 ({									\
 	typeof(var) pfo_ret__;						\
@@ -263,6 +230,39 @@ do { \
 })
 
 /*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val)					\
+({									\
+	typeof(var) paro_ret__ = val;					\
+	switch (sizeof(var)) {						\
+	case 1:								\
+		asm("xaddb %0, "__percpu_arg(1)				\
+			    : "+q" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 2:								\
+		asm("xaddw %0, "__percpu_arg(1)				\
+			    : "+r" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 4:								\
+		asm("xaddl %0, "__percpu_arg(1)				\
+			    : "+r" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 8:								\
+		asm("xaddq %0, "__percpu_arg(1)				\
+			    : "+re" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	default: __bad_percpu_size();					\
+	}								\
+	paro_ret__ += val;						\
+	paro_ret__;							\
+})
+
+/*
  * percpu_read() makes gcc load the percpu variable every time it is
  * accessed while percpu_read_stable() allows the value to be cached.
  * percpu_read_stable() is more efficient and can be used if its value
@@ -352,6 +352,7 @@ do { \
 #define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
 
 #define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
@@ -359,14 +360,12 @@ do { \
 #define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 
 #define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
-
-#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
-#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
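One detail worth noting in percpu_add_return_op() above: x86 xadd leaves the sum in memory but returns the previous memory value in the register operand, which is why the macro finishes with "paro_ret__ += val;". An illustrative sketch (not from the patch; the function name is hypothetical):

/* Hypothetical illustration of the xadd idiom used above. */
static inline int example_xadd_return_new(int *p, int val)
{
	int old = val;

	/* xadd stores (*p + old) to *p and puts the previous *p in old */
	asm volatile("xaddl %0, %1"
		     : "+r" (old), "+m" (*p)
		     : : "memory");

	return old + val;	/* previous value + val == new value in *p */
}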
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 4d593defc47d..3484e88d93f8 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -417,6 +417,36 @@ do { \
 # define this_cpu_xor(pcp, val)	__pcpu_size_call(this_cpu_or_, (pcp), (val))
 #endif
 
+#define _this_cpu_generic_add_return(pcp, val)				\
+({									\
+	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	__this_cpu_add(pcp, val);					\
+	ret__ = __this_cpu_read(pcp);					\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_add_return
+# ifndef this_cpu_add_return_1
+#  define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_2
+#  define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_4
+#  define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_8
+#  define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
+#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
+
 /*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
@@ -544,36 +574,6 @@ do { \
 # define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
 #endif
 
-#define _this_cpu_generic_add_return(pcp, val)				\
-({									\
-	typeof(pcp) ret__;						\
-	preempt_disable();						\
-	__this_cpu_add(pcp, val);					\
-	ret__ = __this_cpu_read(pcp);					\
-	preempt_enable();						\
-	ret__;								\
-})
-
-#ifndef this_cpu_add_return
-# ifndef this_cpu_add_return_1
-#  define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_2
-#  define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_4
-#  define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_8
-#  define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
-#endif
-
-#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
-#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
-#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
-
 #define __this_cpu_generic_add_return(pcp, val)				\
 ({									\
 	__this_cpu_add(pcp, val);					\