path: root/arch/x86/include/asm/percpu.h
author	Christoph Lameter <cl@linux.com>	2010-12-06 12:40:00 -0500
committer	Tejun Heo <tj@kernel.org>	2010-12-17 09:15:28 -0500
commit	8f1d97c79eb65de1d05799d6b81d79cd94169114 (patch)
tree	f9732488505ee3782b48bed139a14190095911d6	/arch/x86/include/asm/percpu.h
parent	a663ffff1d2e94a7c549a37d08ed9169ce83bdd6 (diff)
x86: Support for this_cpu_add, sub, dec, inc_return
Supply an implementation for x86 in order to generate more efficient code.

V2->V3:
- Cleanup
- Remove strange type checking from percpu_add_return_op.

tj: - Dropped unused typedef from percpu_add_return_op().
    - Renamed ret__ to paro_ret__ in percpu_add_return_op().
    - Minor indentation adjustments.

Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
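For illustration, a minimal sketch of how a caller might use the new operation; the counter and function names below are hypothetical and not part of this patch. this_cpu_add_return() bumps a per-cpu variable and hands back the new value in one step, which with this patch compiles to a single xadd on x86 instead of a separate add and read:

	/* Hypothetical caller, illustration only -- not part of the patch. */
	DEFINE_PER_CPU(int, nr_events);		/* hypothetical per-cpu counter */

	static int note_event(void)
	{
		/* add 1 to this CPU's counter and return the updated count */
		return this_cpu_add_return(nr_events, 1);
	}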
Diffstat (limited to 'arch/x86/include/asm/percpu.h')
-rw-r--r--	arch/x86/include/asm/percpu.h	43
1 file changed, 43 insertions, 0 deletions
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index f899e01a8ac9..38f9e965ff96 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -177,6 +177,39 @@ do { \
 	}						\
 } while (0)
 
+/*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val)				\
+({								\
+	typeof(var) paro_ret__ = val;				\
+	switch (sizeof(var)) {					\
+	case 1:							\
+		asm("xaddb %0, "__percpu_arg(1)			\
+			    : "+q" (paro_ret__), "+m" (var)	\
+			    : : "memory");			\
+		break;						\
+	case 2:							\
+		asm("xaddw %0, "__percpu_arg(1)			\
+			    : "+r" (paro_ret__), "+m" (var)	\
+			    : : "memory");			\
+		break;						\
+	case 4:							\
+		asm("xaddl %0, "__percpu_arg(1)			\
+			    : "+r" (paro_ret__), "+m" (var)	\
+			    : : "memory");			\
+		break;						\
+	case 8:							\
+		asm("xaddq %0, "__percpu_arg(1)			\
+			    : "+re" (paro_ret__), "+m" (var)	\
+			    : : "memory");			\
+		break;						\
+	default: __bad_percpu_size();				\
+	}							\
+	paro_ret__ += val;					\
+	paro_ret__;						\
+})
+
 #define percpu_from_op(op, var, constraint)		\
 ({							\
 	typeof(var) pfo_ret__;				\
@@ -300,6 +333,14 @@ do { \
 #define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
 
+#ifndef CONFIG_M386
+#define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
+#endif
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
  * 32 bit must fall back to generic operations.
@@ -324,6 +365,8 @@ do { \
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 
+#define __this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
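A note on the asm in percpu_add_return_op(): xadd leaves the previous value of the memory operand in the register, which is why the macro adds val once more before yielding its result, so the caller sees the new value. A rough plain-C analogue of that semantic, for illustration only; the __sync builtin is actually stronger than needed here, since the per-cpu xadd carries no lock prefix and relies on the variable being touched only by the owning CPU:

	/* Illustration only: approximate semantics of percpu_add_return_op()
	 * on an ordinary variable.  __sync_fetch_and_add() returns the old
	 * value, like xadd; adding val again yields the new value that
	 * this_cpu_add_return() is defined to return.
	 */
	static inline int add_return_analogue(int *ptr, int val)
	{
		int old = __sync_fetch_and_add(ptr, val);
		return old + val;
	}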