author     Tejun Heo <tj@kernel.org>    2010-12-17 09:47:04 -0500
committer  Tejun Heo <tj@kernel.org>    2010-12-17 10:13:22 -0500
commit     403047754cf690b012369b8fb563b738b88086e6 (patch)
tree       d784465cb02ea3898094ad5aa83566fecb6c7046 /arch/x86/include/asm/percpu.h
parent     8f1d97c79eb65de1d05799d6b81d79cd94169114 (diff)
percpu,x86: relocate this_cpu_add_return() and friends
- include/linux/percpu.h: this_cpu_add_return() and friends were located
  next to __this_cpu_add_return(). However, the overall organization is
  to first group by preemption safeness. Relocate this_cpu_add_return()
  and friends to preemption-safe area.

- arch/x86/include/asm/percpu.h: Relocate percpu_add_return_op() after
  other more basic operations. Relocate [__]this_cpu_add_return_8() so
  that they're first grouped by preemption safeness.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
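For orientation (not part of the patch): a minimal, hypothetical sketch of how the two families this reorganization separates are meant to be used. The per-cpu variable demo_events and the demo_*() helpers are invented for illustration; this_cpu_add_return() may be called with preemption enabled, while __this_cpu_add_return() assumes the caller has already disabled preemption. On 64-bit x86 both now reach percpu_add_return_op() through the *_add_return_8() defines moved in the diff below.

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical per-cpu counter used only for illustration. */
static DEFINE_PER_CPU(unsigned long, demo_events);

/* Preemption-safe variant: safe to call with preemption enabled. */
static unsigned long demo_count_event(void)
{
        return this_cpu_add_return(demo_events, 1);
}

/* Caller-managed preemption: the __ variant assumes we stay on this CPU. */
static unsigned long demo_count_event_pinned(void)
{
        unsigned long n;

        preempt_disable();
        n = __this_cpu_add_return(demo_events, 1);
        preempt_enable();
        return n;
}

get_cpu()/put_cpu() would work equally well around the __ variant; the only point is which side is responsible for keeping the task on one CPU.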
Diffstat (limited to 'arch/x86/include/asm/percpu.h')
-rw-r--r--  arch/x86/include/asm/percpu.h  71
1 file changed, 35 insertions(+), 36 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 38f9e965ff96..dd0cd4b6a76f 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -177,39 +177,6 @@ do { \
 	} \
 } while (0)
 
-/*
- * Add return operation
- */
-#define percpu_add_return_op(var, val) \
-({ \
-	typeof(var) paro_ret__ = val; \
-	switch (sizeof(var)) { \
-	case 1: \
-		asm("xaddb %0, "__percpu_arg(1) \
-			: "+q" (paro_ret__), "+m" (var) \
-			: : "memory"); \
-		break; \
-	case 2: \
-		asm("xaddw %0, "__percpu_arg(1) \
-			: "+r" (paro_ret__), "+m" (var) \
-			: : "memory"); \
-		break; \
-	case 4: \
-		asm("xaddl %0, "__percpu_arg(1) \
-			: "+r" (paro_ret__), "+m" (var) \
-			: : "memory"); \
-		break; \
-	case 8: \
-		asm("xaddq %0, "__percpu_arg(1) \
-			: "+re" (paro_ret__), "+m" (var) \
-			: : "memory"); \
-		break; \
-	default: __bad_percpu_size(); \
-	} \
-	paro_ret__ += val; \
-	paro_ret__; \
-})
-
 #define percpu_from_op(op, var, constraint) \
 ({ \
 	typeof(var) pfo_ret__; \
@@ -263,6 +230,39 @@ do { \
 })
 
 /*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val) \
+({ \
+	typeof(var) paro_ret__ = val; \
+	switch (sizeof(var)) { \
+	case 1: \
+		asm("xaddb %0, "__percpu_arg(1) \
+			: "+q" (paro_ret__), "+m" (var) \
+			: : "memory"); \
+		break; \
+	case 2: \
+		asm("xaddw %0, "__percpu_arg(1) \
+			: "+r" (paro_ret__), "+m" (var) \
+			: : "memory"); \
+		break; \
+	case 4: \
+		asm("xaddl %0, "__percpu_arg(1) \
+			: "+r" (paro_ret__), "+m" (var) \
+			: : "memory"); \
+		break; \
+	case 8: \
+		asm("xaddq %0, "__percpu_arg(1) \
+			: "+re" (paro_ret__), "+m" (var) \
+			: : "memory"); \
+		break; \
+	default: __bad_percpu_size(); \
+	} \
+	paro_ret__ += val; \
+	paro_ret__; \
+})
+
+/*
  * percpu_read() makes gcc load the percpu variable every time it is
  * accessed while percpu_read_stable() allows the value to be cached.
  * percpu_read_stable() is more efficient and can be used if its value
@@ -352,6 +352,7 @@ do { \
 #define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
 
 #define this_cpu_read_8(pcp)	percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
@@ -359,14 +360,12 @@ do { \
 #define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 
 #define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
-
-#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
-#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
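Reading the relocated macro: xadd leaves the previous per-cpu value in paro_ret__, and the trailing paro_ret__ += val turns that into the value just stored, so the operation yields new = old + val. A rough, illustrative C sketch of the semantics only (it deliberately omits the segment-based per-cpu addressing and single-instruction update that the inline asm provides):

/* Semantics sketch only; not kernel code. */
static inline long add_return_sketch(long *var, long val)
{
        long old = *var;        /* xadd hands the old value back in paro_ret__ */

        *var = old + val;       /* the in-memory update performed by xadd */
        return old + val;       /* paro_ret__ += val: return the new value */
}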