diff options
author | Tejun Heo <tj@kernel.org> | 2014-06-17 19:12:40 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2014-06-17 19:12:40 -0400 |
commit | eba117889ac444bea6e8270049cbaeed48169889 (patch) | |
tree | 81aa1ba187d57a52b0cd81788bc381cc3dc17b09 /include/linux/percpu-defs.h | |
parent | cadb1c4db2d33e0a818f645cd1963a479dab91e2 (diff) |
percpu: prettify percpu header files
percpu macros are difficult to read. It's partly because they're
fairly complex but also because they simply lack visual and
conventional consistency to an unusual degree. The preceding patches
tried to organize macro definitions consistently by their roles. This
patch makes the following cosmetic changes to improve overall
readability.
* Use consistent convention for multi-line macro definitions - "do {"
or "({" are now put on their own lines and the line continuing '\'
are all put on the same column.
* Temp variables used inside macros are consistently given "__" prefix.
* When a macro argument is passed to another macro or a function,
putting extra parentheses around it doesn't help anything. Don't put
them.
* _this_cpu_generic_*() are renamed to this_cpu_generic_*() so that
they're consistent with raw_cpu_generic_*().
* Reorganize raw_cpu_*() and this_cpu_*() definitions so that trivial
wrappers are collected in one place after actual operation
definitions.
* Other misc cleanups including reorganizing comments.
All changes in this patch are cosmetic and cause no functional
difference.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Christoph Lameter <cl@linux.com>
Diffstat (limited to 'include/linux/percpu-defs.h')
-rw-r--r-- | include/linux/percpu-defs.h | 253 |
1 files changed, 139 insertions, 114 deletions
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 215917e9a176..d8bb6e001c6a 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h | |||
@@ -198,7 +198,8 @@ | |||
198 | * + 0 is required in order to convert the pointer type from a | 198 | * + 0 is required in order to convert the pointer type from a |
199 | * potential array type to a pointer to a single item of the array. | 199 | * potential array type to a pointer to a single item of the array. |
200 | */ | 200 | */ |
201 | #define __verify_pcpu_ptr(ptr) do { \ | 201 | #define __verify_pcpu_ptr(ptr) \ |
202 | do { \ | ||
202 | const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ | 203 | const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ |
203 | (void)__vpp_verify; \ | 204 | (void)__vpp_verify; \ |
204 | } while (0) | 205 | } while (0) |
@@ -210,12 +211,13 @@ | |||
210 | * to prevent the compiler from making incorrect assumptions about the | 211 | * to prevent the compiler from making incorrect assumptions about the |
211 | * pointer value. The weird cast keeps both GCC and sparse happy. | 212 | * pointer value. The weird cast keeps both GCC and sparse happy. |
212 | */ | 213 | */ |
213 | #define SHIFT_PERCPU_PTR(__p, __offset) ({ \ | 214 | #define SHIFT_PERCPU_PTR(__p, __offset) \ |
214 | __verify_pcpu_ptr((__p)); \ | 215 | ({ \ |
216 | __verify_pcpu_ptr(__p); \ | ||
215 | RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \ | 217 | RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \ |
216 | }) | 218 | }) |
217 | 219 | ||
218 | #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) | 220 | #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR(ptr, per_cpu_offset(cpu)) |
219 | #define raw_cpu_ptr(ptr) arch_raw_cpu_ptr(ptr) | 221 | #define raw_cpu_ptr(ptr) arch_raw_cpu_ptr(ptr) |
220 | 222 | ||
221 | #ifdef CONFIG_DEBUG_PREEMPT | 223 | #ifdef CONFIG_DEBUG_PREEMPT |
@@ -226,12 +228,13 @@ | |||
226 | 228 | ||
227 | #else /* CONFIG_SMP */ | 229 | #else /* CONFIG_SMP */ |
228 | 230 | ||
229 | #define VERIFY_PERCPU_PTR(__p) ({ \ | 231 | #define VERIFY_PERCPU_PTR(__p) \ |
230 | __verify_pcpu_ptr((__p)); \ | 232 | ({ \ |
231 | (typeof(*(__p)) __kernel __force *)(__p); \ | 233 | __verify_pcpu_ptr(__p); \ |
234 | (typeof(*(__p)) __kernel __force *)(__p); \ | ||
232 | }) | 235 | }) |
233 | 236 | ||
234 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); }) | 237 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); }) |
235 | #define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) | 238 | #define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) |
236 | #define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) | 239 | #define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) |
237 | 240 | ||
@@ -248,26 +251,32 @@ | |||
248 | * Must be an lvalue. Since @var must be a simple identifier, | 251 | * Must be an lvalue. Since @var must be a simple identifier, |
249 | * we force a syntax error here if it isn't. | 252 | * we force a syntax error here if it isn't. |
250 | */ | 253 | */ |
251 | #define get_cpu_var(var) (*({ \ | 254 | #define get_cpu_var(var) \ |
252 | preempt_disable(); \ | 255 | (*({ \ |
253 | this_cpu_ptr(&var); })) | 256 | preempt_disable(); \ |
257 | this_cpu_ptr(&var); \ | ||
258 | })) | ||
254 | 259 | ||
255 | /* | 260 | /* |
256 | * The weird & is necessary because sparse considers (void)(var) to be | 261 | * The weird & is necessary because sparse considers (void)(var) to be |
257 | * a direct dereference of percpu variable (var). | 262 | * a direct dereference of percpu variable (var). |
258 | */ | 263 | */ |
259 | #define put_cpu_var(var) do { \ | 264 | #define put_cpu_var(var) \ |
260 | (void)&(var); \ | 265 | do { \ |
261 | preempt_enable(); \ | 266 | (void)&(var); \ |
267 | preempt_enable(); \ | ||
262 | } while (0) | 268 | } while (0) |
263 | 269 | ||
264 | #define get_cpu_ptr(var) ({ \ | 270 | #define get_cpu_ptr(var) \ |
265 | preempt_disable(); \ | 271 | ({ \ |
266 | this_cpu_ptr(var); }) | 272 | preempt_disable(); \ |
273 | this_cpu_ptr(var); \ | ||
274 | }) | ||
267 | 275 | ||
268 | #define put_cpu_ptr(var) do { \ | 276 | #define put_cpu_ptr(var) \ |
269 | (void)(var); \ | 277 | do { \ |
270 | preempt_enable(); \ | 278 | (void)(var); \ |
279 | preempt_enable(); \ | ||
271 | } while (0) | 280 | } while (0) |
272 | 281 | ||
273 | /* | 282 | /* |
@@ -284,15 +293,16 @@ static inline void __this_cpu_preempt_check(const char *op) { } | |||
284 | #endif | 293 | #endif |
285 | 294 | ||
286 | #define __pcpu_size_call_return(stem, variable) \ | 295 | #define __pcpu_size_call_return(stem, variable) \ |
287 | ({ typeof(variable) pscr_ret__; \ | 296 | ({ \ |
297 | typeof(variable) pscr_ret__; \ | ||
288 | __verify_pcpu_ptr(&(variable)); \ | 298 | __verify_pcpu_ptr(&(variable)); \ |
289 | switch(sizeof(variable)) { \ | 299 | switch(sizeof(variable)) { \ |
290 | case 1: pscr_ret__ = stem##1(variable);break; \ | 300 | case 1: pscr_ret__ = stem##1(variable); break; \ |
291 | case 2: pscr_ret__ = stem##2(variable);break; \ | 301 | case 2: pscr_ret__ = stem##2(variable); break; \ |
292 | case 4: pscr_ret__ = stem##4(variable);break; \ | 302 | case 4: pscr_ret__ = stem##4(variable); break; \ |
293 | case 8: pscr_ret__ = stem##8(variable);break; \ | 303 | case 8: pscr_ret__ = stem##8(variable); break; \ |
294 | default: \ | 304 | default: \ |
295 | __bad_size_call_parameter();break; \ | 305 | __bad_size_call_parameter(); break; \ |
296 | } \ | 306 | } \ |
297 | pscr_ret__; \ | 307 | pscr_ret__; \ |
298 | }) | 308 | }) |
@@ -323,11 +333,11 @@ static inline void __this_cpu_preempt_check(const char *op) { } | |||
323 | #define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ | 333 | #define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ |
324 | ({ \ | 334 | ({ \ |
325 | bool pdcrb_ret__; \ | 335 | bool pdcrb_ret__; \ |
326 | __verify_pcpu_ptr(&pcp1); \ | 336 | __verify_pcpu_ptr(&(pcp1)); \ |
327 | BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ | 337 | BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ |
328 | VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1))); \ | 338 | VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1))); \ |
329 | VM_BUG_ON((unsigned long)(&pcp2) != \ | 339 | VM_BUG_ON((unsigned long)(&(pcp2)) != \ |
330 | (unsigned long)(&pcp1) + sizeof(pcp1)); \ | 340 | (unsigned long)(&(pcp1)) + sizeof(pcp1)); \ |
331 | switch(sizeof(pcp1)) { \ | 341 | switch(sizeof(pcp1)) { \ |
332 | case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ | 342 | case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ |
333 | case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ | 343 | case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ |
@@ -367,117 +377,132 @@ do { \ | |||
367 | * cpu atomic operations for 2 byte sized RMW actions. If arch code does | 377 | * cpu atomic operations for 2 byte sized RMW actions. If arch code does |
368 | * not provide operations for a scalar size then the fallback in the | 378 | * not provide operations for a scalar size then the fallback in the |
369 | * generic code will be used. | 379 | * generic code will be used. |
380 | * | ||
381 | * cmpxchg_double replaces two adjacent scalars at once. The first two | ||
382 | * parameters are per cpu variables which have to be of the same size. A | ||
383 | * truth value is returned to indicate success or failure (since a double | ||
384 | * register result is difficult to handle). There is very limited hardware | ||
385 | * support for these operations, so only certain sizes may work. | ||
370 | */ | 386 | */ |
371 | 387 | ||
372 | /* | 388 | /* |
373 | * Generic percpu operations for contexts where we do not want to do | 389 | * Operations for contexts where we do not want to do any checks for |
374 | * any checks for preemptiosn. | 390 | * preemptions. Unless strictly necessary, always use [__]this_cpu_*() |
391 | * instead. | ||
375 | * | 392 | * |
376 | * If there is no other protection through preempt disable and/or | 393 | * If there is no other protection through preempt disable and/or disabling |
377 | * disabling interupts then one of these RMW operations can show unexpected | 394 | * interupts then one of these RMW operations can show unexpected behavior |
378 | * behavior because the execution thread was rescheduled on another processor | 395 | * because the execution thread was rescheduled on another processor or an |
379 | * or an interrupt occurred and the same percpu variable was modified from | 396 | * interrupt occurred and the same percpu variable was modified from the |
380 | * the interrupt context. | 397 | * interrupt context. |
381 | */ | 398 | */ |
382 | # define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, (pcp)) | 399 | #define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, pcp) |
383 | # define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, (pcp), (val)) | 400 | #define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, pcp, val) |
384 | # define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, (pcp), (val)) | 401 | #define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, pcp, val) |
385 | # define raw_cpu_sub(pcp, val) raw_cpu_add((pcp), -(val)) | 402 | #define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, pcp, val) |
386 | # define raw_cpu_inc(pcp) raw_cpu_add((pcp), 1) | 403 | #define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, pcp, val) |
387 | # define raw_cpu_dec(pcp) raw_cpu_sub((pcp), 1) | 404 | #define raw_cpu_add_return(pcp, val) __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) |
388 | # define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, (pcp), (val)) | 405 | #define raw_cpu_xchg(pcp, nval) __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval) |
389 | # define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, (pcp), (val)) | 406 | #define raw_cpu_cmpxchg(pcp, oval, nval) \ |
390 | # define raw_cpu_add_return(pcp, val) \ | ||
391 | __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) | ||
392 | #define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
393 | #define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1) | ||
394 | #define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1) | ||
395 | # define raw_cpu_xchg(pcp, nval) \ | ||
396 | __pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval) | ||
397 | # define raw_cpu_cmpxchg(pcp, oval, nval) \ | ||
398 | __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) | 407 | __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) |
399 | # define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | 408 | #define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
400 | __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) | 409 | __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) |
410 | |||
411 | #define raw_cpu_sub(pcp, val) raw_cpu_add(pcp, -(val)) | ||
412 | #define raw_cpu_inc(pcp) raw_cpu_add(pcp, 1) | ||
413 | #define raw_cpu_dec(pcp) raw_cpu_sub(pcp, 1) | ||
414 | #define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
415 | #define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1) | ||
416 | #define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1) | ||
401 | 417 | ||
402 | /* | 418 | /* |
403 | * Generic percpu operations for context that are safe from preemption/interrupts. | 419 | * Operations for contexts that are safe from preemption/interrupts. These |
420 | * operations verify that preemption is disabled. | ||
404 | */ | 421 | */ |
405 | # define __this_cpu_read(pcp) \ | 422 | #define __this_cpu_read(pcp) \ |
406 | (__this_cpu_preempt_check("read"),raw_cpu_read(pcp)) | 423 | ({ \ |
424 | __this_cpu_preempt_check("read"); \ | ||
425 | raw_cpu_read(pcp); \ | ||
426 | }) | ||
407 | 427 | ||
408 | # define __this_cpu_write(pcp, val) \ | 428 | #define __this_cpu_write(pcp, val) \ |
409 | do { __this_cpu_preempt_check("write"); \ | 429 | ({ \ |
410 | raw_cpu_write(pcp, val); \ | 430 | __this_cpu_preempt_check("write"); \ |
411 | } while (0) | 431 | raw_cpu_write(pcp, val); \ |
432 | }) | ||
412 | 433 | ||
413 | # define __this_cpu_add(pcp, val) \ | 434 | #define __this_cpu_add(pcp, val) \ |
414 | do { __this_cpu_preempt_check("add"); \ | 435 | ({ \ |
436 | __this_cpu_preempt_check("add"); \ | ||
415 | raw_cpu_add(pcp, val); \ | 437 | raw_cpu_add(pcp, val); \ |
416 | } while (0) | 438 | }) |
417 | |||
418 | # define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val)) | ||
419 | # define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1) | ||
420 | # define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1) | ||
421 | 439 | ||
422 | # define __this_cpu_and(pcp, val) \ | 440 | #define __this_cpu_and(pcp, val) \ |
423 | do { __this_cpu_preempt_check("and"); \ | 441 | ({ \ |
442 | __this_cpu_preempt_check("and"); \ | ||
424 | raw_cpu_and(pcp, val); \ | 443 | raw_cpu_and(pcp, val); \ |
425 | } while (0) | 444 | }) |
426 | 445 | ||
427 | # define __this_cpu_or(pcp, val) \ | 446 | #define __this_cpu_or(pcp, val) \ |
428 | do { __this_cpu_preempt_check("or"); \ | 447 | ({ \ |
448 | __this_cpu_preempt_check("or"); \ | ||
429 | raw_cpu_or(pcp, val); \ | 449 | raw_cpu_or(pcp, val); \ |
430 | } while (0) | 450 | }) |
431 | 451 | ||
432 | # define __this_cpu_add_return(pcp, val) \ | 452 | #define __this_cpu_add_return(pcp, val) \ |
433 | (__this_cpu_preempt_check("add_return"),raw_cpu_add_return(pcp, val)) | 453 | ({ \ |
454 | __this_cpu_preempt_check("add_return"); \ | ||
455 | raw_cpu_add_return(pcp, val); \ | ||
456 | }) | ||
434 | 457 | ||
435 | #define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) | 458 | #define __this_cpu_xchg(pcp, nval) \ |
436 | #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) | 459 | ({ \ |
437 | #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) | 460 | __this_cpu_preempt_check("xchg"); \ |
461 | raw_cpu_xchg(pcp, nval); \ | ||
462 | }) | ||
438 | 463 | ||
439 | # define __this_cpu_xchg(pcp, nval) \ | 464 | #define __this_cpu_cmpxchg(pcp, oval, nval) \ |
440 | (__this_cpu_preempt_check("xchg"),raw_cpu_xchg(pcp, nval)) | 465 | ({ \ |
466 | __this_cpu_preempt_check("cmpxchg"); \ | ||
467 | raw_cpu_cmpxchg(pcp, oval, nval); \ | ||
468 | }) | ||
441 | 469 | ||
442 | # define __this_cpu_cmpxchg(pcp, oval, nval) \ | 470 | #define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
443 | (__this_cpu_preempt_check("cmpxchg"),raw_cpu_cmpxchg(pcp, oval, nval)) | 471 | ({ __this_cpu_preempt_check("cmpxchg_double"); \ |
472 | raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \ | ||
473 | }) | ||
444 | 474 | ||
445 | # define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | 475 | #define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val)) |
446 | (__this_cpu_preempt_check("cmpxchg_double"),raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)) | 476 | #define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1) |
477 | #define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1) | ||
478 | #define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
479 | #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) | ||
480 | #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) | ||
447 | 481 | ||
448 | /* | 482 | /* |
449 | * this_cpu_*() operations are used for accesses that must be done in a | 483 | * Operations with implied preemption protection. These operations can be |
450 | * preemption safe way since we know that the context is not preempt | 484 | * used without worrying about preemption. Note that interrupts may still |
451 | * safe. Interrupts may occur. If the interrupt modifies the variable too | 485 | * occur while an operation is in progress and if the interrupt modifies |
452 | * then RMW actions will not be reliable. | 486 | * the variable too then RMW actions may not be reliable. |
453 | */ | 487 | */ |
454 | # define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp)) | 488 | #define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp) |
455 | # define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val)) | 489 | #define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val) |
456 | # define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val)) | 490 | #define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, pcp, val) |
457 | # define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val)) | 491 | #define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, pcp, val) |
458 | # define this_cpu_inc(pcp) this_cpu_add((pcp), 1) | 492 | #define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, pcp, val) |
459 | # define this_cpu_dec(pcp) this_cpu_sub((pcp), 1) | 493 | #define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) |
460 | # define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val)) | 494 | #define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval) |
461 | # define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) | 495 | #define this_cpu_cmpxchg(pcp, oval, nval) \ |
462 | # define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) | 496 | __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) |
497 | #define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
498 | __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) | ||
499 | |||
500 | #define this_cpu_sub(pcp, val) this_cpu_add(pcp, -(typeof(pcp))(val)) | ||
501 | #define this_cpu_inc(pcp) this_cpu_add(pcp, 1) | ||
502 | #define this_cpu_dec(pcp) this_cpu_sub(pcp, 1) | ||
463 | #define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) | 503 | #define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) |
464 | #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) | 504 | #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) |
465 | #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) | 505 | #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) |
466 | # define this_cpu_xchg(pcp, nval) \ | ||
467 | __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval) | ||
468 | # define this_cpu_cmpxchg(pcp, oval, nval) \ | ||
469 | __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) | ||
470 | |||
471 | /* | ||
472 | * cmpxchg_double replaces two adjacent scalars at once. The first | ||
473 | * two parameters are per cpu variables which have to be of the same | ||
474 | * size. A truth value is returned to indicate success or failure | ||
475 | * (since a double register result is difficult to handle). There is | ||
476 | * very limited hardware support for these operations, so only certain | ||
477 | * sizes may work. | ||
478 | */ | ||
479 | # define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
480 | __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) | ||
481 | 506 | ||
482 | #endif /* __ASSEMBLY__ */ | 507 | #endif /* __ASSEMBLY__ */ |
483 | #endif /* _LINUX_PERCPU_DEFS_H */ | 508 | #endif /* _LINUX_PERCPU_DEFS_H */ |