| author | Thomas Gleixner <tglx@linutronix.de> | 2010-02-21 14:17:22 -0500 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2010-02-21 14:17:22 -0500 |
| commit | 5f854cfc024622e4aae14d7cf422f6ff86278688 (patch) | |
| tree | 426e77c6f6e4939c80440bf1fabcb020e3ee145b /include/linux/percpu.h | |
| parent | cc24da0742870f152ddf1002aa39dfcd83f7cf9c (diff) | |
| parent | 4ec62b2b2e6bd7ddef7b6cea6e5db7b5578a6532 (diff) | |
Forward to 2.6.33-rc8
Merge branch 'linus' into rt/head with a pile of conflicts.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include/linux/percpu.h')
| -rw-r--r-- | include/linux/percpu.h | 526 |
1 file changed, 485 insertions, 41 deletions
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 0b4575758cf9..ea3dfffa0ac4 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
| @@ -34,14 +34,14 @@ | |||
| 34 | 34 | ||
| 35 | /* | 35 | /* |
| 36 | * Per-CPU data structures with an additional lock - useful for | 36 | * Per-CPU data structures with an additional lock - useful for |
| 37 | * PREEMPT_RT code that wants to reschedule but also wants | 37 | * PREEMPT_RT code that wants to reschedule but also wants per-CPU |
| 38 | * per-CPU data structures. | 38 | * data structures. |
| 39 | * | 39 | * |
| 40 | * 'cpu' gets updated with the CPU the task is currently executing on. | 40 | * 'cpu' gets updated with the CPU the task is currently executing on. |
| 41 | * | 41 | * |
| 42 | * NOTE: on normal !PREEMPT_RT kernels these per-CPU variables | 42 | * NOTE: on normal !PREEMPT_RT kernels these per-CPU variables are the |
| 43 | * are the same as the normal per-CPU variables, so there no | 43 | * same as the normal per-CPU variables, so there is no runtime |
| 44 | * runtime overhead. | 44 | * overhead. |
| 45 | */ | 45 | */ |
| 46 | #ifdef CONFIG_PREEMPT_RT | 46 | #ifdef CONFIG_PREEMPT_RT |
| 47 | #define get_cpu_var_locked(var, cpuptr) \ | 47 | #define get_cpu_var_locked(var, cpuptr) \ |
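A minimal usage sketch of the locked accessor above, assuming the -rt tree's DEFINE_PER_CPU_LOCKED and put_cpu_var_locked() counterparts (neither appears in this hunk, which also truncates the macro body):

```c
#include <linux/list.h>
#include <linux/percpu.h>

/* Invented example structure; DEFINE_PER_CPU_LOCKED and
 * put_cpu_var_locked() are assumed from the -rt tree. */
struct pkt_cache {
	struct list_head list;
	unsigned long count;
};

static DEFINE_PER_CPU_LOCKED(struct pkt_cache, pkt_cache);

static void pkt_cache_add(struct list_head *entry)
{
	struct pkt_cache *cache;
	int cpu;

	/* On PREEMPT_RT this takes the variable's per-CPU lock, so the
	 * task may be preempted or even rescheduled while it holds the
	 * cache; 'cpu' records which CPU's instance was taken. */
	cache = &get_cpu_var_locked(pkt_cache, &cpu);
	list_add(entry, &cache->list);
	cache->count++;
	put_cpu_var_locked(pkt_cache, cpu);
}
```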
| @@ -79,8 +79,6 @@ again: \ | |||
| 79 | 79 | ||
| 80 | #ifdef CONFIG_SMP | 80 | #ifdef CONFIG_SMP |
| 81 | 81 | ||
| 82 | #ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA | ||
| 83 | |||
| 84 | /* minimum unit size, also is the maximum supported allocation size */ | 82 | /* minimum unit size, also is the maximum supported allocation size */ |
| 85 | #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) | 83 | #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) |
| 86 | 84 | ||
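For concreteness, a freestanding sketch of what that constant evaluates to under the common 4 KiB page size; the SKETCH_* names stand in for the kernel's PAGE_SIZE and PFN_ALIGN():

```c
#include <assert.h>

#define SKETCH_PAGE_SIZE 4096UL	/* assumes 4 KiB pages */
#define SKETCH_PFN_ALIGN(x) \
	(((unsigned long)(x) + SKETCH_PAGE_SIZE - 1) & ~(SKETCH_PAGE_SIZE - 1))

int main(void)
{
	/* 64 << 10 is already page aligned, so the minimum unit size
	 * (and the largest single percpu allocation) is 64 KiB. */
	assert(SKETCH_PFN_ALIGN(64 << 10) == 64 * 1024);
	return 0;
}
```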
| @@ -102,19 +100,70 @@ again: \ | |||
| 102 | #endif | 100 | #endif |
| 103 | 101 | ||
| 104 | extern void *pcpu_base_addr; | 102 | extern void *pcpu_base_addr; |
| 103 | extern const unsigned long *pcpu_unit_offsets; | ||
| 104 | |||
| 105 | struct pcpu_group_info { | ||
| 106 | int nr_units; /* aligned # of units */ | ||
| 107 | unsigned long base_offset; /* base address offset */ | ||
| 108 | unsigned int *cpu_map; /* unit->cpu map, empty | ||
| 109 | * entries contain NR_CPUS */ | ||
| 110 | }; | ||
| 105 | 111 | ||
| 106 | typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno); | 112 | struct pcpu_alloc_info { |
| 107 | typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr); | 113 | size_t static_size; |
| 114 | size_t reserved_size; | ||
| 115 | size_t dyn_size; | ||
| 116 | size_t unit_size; | ||
| 117 | size_t atom_size; | ||
| 118 | size_t alloc_size; | ||
| 119 | size_t __ai_size; /* internal, don't use */ | ||
| 120 | int nr_groups; /* 0 if grouping unnecessary */ | ||
| 121 | struct pcpu_group_info groups[]; | ||
| 122 | }; | ||
| 108 | 123 | ||
| 109 | extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, | 124 | enum pcpu_fc { |
| 110 | size_t static_size, size_t reserved_size, | 125 | PCPU_FC_AUTO, |
| 111 | ssize_t dyn_size, ssize_t unit_size, | 126 | PCPU_FC_EMBED, |
| 112 | void *base_addr, | 127 | PCPU_FC_PAGE, |
| 113 | pcpu_populate_pte_fn_t populate_pte_fn); | 128 | |
| 129 | PCPU_FC_NR, | ||
| 130 | }; | ||
| 131 | extern const char *pcpu_fc_names[PCPU_FC_NR]; | ||
| 132 | |||
| 133 | extern enum pcpu_fc pcpu_chosen_fc; | ||
| 134 | |||
| 135 | typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size, | ||
| 136 | size_t align); | ||
| 137 | typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size); | ||
| 138 | typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr); | ||
| 139 | typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to); | ||
| 140 | |||
| 141 | extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, | ||
| 142 | int nr_units); | ||
| 143 | extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai); | ||
| 144 | |||
| 145 | extern struct pcpu_alloc_info * __init pcpu_build_alloc_info( | ||
| 146 | size_t reserved_size, ssize_t dyn_size, | ||
| 147 | size_t atom_size, | ||
| 148 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn); | ||
| 149 | |||
| 150 | extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | ||
| 151 | void *base_addr); | ||
| 152 | |||
| 153 | #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK | ||
| 154 | extern int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size, | ||
| 155 | size_t atom_size, | ||
| 156 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn, | ||
| 157 | pcpu_fc_alloc_fn_t alloc_fn, | ||
| 158 | pcpu_fc_free_fn_t free_fn); | ||
| 159 | #endif | ||
| 114 | 160 | ||
| 115 | extern ssize_t __init pcpu_embed_first_chunk( | 161 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK |
| 116 | size_t static_size, size_t reserved_size, | 162 | extern int __init pcpu_page_first_chunk(size_t reserved_size, |
| 117 | ssize_t dyn_size, ssize_t unit_size); | 163 | pcpu_fc_alloc_fn_t alloc_fn, |
| 164 | pcpu_fc_free_fn_t free_fn, | ||
| 165 | pcpu_fc_populate_pte_fn_t populate_pte_fn); | ||
| 166 | #endif | ||
| 118 | 167 | ||
| 119 | /* | 168 | /* |
| 120 | * Use this to get to a cpu's version of the per-cpu object | 169 | * Use this to get to a cpu's version of the per-cpu object |
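A hedged sketch of arch glue for the embed helper declared above; the bootmem calls, reserve constants, and panic message are illustrative assumptions, and the per-CPU offset bookkeeping that real arch code performs afterwards is elided:

```c
#include <linux/bootmem.h>
#include <linux/percpu.h>

static void * __init my_pcpu_alloc(unsigned int cpu, size_t size,
				   size_t align)
{
	/* Assumption: plain bootmem backing, allocated above the DMA zone. */
	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init my_pcpu_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	int rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					PERCPU_DYNAMIC_RESERVE,
					PAGE_SIZE,	/* atom_size */
					NULL,		/* cpu_distance_fn */
					my_pcpu_alloc, my_pcpu_free);
	if (rc < 0)
		panic("percpu: embed first chunk setup failed");

	/* Real arch code now derives __per_cpu_offset[] from
	 * pcpu_base_addr and pcpu_unit_offsets[]; omitted here. */
}
```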
| @@ -124,30 +173,13 @@ extern ssize_t __init pcpu_embed_first_chunk( | |||
| 124 | #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) | 173 | #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) |
| 125 | 174 | ||
| 126 | extern void *__alloc_reserved_percpu(size_t size, size_t align); | 175 | extern void *__alloc_reserved_percpu(size_t size, size_t align); |
| 127 | |||
| 128 | #else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ | ||
| 129 | |||
| 130 | struct percpu_data { | ||
| 131 | void *ptrs[1]; | ||
| 132 | }; | ||
| 133 | |||
| 134 | /* pointer disguising messes up the kmemleak object tracking */ | ||
| 135 | #ifndef CONFIG_DEBUG_KMEMLEAK | ||
| 136 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) | ||
| 137 | #else | ||
| 138 | #define __percpu_disguise(pdata) (struct percpu_data *)(pdata) | ||
| 139 | #endif | ||
| 140 | |||
| 141 | #define per_cpu_ptr(ptr, cpu) \ | ||
| 142 | ({ \ | ||
| 143 | struct percpu_data *__p = __percpu_disguise(ptr); \ | ||
| 144 | (__typeof__(ptr))__p->ptrs[(cpu)]; \ | ||
| 145 | }) | ||
| 146 | |||
| 147 | #endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ | ||
| 148 | |||
| 149 | extern void *__alloc_percpu(size_t size, size_t align); | 176 | extern void *__alloc_percpu(size_t size, size_t align); |
| 150 | extern void free_percpu(void *__pdata); | 177 | extern void free_percpu(void *__pdata); |
| 178 | extern phys_addr_t per_cpu_ptr_to_phys(void *addr); | ||
| 179 | |||
| 180 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA | ||
| 181 | extern void __init setup_per_cpu_areas(void); | ||
| 182 | #endif | ||
| 151 | 183 | ||
| 152 | #else /* CONFIG_SMP */ | 184 | #else /* CONFIG_SMP */ |
| 153 | 185 | ||
| @@ -169,10 +201,22 @@ static inline void free_percpu(void *p) | |||
| 169 | kfree(p); | 201 | kfree(p); |
| 170 | } | 202 | } |
| 171 | 203 | ||
| 204 | static inline phys_addr_t per_cpu_ptr_to_phys(void *addr) | ||
| 205 | { | ||
| 206 | return __pa(addr); | ||
| 207 | } | ||
| 208 | |||
| 209 | static inline void __init setup_per_cpu_areas(void) { } | ||
| 210 | |||
| 211 | static inline void *pcpu_lpage_remapped(void *kaddr) | ||
| 212 | { | ||
| 213 | return NULL; | ||
| 214 | } | ||
| 215 | |||
| 172 | #endif /* CONFIG_SMP */ | 216 | #endif /* CONFIG_SMP */ |
| 173 | 217 | ||
| 174 | #define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \ | 218 | #define alloc_percpu(type) \ |
| 175 | __alignof__(type)) | 219 | (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type)) |
| 176 | 220 | ||
| 177 | /* | 221 | /* |
| 178 | * Optional methods for optimized non-lvalue per-cpu variable access. | 222 | * Optional methods for optimized non-lvalue per-cpu variable access. |
| @@ -226,4 +270,404 @@ do { \ | |||
| 226 | # define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=) | 270 | # define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=) |
| 227 | #endif | 271 | #endif |
| 228 | 272 | ||
| 273 | /* | ||
| 274 | * Branching function to split up a function into a set of functions that | ||
| 275 | * are called for different scalar sizes of the objects handled. | ||
| 276 | */ | ||
| 277 | |||
| 278 | extern void __bad_size_call_parameter(void); | ||
| 279 | |||
| 280 | #define __pcpu_size_call_return(stem, variable) \ | ||
| 281 | ({ typeof(variable) pscr_ret__; \ | ||
| 282 | switch(sizeof(variable)) { \ | ||
| 283 | case 1: pscr_ret__ = stem##1(variable);break; \ | ||
| 284 | case 2: pscr_ret__ = stem##2(variable);break; \ | ||
| 285 | case 4: pscr_ret__ = stem##4(variable);break; \ | ||
| 286 | case 8: pscr_ret__ = stem##8(variable);break; \ | ||
| 287 | default: \ | ||
| 288 | __bad_size_call_parameter();break; \ | ||
| 289 | } \ | ||
| 290 | pscr_ret__; \ | ||
| 291 | }) | ||
| 292 | |||
| 293 | #define __pcpu_size_call(stem, variable, ...) \ | ||
| 294 | do { \ | ||
| 295 | switch(sizeof(variable)) { \ | ||
| 296 | case 1: stem##1(variable, __VA_ARGS__);break; \ | ||
| 297 | case 2: stem##2(variable, __VA_ARGS__);break; \ | ||
| 298 | case 4: stem##4(variable, __VA_ARGS__);break; \ | ||
| 299 | case 8: stem##8(variable, __VA_ARGS__);break; \ | ||
| 300 | default: \ | ||
| 301 | __bad_size_call_parameter();break; \ | ||
| 302 | } \ | ||
| 303 | } while (0) | ||
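To make the dispatch concrete, a hedged illustration with an invented 4-byte variable (per_cpu_var() usage as referenced in the comment below):

```c
#include <linux/percpu.h>

static DEFINE_PER_CPU(u32, my_stat);	/* invented example variable */

static u32 read_my_stat(void)
{
	/*
	 * this_cpu_read(per_cpu_var(my_stat)) expands through
	 * __pcpu_size_call_return(this_cpu_read_, ...) and, because
	 * sizeof(u32) == 4 is a compile-time constant, only the
	 * "case 4:" branch survives: this_cpu_read_4().  Any other
	 * size would reference __bad_size_call_parameter(), which has
	 * no definition, so the build fails at link time.
	 */
	return this_cpu_read(per_cpu_var(my_stat));
}
```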
| 304 | |||
| 305 | /* | ||
| 306 | * Optimized manipulation for memory allocated through the per cpu | ||
| 307 | * allocator or for addresses of per cpu variables (can be determined | ||
| 308 | * using per_cpu_var(xx)). | ||
| 309 | * | ||
| 310 | * These operations guarantee exclusivity of access against other operations | ||
| 311 | * on the *same* processor. The assumption is that per cpu data is only | ||
| 312 | * accessed by a single processor instance (the current one). | ||
| 313 | * | ||
| 314 | * The first group is used for accesses that must be done in a | ||
| 315 | * preemption-safe way since we know that the context is not preempt | ||
| 316 | * safe. Interrupts may occur. If the interrupt modifies the variable | ||
| 317 | * too, then RMW actions will not be reliable. | ||
| 318 | * | ||
| 319 | * The arch code can provide optimized functions in two ways: | ||
| 320 | * | ||
| 321 | * 1. Override the function completely, e.g. define this_cpu_add(). | ||
| 322 | * The arch must then ensure that the various scalar formats passed | ||
| 323 | * are handled correctly. | ||
| 324 | * | ||
| 325 | * 2. Provide functions for certain scalar sizes, e.g. provide | ||
| 326 | * this_cpu_add_2() to provide per cpu atomic operations for 2 byte | ||
| 327 | * sized RMW actions. If arch code does not provide operations for | ||
| 328 | * a scalar size then the fallback in the generic code will be | ||
| 329 | * used. | ||
| 330 | */ | ||
| 331 | |||
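A hedged sketch of option 2 above: an arch supplying only the 4-byte read and letting every other size fall back to the generic code. The asm is x86-flavored and purely illustrative:

```c
/* Illustrative arch override; real x86 code lives in asm/percpu.h and
 * must be defined before the generic fallbacks are included. */
#define this_cpu_read_4(pcp)					\
({								\
	u32 pscr4_ret__;					\
	asm("movl %%gs:%1, %0"	/* %gs holds the percpu base */	\
	    : "=r" (pscr4_ret__)				\
	    : "m" (pcp));					\
	pscr4_ret__;						\
})
```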
| 332 | #define _this_cpu_generic_read(pcp) \ | ||
| 333 | ({ typeof(pcp) ret__; \ | ||
| 334 | preempt_disable(); \ | ||
| 335 | ret__ = *this_cpu_ptr(&(pcp)); \ | ||
| 336 | preempt_enable(); \ | ||
| 337 | ret__; \ | ||
| 338 | }) | ||
| 339 | |||
| 340 | #ifndef this_cpu_read | ||
| 341 | # ifndef this_cpu_read_1 | ||
| 342 | # define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp) | ||
| 343 | # endif | ||
| 344 | # ifndef this_cpu_read_2 | ||
| 345 | # define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp) | ||
| 346 | # endif | ||
| 347 | # ifndef this_cpu_read_4 | ||
| 348 | # define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp) | ||
| 349 | # endif | ||
| 350 | # ifndef this_cpu_read_8 | ||
| 351 | # define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp) | ||
| 352 | # endif | ||
| 353 | # define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp)) | ||
| 354 | #endif | ||
| 355 | |||
| 356 | #define _this_cpu_generic_to_op(pcp, val, op) \ | ||
| 357 | do { \ | ||
| 358 | preempt_disable(); \ | ||
| 359 | *__this_cpu_ptr(&(pcp)) op val; \ | ||
| 360 | preempt_enable(); \ | ||
| 361 | } while (0) | ||
| 362 | |||
| 363 | #ifndef this_cpu_write | ||
| 364 | # ifndef this_cpu_write_1 | ||
| 365 | # define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) | ||
| 366 | # endif | ||
| 367 | # ifndef this_cpu_write_2 | ||
| 368 | # define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) | ||
| 369 | # endif | ||
| 370 | # ifndef this_cpu_write_4 | ||
| 371 | # define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) | ||
| 372 | # endif | ||
| 373 | # ifndef this_cpu_write_8 | ||
| 374 | # define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) | ||
| 375 | # endif | ||
| 376 | # define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val)) | ||
| 377 | #endif | ||
| 378 | |||
| 379 | #ifndef this_cpu_add | ||
| 380 | # ifndef this_cpu_add_1 | ||
| 381 | # define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) | ||
| 382 | # endif | ||
| 383 | # ifndef this_cpu_add_2 | ||
| 384 | # define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) | ||
| 385 | # endif | ||
| 386 | # ifndef this_cpu_add_4 | ||
| 387 | # define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) | ||
| 388 | # endif | ||
| 389 | # ifndef this_cpu_add_8 | ||
| 390 | # define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) | ||
| 391 | # endif | ||
| 392 | # define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val)) | ||
| 393 | #endif | ||
| 394 | |||
| 395 | #ifndef this_cpu_sub | ||
| 396 | # define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val)) | ||
| 397 | #endif | ||
| 398 | |||
| 399 | #ifndef this_cpu_inc | ||
| 400 | # define this_cpu_inc(pcp) this_cpu_add((pcp), 1) | ||
| 401 | #endif | ||
| 402 | |||
| 403 | #ifndef this_cpu_dec | ||
| 404 | # define this_cpu_dec(pcp) this_cpu_sub((pcp), 1) | ||
| 405 | #endif | ||
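A brief usage sketch of the preempt-safe ops above, with an invented counter:

```c
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, nr_events);	/* invented */

static void note_event(void)
{
	/* No preempt_disable() needed at the call site: the generic
	 * fallback brackets the RMW with preempt_disable()/enable(),
	 * and arch-optimized versions are preempt-safe by design. */
	this_cpu_inc(per_cpu_var(nr_events));
}
```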
| 406 | |||
| 407 | #ifndef this_cpu_and | ||
| 408 | # ifndef this_cpu_and_1 | ||
| 409 | # define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) | ||
| 410 | # endif | ||
| 411 | # ifndef this_cpu_and_2 | ||
| 412 | # define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) | ||
| 413 | # endif | ||
| 414 | # ifndef this_cpu_and_4 | ||
| 415 | # define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) | ||
| 416 | # endif | ||
| 417 | # ifndef this_cpu_and_8 | ||
| 418 | # define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) | ||
| 419 | # endif | ||
| 420 | # define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val)) | ||
| 421 | #endif | ||
| 422 | |||
| 423 | #ifndef this_cpu_or | ||
| 424 | # ifndef this_cpu_or_1 | ||
| 425 | # define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) | ||
| 426 | # endif | ||
| 427 | # ifndef this_cpu_or_2 | ||
| 428 | # define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) | ||
| 429 | # endif | ||
| 430 | # ifndef this_cpu_or_4 | ||
| 431 | # define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) | ||
| 432 | # endif | ||
| 433 | # ifndef this_cpu_or_8 | ||
| 434 | # define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) | ||
| 435 | # endif | ||
| 436 | # define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) | ||
| 437 | #endif | ||
| 438 | |||
| 439 | #ifndef this_cpu_xor | ||
| 440 | # ifndef this_cpu_xor_1 | ||
| 441 | # define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) | ||
| 442 | # endif | ||
| 443 | # ifndef this_cpu_xor_2 | ||
| 444 | # define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) | ||
| 445 | # endif | ||
| 446 | # ifndef this_cpu_xor_4 | ||
| 447 | # define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) | ||
| 448 | # endif | ||
| 449 | # ifndef this_cpu_xor_8 | ||
| 450 | # define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) | ||
| 451 | # endif | ||
| 452 | # define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_xor_, (pcp), (val)) | ||
| 453 | #endif | ||
| 454 | |||
| 455 | /* | ||
| 456 | * Generic percpu operations that do not require preemption handling. | ||
| 457 | * Either we do not care about races or the caller has the | ||
| 458 | * responsibility of handling preemption issues. Arch code can still | ||
| 459 | * override these instructions since the arch per cpu code may be more | ||
| 460 | * efficient and may actually get race freedom for free (that is the | ||
| 461 | * case for x86 for example). | ||
| 462 | * | ||
| 463 | * If there is no other protection through preempt disable and/or | ||
| 464 | * disabling interrupts then one of these RMW operations can show unexpected | ||
| 465 | * behavior because the execution thread was rescheduled on another processor | ||
| 466 | * or an interrupt occurred and the same percpu variable was modified from | ||
| 467 | * the interrupt context. | ||
| 468 | */ | ||
| 469 | #ifndef __this_cpu_read | ||
| 470 | # ifndef __this_cpu_read_1 | ||
| 471 | # define __this_cpu_read_1(pcp) (*__this_cpu_ptr(&(pcp))) | ||
| 472 | # endif | ||
| 473 | # ifndef __this_cpu_read_2 | ||
| 474 | # define __this_cpu_read_2(pcp) (*__this_cpu_ptr(&(pcp))) | ||
| 475 | # endif | ||
| 476 | # ifndef __this_cpu_read_4 | ||
| 477 | # define __this_cpu_read_4(pcp) (*__this_cpu_ptr(&(pcp))) | ||
| 478 | # endif | ||
| 479 | # ifndef __this_cpu_read_8 | ||
| 480 | # define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp))) | ||
| 481 | # endif | ||
| 482 | # define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp)) | ||
| 483 | #endif | ||
| 484 | |||
| 485 | #define __this_cpu_generic_to_op(pcp, val, op) \ | ||
| 486 | do { \ | ||
| 487 | *__this_cpu_ptr(&(pcp)) op val; \ | ||
| 488 | } while (0) | ||
| 489 | |||
| 490 | #ifndef __this_cpu_write | ||
| 491 | # ifndef __this_cpu_write_1 | ||
| 492 | # define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) | ||
| 493 | # endif | ||
| 494 | # ifndef __this_cpu_write_2 | ||
| 495 | # define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) | ||
| 496 | # endif | ||
| 497 | # ifndef __this_cpu_write_4 | ||
| 498 | # define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) | ||
| 499 | # endif | ||
| 500 | # ifndef __this_cpu_write_8 | ||
| 501 | # define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) | ||
| 502 | # endif | ||
| 503 | # define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val)) | ||
| 504 | #endif | ||
| 505 | |||
| 506 | #ifndef __this_cpu_add | ||
| 507 | # ifndef __this_cpu_add_1 | ||
| 508 | # define __this_cpu_add_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) | ||
| 509 | # endif | ||
| 510 | # ifndef __this_cpu_add_2 | ||
| 511 | # define __this_cpu_add_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) | ||
| 512 | # endif | ||
| 513 | # ifndef __this_cpu_add_4 | ||
| 514 | # define __this_cpu_add_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) | ||
| 515 | # endif | ||
| 516 | # ifndef __this_cpu_add_8 | ||
| 517 | # define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) | ||
| 518 | # endif | ||
| 519 | # define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val)) | ||
| 520 | #endif | ||
| 521 | |||
| 522 | #ifndef __this_cpu_sub | ||
| 523 | # define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val)) | ||
| 524 | #endif | ||
| 525 | |||
| 526 | #ifndef __this_cpu_inc | ||
| 527 | # define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1) | ||
| 528 | #endif | ||
| 529 | |||
| 530 | #ifndef __this_cpu_dec | ||
| 531 | # define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1) | ||
| 532 | #endif | ||
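By contrast, a sketch of the raw __this_cpu variants in a section that already runs non-preemptibly (invented counter again):

```c
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, bytes_rx);	/* invented */

static void account_rx(unsigned long bytes)
{
	get_cpu();	/* disables preemption for the RMW below */
	__this_cpu_add(per_cpu_var(bytes_rx), bytes);
	put_cpu();
}
```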
| 533 | |||
| 534 | #ifndef __this_cpu_and | ||
| 535 | # ifndef __this_cpu_and_1 | ||
| 536 | # define __this_cpu_and_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) | ||
| 537 | # endif | ||
| 538 | # ifndef __this_cpu_and_2 | ||
| 539 | # define __this_cpu_and_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) | ||
| 540 | # endif | ||
| 541 | # ifndef __this_cpu_and_4 | ||
| 542 | # define __this_cpu_and_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) | ||
| 543 | # endif | ||
| 544 | # ifndef __this_cpu_and_8 | ||
| 545 | # define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) | ||
| 546 | # endif | ||
| 547 | # define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val)) | ||
| 548 | #endif | ||
| 549 | |||
| 550 | #ifndef __this_cpu_or | ||
| 551 | # ifndef __this_cpu_or_1 | ||
| 552 | # define __this_cpu_or_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) | ||
| 553 | # endif | ||
| 554 | # ifndef __this_cpu_or_2 | ||
| 555 | # define __this_cpu_or_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) | ||
| 556 | # endif | ||
| 557 | # ifndef __this_cpu_or_4 | ||
| 558 | # define __this_cpu_or_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) | ||
| 559 | # endif | ||
| 560 | # ifndef __this_cpu_or_8 | ||
| 561 | # define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) | ||
| 562 | # endif | ||
| 563 | # define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val)) | ||
| 564 | #endif | ||
| 565 | |||
| 566 | #ifndef __this_cpu_xor | ||
| 567 | # ifndef __this_cpu_xor_1 | ||
| 568 | # define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) | ||
| 569 | # endif | ||
| 570 | # ifndef __this_cpu_xor_2 | ||
| 571 | # define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) | ||
| 572 | # endif | ||
| 573 | # ifndef __this_cpu_xor_4 | ||
| 574 | # define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) | ||
| 575 | # endif | ||
| 576 | # ifndef __this_cpu_xor_8 | ||
| 577 | # define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) | ||
| 578 | # endif | ||
| 579 | # define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val)) | ||
| 580 | #endif | ||
| 581 | |||
| 582 | /* | ||
| 583 | * IRQ safe versions of the per cpu RMW operations. Note that these operations | ||
| 584 | * are *not* safe against modification of the same variable from other | ||
| 585 | * processors (which one gets when using regular atomic operations). | ||
| 586 | * They are guaranteed to be atomic vs. local interrupts and | ||
| 587 | * preemption only. | ||
| 588 | */ | ||
| 589 | #define irqsafe_cpu_generic_to_op(pcp, val, op) \ | ||
| 590 | do { \ | ||
| 591 | unsigned long flags; \ | ||
| 592 | local_irq_save(flags); \ | ||
| 593 | *__this_cpu_ptr(&(pcp)) op val; \ | ||
| 594 | local_irq_restore(flags); \ | ||
| 595 | } while (0) | ||
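A usage sketch for the irqsafe variants built on the helper above; the invented variable is presumed to also be modified from a local interrupt handler, which is exactly the case the plain this_cpu ops do not cover:

```c
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, shared_with_irq);	/* invented */

/* Process-context side: must exclude local IRQs or an interrupt
 * between the load and the store would lose an update. */
static void bump_from_process_context(void)
{
	irqsafe_cpu_add(per_cpu_var(shared_with_irq), 1);
}
```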
| 596 | |||
| 597 | #ifndef irqsafe_cpu_add | ||
| 598 | # ifndef irqsafe_cpu_add_1 | ||
| 599 | # define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) | ||
| 600 | # endif | ||
| 601 | # ifndef irqsafe_cpu_add_2 | ||
| 602 | # define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) | ||
| 603 | # endif | ||
| 604 | # ifndef irqsafe_cpu_add_4 | ||
| 605 | # define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) | ||
| 606 | # endif | ||
| 607 | # ifndef irqsafe_cpu_add_8 | ||
| 608 | # define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) | ||
| 609 | # endif | ||
| 610 | # define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val)) | ||
| 611 | #endif | ||
| 612 | |||
| 613 | #ifndef irqsafe_cpu_sub | ||
| 614 | # define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val)) | ||
| 615 | #endif | ||
| 616 | |||
| 617 | #ifndef irqsafe_cpu_inc | ||
| 618 | # define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1) | ||
| 619 | #endif | ||
| 620 | |||
| 621 | #ifndef irqsafe_cpu_dec | ||
| 622 | # define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1) | ||
| 623 | #endif | ||
| 624 | |||
| 625 | #ifndef irqsafe_cpu_and | ||
| 626 | # ifndef irqsafe_cpu_and_1 | ||
| 627 | # define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) | ||
| 628 | # endif | ||
| 629 | # ifndef irqsafe_cpu_and_2 | ||
| 630 | # define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) | ||
| 631 | # endif | ||
| 632 | # ifndef irqsafe_cpu_and_4 | ||
| 633 | # define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) | ||
| 634 | # endif | ||
| 635 | # ifndef irqsafe_cpu_and_8 | ||
| 636 | # define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) | ||
| 637 | # endif | ||
| 638 | # define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val)) | ||
| 639 | #endif | ||
| 640 | |||
| 641 | #ifndef irqsafe_cpu_or | ||
| 642 | # ifndef irqsafe_cpu_or_1 | ||
| 643 | # define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) | ||
| 644 | # endif | ||
| 645 | # ifndef irqsafe_cpu_or_2 | ||
| 646 | # define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) | ||
| 647 | # endif | ||
| 648 | # ifndef irqsafe_cpu_or_4 | ||
| 649 | # define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) | ||
| 650 | # endif | ||
| 651 | # ifndef irqsafe_cpu_or_8 | ||
| 652 | # define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) | ||
| 653 | # endif | ||
| 654 | # define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val)) | ||
| 655 | #endif | ||
| 656 | |||
| 657 | #ifndef irqsafe_cpu_xor | ||
| 658 | # ifndef irqsafe_cpu_xor_1 | ||
| 659 | # define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) | ||
| 660 | # endif | ||
| 661 | # ifndef irqsafe_cpu_xor_2 | ||
| 662 | # define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) | ||
| 663 | # endif | ||
| 664 | # ifndef irqsafe_cpu_xor_4 | ||
| 665 | # define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) | ||
| 666 | # endif | ||
| 667 | # ifndef irqsafe_cpu_xor_8 | ||
| 668 | # define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) | ||
| 669 | # endif | ||
| 670 | # define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val)) | ||
| 671 | #endif | ||
| 672 | |||
| 229 | #endif /* __LINUX_PERCPU_H */ | 673 | #endif /* __LINUX_PERCPU_H */ |
